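# ============================================================================
# File 1 of 3 — appears to be tests.py from the admin_utils app of Django's
# own test suite: tests for the helpers in django.contrib.admin.utils.
# ============================================================================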
from datetime import datetime
from decimal import Decimal

from django import forms
from django.conf import settings
from django.contrib.admin import helpers
from django.contrib.admin.utils import (
    NestedObjects, display_for_field, display_for_value, flatten,
    flatten_fieldsets, help_text_for_field, label_for_field, lookup_field,
    quote,
)
from django.db import DEFAULT_DB_ALIAS, models
from django.test import SimpleTestCase, TestCase, override_settings
from django.utils.formats import localize
from django.utils.safestring import mark_safe

from .models import (
    Article, Car, Count, Event, EventGuide, Location, Site, Vehicle,
)


class NestedObjectsTests(TestCase):
    """
    Tests for ``NestedObjects`` utility collection.
    """
    @classmethod
    def setUpTestData(cls):
        cls.n = NestedObjects(using=DEFAULT_DB_ALIAS)
        cls.objs = [Count.objects.create(num=i) for i in range(5)]

    def _check(self, target):
        self.assertEqual(self.n.nested(lambda obj: obj.num), target)

    def _connect(self, i, j):
        self.objs[i].parent = self.objs[j]
        self.objs[i].save()

    def _collect(self, *indices):
        self.n.collect([self.objs[i] for i in indices])

    def test_unrelated_roots(self):
        self._connect(2, 1)
        self._collect(0)
        self._collect(1)
        self._check([0, 1, [2]])

    def test_siblings(self):
        self._connect(1, 0)
        self._connect(2, 0)
        self._collect(0)
        self._check([0, [1, 2]])

    def test_non_added_parent(self):
        self._connect(0, 1)
        self._collect(0)
        self._check([0])

    def test_cyclic(self):
        self._connect(0, 2)
        self._connect(1, 0)
        self._connect(2, 1)
        self._collect(0)
        self._check([0, [1, [2]]])

    def test_queries(self):
        self._connect(1, 0)
        self._connect(2, 0)
        # 1 query to fetch all children of 0 (1 and 2)
        # 1 query to fetch all children of 1 and 2 (none)
        # Should not require additional queries to populate the nested graph.
        self.assertNumQueries(2, self._collect, 0)

    def test_on_delete_do_nothing(self):
        """
        The nested collector doesn't query for DO_NOTHING objects.
        """
        n = NestedObjects(using=DEFAULT_DB_ALIAS)
        objs = [Event.objects.create()]
        EventGuide.objects.create(event=objs[0])
        with self.assertNumQueries(2):
            # One for Location, one for Guest, and no query for EventGuide
            n.collect(objs)

    def test_relation_on_abstract(self):
        """
        NestedObjects.collect() doesn't trip (AttributeError) on the special
        notation for relations on abstract models (related_name that contains
        %(app_label)s and/or %(class)s) (#21846).
        """
        n = NestedObjects(using=DEFAULT_DB_ALIAS)
        Car.objects.create()
        n.collect([Vehicle.objects.first()])


class UtilsTests(SimpleTestCase):

    empty_value = '-empty-'

    def test_values_from_lookup_field(self):
        """
        Regression test for #12654: lookup_field
        """
        SITE_NAME = 'example.com'
        TITLE_TEXT = 'Some title'
        CREATED_DATE = datetime.min
        ADMIN_METHOD = 'admin method'
        SIMPLE_FUNCTION = 'function'
        INSTANCE_ATTRIBUTE = 'attr'

        class MockModelAdmin:
            def get_admin_value(self, obj):
                return ADMIN_METHOD

        def simple_function(obj):
            return SIMPLE_FUNCTION

        site_obj = Site(domain=SITE_NAME)
        article = Article(
            site=site_obj,
            title=TITLE_TEXT,
            created=CREATED_DATE,
        )
        article.non_field = INSTANCE_ATTRIBUTE

        verifications = (
            ('site', SITE_NAME),
            ('created', localize(CREATED_DATE)),
            ('title', TITLE_TEXT),
            ('get_admin_value', ADMIN_METHOD),
            (simple_function, SIMPLE_FUNCTION),
            ('test_from_model', article.test_from_model()),
            ('non_field', INSTANCE_ATTRIBUTE)
        )

        mock_admin = MockModelAdmin()
        for name, value in verifications:
            field, attr, resolved_value = lookup_field(name, article, mock_admin)

            if field is not None:
                resolved_value = display_for_field(resolved_value, field, self.empty_value)

            self.assertEqual(value, resolved_value)

    def test_null_display_for_field(self):
        """
        Regression test for #12550: display_for_field should handle None
        value.
        """
        display_value = display_for_field(None, models.CharField(), self.empty_value)
        self.assertEqual(display_value, self.empty_value)

        display_value = display_for_field(None, models.CharField(
            choices=(
                (None, "test_none"),
            )
        ), self.empty_value)
        self.assertEqual(display_value, "test_none")

        display_value = display_for_field(None, models.DateField(), self.empty_value)
        self.assertEqual(display_value, self.empty_value)

        display_value = display_for_field(None, models.TimeField(), self.empty_value)
        self.assertEqual(display_value, self.empty_value)

        # Regression test for #13071: NullBooleanField has special handling.
        display_value = display_for_field(None, models.NullBooleanField(), self.empty_value)
        expected = '<img src="%sadmin/img/icon-unknown.svg" alt="None">' % settings.STATIC_URL
        self.assertHTMLEqual(display_value, expected)

        display_value = display_for_field(None, models.BooleanField(null=True), self.empty_value)
        expected = '<img src="%sadmin/img/icon-unknown.svg" alt="None">' % settings.STATIC_URL
        self.assertHTMLEqual(display_value, expected)

        display_value = display_for_field(None, models.DecimalField(), self.empty_value)
        self.assertEqual(display_value, self.empty_value)

        display_value = display_for_field(None, models.FloatField(), self.empty_value)
        self.assertEqual(display_value, self.empty_value)

        display_value = display_for_field(None, models.JSONField(), self.empty_value)
        self.assertEqual(display_value, self.empty_value)

    def test_json_display_for_field(self):
        tests = [
            ({'a': {'b': 'c'}}, '{"a": {"b": "c"}}'),
            (['a', 'b'], '["a", "b"]'),
            ('a', '"a"'),
            ({('a', 'b'): 'c'}, "{('a', 'b'): 'c'}"),  # Invalid JSON.
        ]
        for value, display_value in tests:
            with self.subTest(value=value):
                self.assertEqual(
                    display_for_field(value, models.JSONField(), self.empty_value),
                    display_value,
                )

    def test_number_formats_display_for_field(self):
        display_value = display_for_field(12345.6789, models.FloatField(), self.empty_value)
        self.assertEqual(display_value, '12345.6789')

        display_value = display_for_field(Decimal('12345.6789'), models.DecimalField(), self.empty_value)
        self.assertEqual(display_value, '12345.6789')

        display_value = display_for_field(12345, models.IntegerField(), self.empty_value)
        self.assertEqual(display_value, '12345')

    @override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
    def test_number_formats_with_thousand_separator_display_for_field(self):
        display_value = display_for_field(12345.6789, models.FloatField(), self.empty_value)
        self.assertEqual(display_value, '12,345.6789')

        display_value = display_for_field(Decimal('12345.6789'), models.DecimalField(), self.empty_value)
        self.assertEqual(display_value, '12,345.6789')

        display_value = display_for_field(12345, models.IntegerField(), self.empty_value)
        self.assertEqual(display_value, '12,345')

    def test_list_display_for_value(self):
        display_value = display_for_value([1, 2, 3], self.empty_value)
        self.assertEqual(display_value, '1, 2, 3')

        display_value = display_for_value([1, 2, 'buckle', 'my', 'shoe'], self.empty_value)
        self.assertEqual(display_value, '1, 2, buckle, my, shoe')

    @override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
    def test_list_display_for_value_boolean(self):
        self.assertEqual(
            display_for_value(True, '', boolean=True),
            '<img src="/static/admin/img/icon-yes.svg" alt="True">'
        )
        self.assertEqual(
            display_for_value(False, '', boolean=True),
            '<img src="/static/admin/img/icon-no.svg" alt="False">'
        )
        self.assertEqual(display_for_value(True, ''), 'True')
        self.assertEqual(display_for_value(False, ''), 'False')

    def test_label_for_field(self):
        """
        Tests for label_for_field
        """
        self.assertEqual(
            label_for_field("title", Article),
            "title"
        )
        self.assertEqual(
            label_for_field("hist", Article),
            "History"
        )
        self.assertEqual(
            label_for_field("hist", Article, return_attr=True),
            ("History", None)
        )

        self.assertEqual(
            label_for_field("__str__", Article),
            "article"
        )

        with self.assertRaisesMessage(AttributeError, "Unable to lookup 'unknown' on Article"):
            label_for_field("unknown", Article)

        def test_callable(obj):
            return "nothing"
        self.assertEqual(
            label_for_field(test_callable, Article),
            "Test callable"
        )
        self.assertEqual(
            label_for_field(test_callable, Article, return_attr=True),
            ("Test callable", test_callable)
        )

        self.assertEqual(
            label_for_field("test_from_model", Article),
            "Test from model"
        )
        self.assertEqual(
            label_for_field("test_from_model", Article, return_attr=True),
            ("Test from model", Article.test_from_model)
        )

        self.assertEqual(
            label_for_field("test_from_model_with_override", Article),
            "not What you Expect"
        )

        self.assertEqual(
            label_for_field(lambda x: "nothing", Article),
            "--"
        )
        self.assertEqual(label_for_field('site_id', Article), 'Site id')

        class MockModelAdmin:
            def test_from_model(self, obj):
                return "nothing"
            test_from_model.short_description = "not Really the Model"

        self.assertEqual(
            label_for_field("test_from_model", Article, model_admin=MockModelAdmin),
            "not Really the Model"
        )
        self.assertEqual(
            label_for_field("test_from_model", Article, model_admin=MockModelAdmin, return_attr=True),
            ("not Really the Model", MockModelAdmin.test_from_model)
        )

    def test_label_for_field_form_argument(self):
        class ArticleForm(forms.ModelForm):
            extra_form_field = forms.BooleanField()

            class Meta:
                fields = '__all__'
                model = Article

        self.assertEqual(
            label_for_field('extra_form_field', Article, form=ArticleForm()),
            'Extra form field'
        )
        msg = "Unable to lookup 'nonexistent' on Article or ArticleForm"
        with self.assertRaisesMessage(AttributeError, msg):
            label_for_field('nonexistent', Article, form=ArticleForm())

    def test_label_for_property(self):
        # NOTE: cannot use @property decorator, because of
        # AttributeError: 'property' object has no attribute 'short_description'
        class MockModelAdmin:
            def my_property(self):
                return "this is from property"
            my_property.short_description = 'property short description'
            test_from_property = property(my_property)

        self.assertEqual(
            label_for_field("test_from_property", Article, model_admin=MockModelAdmin),
            'property short description'
        )

    def test_help_text_for_field(self):
        tests = [
            ('article', ''),
            ('unknown', ''),
            ('hist', 'History help text'),
        ]
        for name, help_text in tests:
            with self.subTest(name=name):
                self.assertEqual(help_text_for_field(name, Article), help_text)

    def test_related_name(self):
        """
        Regression test for #13963
        """
        self.assertEqual(
            label_for_field('location', Event, return_attr=True),
            ('location', None),
        )
        self.assertEqual(
            label_for_field('event', Location, return_attr=True),
            ('awesome event', None),
        )
        self.assertEqual(
            label_for_field('guest', Event, return_attr=True),
            ('awesome guest', None),
        )

    def test_safestring_in_field_label(self):
        # safestring should not be escaped
        class MyForm(forms.Form):
            text = forms.CharField(label=mark_safe('<i>text</i>'))
            cb = forms.BooleanField(label=mark_safe('<i>cb</i>'))

        form = MyForm()
        self.assertHTMLEqual(
            helpers.AdminField(form, 'text', is_first=False).label_tag(),
            '<label for="id_text" class="required inline"><i>text</i>:</label>'
        )
        self.assertHTMLEqual(
            helpers.AdminField(form, 'cb', is_first=False).label_tag(),
            '<label for="id_cb" class="vCheckboxLabel required inline"><i>cb</i></label>'
        )

        # normal strings need to be escaped
        class MyForm(forms.Form):
            text = forms.CharField(label='&text')
            cb = forms.BooleanField(label='&cb')

        form = MyForm()
        self.assertHTMLEqual(
            helpers.AdminField(form, 'text', is_first=False).label_tag(),
            '<label for="id_text" class="required inline">&amp;text:</label>'
        )
        self.assertHTMLEqual(
            helpers.AdminField(form, 'cb', is_first=False).label_tag(),
            '<label for="id_cb" class="vCheckboxLabel required inline">&amp;cb</label>'
        )

    def test_flatten(self):
        flat_all = ['url', 'title', 'content', 'sites']
        inputs = (
            ((), []),
            (('url', 'title', ('content', 'sites')), flat_all),
            (('url', 'title', 'content', 'sites'), flat_all),
            ((('url', 'title'), ('content', 'sites')), flat_all)
        )
        for orig, expected in inputs:
            self.assertEqual(flatten(orig), expected)

    def test_flatten_fieldsets(self):
        """
        Regression test for #18051
        """
        fieldsets = (
            (None, {
                'fields': ('url', 'title', ('content', 'sites'))
            }),
        )
        self.assertEqual(flatten_fieldsets(fieldsets), ['url', 'title', 'content', 'sites'])

        fieldsets = (
            (None, {
                'fields': ('url', 'title', ['content', 'sites'])
            }),
        )
        self.assertEqual(flatten_fieldsets(fieldsets), ['url', 'title', 'content', 'sites'])

    def test_quote(self):
        self.assertEqual(quote('something\nor\nother'), 'something_0Aor_0Aother')
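# ============================================================================
# File 2 of 3 — appears to be the companion models.py for the admin_utils
# tests above: the models those tests reference.
# ============================================================================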
from django.db import models
from django.utils.translation import gettext_lazy as _


class Site(models.Model):
    domain = models.CharField(max_length=100)

    def __str__(self):
        return self.domain


class Article(models.Model):
    """
    A simple Article model for testing
    """
    site = models.ForeignKey(Site, models.CASCADE, related_name="admin_articles")
    title = models.CharField(max_length=100)
    hist = models.CharField(
        max_length=100,
        verbose_name=_('History'),
        help_text=_('History help text'),
    )
    created = models.DateTimeField(null=True)

    def __str__(self):
        return self.title

    def test_from_model(self):
        return "nothing"

    def test_from_model_with_override(self):
        return "nothing"
    test_from_model_with_override.short_description = "not What you Expect"


class ArticleProxy(Article):
    class Meta:
        proxy = True


class Count(models.Model):
    num = models.PositiveSmallIntegerField()
    parent = models.ForeignKey('self', models.CASCADE, null=True)

    def __str__(self):
        return str(self.num)


class Event(models.Model):
    date = models.DateTimeField(auto_now_add=True)


class Location(models.Model):
    event = models.OneToOneField(Event, models.CASCADE, verbose_name='awesome event')


class Guest(models.Model):
    event = models.OneToOneField(Event, models.CASCADE)
    name = models.CharField(max_length=255)

    class Meta:
        verbose_name = "awesome guest"


class EventGuide(models.Model):
    event = models.ForeignKey(Event, models.DO_NOTHING)


class Vehicle(models.Model):
    pass


class VehicleMixin(Vehicle):
    vehicle = models.OneToOneField(
        Vehicle,
        models.CASCADE,
        parent_link=True,
        related_name='vehicle_%(app_label)s_%(class)s',
    )

    class Meta:
        abstract = True


class Car(VehicleMixin):
    pass
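# ============================================================================
# File 3 of 3 — appears to be tests.py from the model_forms app of Django's
# own test suite: ModelForm behavior tests (the dump is truncated mid-file).
# ============================================================================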
import datetime import os from decimal import Decimal from unittest import mock, skipUnless from django import forms from django.core.exceptions import ( NON_FIELD_ERRORS, FieldError, ImproperlyConfigured, ValidationError, ) from django.core.files.uploadedfile import SimpleUploadedFile from django.db import connection, models from django.db.models.query import EmptyQuerySet from django.forms.models import ( ModelFormMetaclass, construct_instance, fields_for_model, model_to_dict, modelform_factory, ) from django.template import Context, Template from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature from .models import ( Article, ArticleStatus, Author, Author1, Award, BetterWriter, BigInt, Book, Category, Character, Colour, ColourfulItem, CustomErrorMessage, CustomFF, CustomFieldForExclusionModel, DateTimePost, DerivedBook, DerivedPost, Document, ExplicitPK, FilePathModel, FlexibleDatePost, Homepage, ImprovedArticle, ImprovedArticleWithParentLink, Inventory, NullableUniqueCharFieldModel, Person, Photo, Post, Price, Product, Publication, PublicationDefaults, StrictAssignmentAll, StrictAssignmentFieldSpecific, Student, StumpJoke, TextFile, Triple, Writer, WriterProfile, test_images, ) if test_images: from .models import ImageFile, OptionalImageFile, NoExtensionImageFile class ImageFileForm(forms.ModelForm): class Meta: model = ImageFile fields = '__all__' class OptionalImageFileForm(forms.ModelForm): class Meta: model = OptionalImageFile fields = '__all__' class NoExtensionImageFileForm(forms.ModelForm): class Meta: model = NoExtensionImageFile fields = '__all__' class ProductForm(forms.ModelForm): class Meta: model = Product fields = '__all__' class PriceForm(forms.ModelForm): class Meta: model = Price fields = '__all__' class BookForm(forms.ModelForm): class Meta: model = Book fields = '__all__' class DerivedBookForm(forms.ModelForm): class Meta: model = DerivedBook fields = '__all__' class ExplicitPKForm(forms.ModelForm): class Meta: model = ExplicitPK fields = ('key', 'desc',) class PostForm(forms.ModelForm): class Meta: model = Post fields = '__all__' class DerivedPostForm(forms.ModelForm): class Meta: model = DerivedPost fields = '__all__' class CustomWriterForm(forms.ModelForm): name = forms.CharField(required=False) class Meta: model = Writer fields = '__all__' class BaseCategoryForm(forms.ModelForm): class Meta: model = Category fields = '__all__' class ArticleForm(forms.ModelForm): class Meta: model = Article fields = '__all__' class RoykoForm(forms.ModelForm): class Meta: model = Writer fields = '__all__' class ArticleStatusForm(forms.ModelForm): class Meta: model = ArticleStatus fields = '__all__' class InventoryForm(forms.ModelForm): class Meta: model = Inventory fields = '__all__' class SelectInventoryForm(forms.Form): items = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode') class CustomFieldForExclusionForm(forms.ModelForm): class Meta: model = CustomFieldForExclusionModel fields = ['name', 'markup'] class TextFileForm(forms.ModelForm): class Meta: model = TextFile fields = '__all__' class BigIntForm(forms.ModelForm): class Meta: model = BigInt fields = '__all__' class ModelFormWithMedia(forms.ModelForm): class Media: js = ('/some/form/javascript',) css = { 'all': ('/some/form/css',) } class Meta: model = TextFile fields = '__all__' class CustomErrorMessageForm(forms.ModelForm): name1 = forms.CharField(error_messages={'invalid': 'Form custom error message.'}) class Meta: fields = '__all__' model = CustomErrorMessage class 
ModelFormBaseTest(TestCase): def test_base_form(self): self.assertEqual(list(BaseCategoryForm.base_fields), ['name', 'slug', 'url']) def test_no_model_class(self): class NoModelModelForm(forms.ModelForm): pass with self.assertRaisesMessage(ValueError, 'ModelForm has no model class specified.'): NoModelModelForm() def test_empty_fields_to_fields_for_model(self): """ An argument of fields=() to fields_for_model should return an empty dictionary """ field_dict = fields_for_model(Person, fields=()) self.assertEqual(len(field_dict), 0) def test_empty_fields_on_modelform(self): """ No fields on a ModelForm should actually result in no fields. """ class EmptyPersonForm(forms.ModelForm): class Meta: model = Person fields = () form = EmptyPersonForm() self.assertEqual(len(form.fields), 0) def test_empty_fields_to_construct_instance(self): """ No fields should be set on a model instance if construct_instance receives fields=(). """ form = modelform_factory(Person, fields="__all__")({'name': 'John Doe'}) self.assertTrue(form.is_valid()) instance = construct_instance(form, Person(), fields=()) self.assertEqual(instance.name, '') def test_blank_with_null_foreign_key_field(self): """ #13776 -- ModelForm's with models having a FK set to null=False and required=False should be valid. """ class FormForTestingIsValid(forms.ModelForm): class Meta: model = Student fields = '__all__' def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields['character'].required = False char = Character.objects.create(username='user', last_action=datetime.datetime.today()) data = {'study': 'Engineering'} data2 = {'study': 'Engineering', 'character': char.pk} # form is valid because required=False for field 'character' f1 = FormForTestingIsValid(data) self.assertTrue(f1.is_valid()) f2 = FormForTestingIsValid(data2) self.assertTrue(f2.is_valid()) obj = f2.save() self.assertEqual(obj.character, char) def test_blank_false_with_null_true_foreign_key_field(self): """ A ModelForm with a model having ForeignKey(blank=False, null=True) and the form field set to required=False should allow the field to be unset. 
""" class AwardForm(forms.ModelForm): class Meta: model = Award fields = '__all__' def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields['character'].required = False character = Character.objects.create(username='user', last_action=datetime.datetime.today()) award = Award.objects.create(name='Best sprinter', character=character) data = {'name': 'Best tester', 'character': ''} # remove character form = AwardForm(data=data, instance=award) self.assertTrue(form.is_valid()) award = form.save() self.assertIsNone(award.character) def test_blank_foreign_key_with_radio(self): class BookForm(forms.ModelForm): class Meta: model = Book fields = ['author'] widgets = {'author': forms.RadioSelect()} writer = Writer.objects.create(name='Joe Doe') form = BookForm() self.assertEqual(list(form.fields['author'].choices), [ ('', '---------'), (writer.pk, 'Joe Doe'), ]) def test_non_blank_foreign_key_with_radio(self): class AwardForm(forms.ModelForm): class Meta: model = Award fields = ['character'] widgets = {'character': forms.RadioSelect()} character = Character.objects.create( username='user', last_action=datetime.datetime.today(), ) form = AwardForm() self.assertEqual( list(form.fields['character'].choices), [(character.pk, 'user')], ) def test_save_blank_false_with_required_false(self): """ A ModelForm with a model with a field set to blank=False and the form field set to required=False should allow the field to be unset. """ obj = Writer.objects.create(name='test') form = CustomWriterForm(data={'name': ''}, instance=obj) self.assertTrue(form.is_valid()) obj = form.save() self.assertEqual(obj.name, '') def test_save_blank_null_unique_charfield_saves_null(self): form_class = modelform_factory(model=NullableUniqueCharFieldModel, fields=['codename']) empty_value = '' if connection.features.interprets_empty_strings_as_nulls else None form = form_class(data={'codename': ''}) self.assertTrue(form.is_valid()) form.save() self.assertEqual(form.instance.codename, empty_value) # Save a second form to verify there isn't a unique constraint violation. form = form_class(data={'codename': ''}) self.assertTrue(form.is_valid()) form.save() self.assertEqual(form.instance.codename, empty_value) def test_missing_fields_attribute(self): message = ( "Creating a ModelForm without either the 'fields' attribute " "or the 'exclude' attribute is prohibited; form " "MissingFieldsForm needs updating." 
) with self.assertRaisesMessage(ImproperlyConfigured, message): class MissingFieldsForm(forms.ModelForm): class Meta: model = Category def test_extra_fields(self): class ExtraFields(BaseCategoryForm): some_extra_field = forms.BooleanField() self.assertEqual(list(ExtraFields.base_fields), ['name', 'slug', 'url', 'some_extra_field']) def test_extra_field_model_form(self): with self.assertRaisesMessage(FieldError, 'no-field'): class ExtraPersonForm(forms.ModelForm): """ ModelForm with an extra field """ age = forms.IntegerField() class Meta: model = Person fields = ('name', 'no-field') def test_extra_declared_field_model_form(self): class ExtraPersonForm(forms.ModelForm): """ ModelForm with an extra field """ age = forms.IntegerField() class Meta: model = Person fields = ('name', 'age') def test_extra_field_modelform_factory(self): with self.assertRaisesMessage(FieldError, 'Unknown field(s) (no-field) specified for Person'): modelform_factory(Person, fields=['no-field', 'name']) def test_replace_field(self): class ReplaceField(forms.ModelForm): url = forms.BooleanField() class Meta: model = Category fields = '__all__' self.assertIsInstance(ReplaceField.base_fields['url'], forms.fields.BooleanField) def test_replace_field_variant_2(self): # Should have the same result as before, # but 'fields' attribute specified differently class ReplaceField(forms.ModelForm): url = forms.BooleanField() class Meta: model = Category fields = ['url'] self.assertIsInstance(ReplaceField.base_fields['url'], forms.fields.BooleanField) def test_replace_field_variant_3(self): # Should have the same result as before, # but 'fields' attribute specified differently class ReplaceField(forms.ModelForm): url = forms.BooleanField() class Meta: model = Category fields = [] # url will still appear, since it is explicit above self.assertIsInstance(ReplaceField.base_fields['url'], forms.fields.BooleanField) def test_override_field(self): class WriterForm(forms.ModelForm): book = forms.CharField(required=False) class Meta: model = Writer fields = '__all__' wf = WriterForm({'name': 'Richard Lockridge'}) self.assertTrue(wf.is_valid()) def test_limit_nonexistent_field(self): expected_msg = 'Unknown field(s) (nonexistent) specified for Category' with self.assertRaisesMessage(FieldError, expected_msg): class InvalidCategoryForm(forms.ModelForm): class Meta: model = Category fields = ['nonexistent'] def test_limit_fields_with_string(self): expected_msg = "CategoryForm.Meta.fields cannot be a string. Did you mean to type: ('url',)?" with self.assertRaisesMessage(TypeError, expected_msg): class CategoryForm(forms.ModelForm): class Meta: model = Category fields = ('url') # note the missing comma def test_exclude_fields(self): class ExcludeFields(forms.ModelForm): class Meta: model = Category exclude = ['url'] self.assertEqual(list(ExcludeFields.base_fields), ['name', 'slug']) def test_exclude_nonexistent_field(self): class ExcludeFields(forms.ModelForm): class Meta: model = Category exclude = ['nonexistent'] self.assertEqual(list(ExcludeFields.base_fields), ['name', 'slug', 'url']) def test_exclude_fields_with_string(self): expected_msg = "CategoryForm.Meta.exclude cannot be a string. Did you mean to type: ('url',)?" 
with self.assertRaisesMessage(TypeError, expected_msg): class CategoryForm(forms.ModelForm): class Meta: model = Category exclude = ('url') # note the missing comma def test_exclude_and_validation(self): # This Price instance generated by this form is not valid because the quantity # field is required, but the form is valid because the field is excluded from # the form. This is for backwards compatibility. class PriceFormWithoutQuantity(forms.ModelForm): class Meta: model = Price exclude = ('quantity',) form = PriceFormWithoutQuantity({'price': '6.00'}) self.assertTrue(form.is_valid()) price = form.save(commit=False) msg = "{'quantity': ['This field cannot be null.']}" with self.assertRaisesMessage(ValidationError, msg): price.full_clean() # The form should not validate fields that it doesn't contain even if they are # specified using 'fields', not 'exclude'. class PriceFormWithoutQuantity(forms.ModelForm): class Meta: model = Price fields = ('price',) form = PriceFormWithoutQuantity({'price': '6.00'}) self.assertTrue(form.is_valid()) # The form should still have an instance of a model that is not complete and # not saved into a DB yet. self.assertEqual(form.instance.price, Decimal('6.00')) self.assertIsNone(form.instance.quantity) self.assertIsNone(form.instance.pk) def test_confused_form(self): class ConfusedForm(forms.ModelForm): """ Using 'fields' *and* 'exclude'. Not sure why you'd want to do this, but uh, "be liberal in what you accept" and all. """ class Meta: model = Category fields = ['name', 'url'] exclude = ['url'] self.assertEqual(list(ConfusedForm.base_fields), ['name']) def test_mixmodel_form(self): class MixModelForm(BaseCategoryForm): """ Don't allow more than one 'model' definition in the inheritance hierarchy. Technically, it would generate a valid form, but the fact that the resulting save method won't deal with multiple objects is likely to trip up people not familiar with the mechanics. """ class Meta: model = Article fields = '__all__' # MixModelForm is now an Article-related thing, because MixModelForm.Meta # overrides BaseCategoryForm.Meta. self.assertEqual( list(MixModelForm.base_fields), ['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status'] ) def test_article_form(self): self.assertEqual( list(ArticleForm.base_fields), ['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status'] ) def test_bad_form(self): # First class with a Meta class wins... class BadForm(ArticleForm, BaseCategoryForm): pass self.assertEqual( list(BadForm.base_fields), ['headline', 'slug', 'pub_date', 'writer', 'article', 'categories', 'status'] ) def test_invalid_meta_model(self): class InvalidModelForm(forms.ModelForm): class Meta: pass # no model # Can't create new form msg = 'ModelForm has no model class specified.' with self.assertRaisesMessage(ValueError, msg): InvalidModelForm() # Even if you provide a model instance with self.assertRaisesMessage(ValueError, msg): InvalidModelForm(instance=Category) def test_subcategory_form(self): class SubCategoryForm(BaseCategoryForm): """ Subclassing without specifying a Meta on the class will use the parent's Meta (or the first parent in the MRO if there are multiple parent classes). 
""" pass self.assertEqual(list(SubCategoryForm.base_fields), ['name', 'slug', 'url']) def test_subclassmeta_form(self): class SomeCategoryForm(forms.ModelForm): checkbox = forms.BooleanField() class Meta: model = Category fields = '__all__' class SubclassMeta(SomeCategoryForm): """ We can also subclass the Meta inner class to change the fields list. """ class Meta(SomeCategoryForm.Meta): exclude = ['url'] self.assertHTMLEqual( str(SubclassMeta()), """<tr><th><label for="id_name">Name:</label></th> <td><input id="id_name" type="text" name="name" maxlength="20" required></td></tr> <tr><th><label for="id_slug">Slug:</label></th> <td><input id="id_slug" type="text" name="slug" maxlength="20" required></td></tr> <tr><th><label for="id_checkbox">Checkbox:</label></th> <td><input type="checkbox" name="checkbox" id="id_checkbox" required></td></tr>""" ) def test_orderfields_form(self): class OrderFields(forms.ModelForm): class Meta: model = Category fields = ['url', 'name'] self.assertEqual(list(OrderFields.base_fields), ['url', 'name']) self.assertHTMLEqual( str(OrderFields()), """<tr><th><label for="id_url">The URL:</label></th> <td><input id="id_url" type="text" name="url" maxlength="40" required></td></tr> <tr><th><label for="id_name">Name:</label></th> <td><input id="id_name" type="text" name="name" maxlength="20" required></td></tr>""" ) def test_orderfields2_form(self): class OrderFields2(forms.ModelForm): class Meta: model = Category fields = ['slug', 'url', 'name'] exclude = ['url'] self.assertEqual(list(OrderFields2.base_fields), ['slug', 'name']) def test_default_populated_on_optional_field(self): class PubForm(forms.ModelForm): mode = forms.CharField(max_length=255, required=False) class Meta: model = PublicationDefaults fields = ('mode',) # Empty data uses the model field default. mf1 = PubForm({}) self.assertEqual(mf1.errors, {}) m1 = mf1.save(commit=False) self.assertEqual(m1.mode, 'di') self.assertEqual(m1._meta.get_field('mode').get_default(), 'di') # Blank data doesn't use the model field default. mf2 = PubForm({'mode': ''}) self.assertEqual(mf2.errors, {}) m2 = mf2.save(commit=False) self.assertEqual(m2.mode, '') def test_default_not_populated_on_non_empty_value_in_cleaned_data(self): class PubForm(forms.ModelForm): mode = forms.CharField(max_length=255, required=False) mocked_mode = None def clean(self): self.cleaned_data['mode'] = self.mocked_mode return self.cleaned_data class Meta: model = PublicationDefaults fields = ('mode',) pub_form = PubForm({}) pub_form.mocked_mode = 'de' pub = pub_form.save(commit=False) self.assertEqual(pub.mode, 'de') # Default should be populated on an empty value in cleaned_data. default_mode = 'di' for empty_value in pub_form.fields['mode'].empty_values: with self.subTest(empty_value=empty_value): pub_form = PubForm({}) pub_form.mocked_mode = empty_value pub = pub_form.save(commit=False) self.assertEqual(pub.mode, default_mode) def test_default_not_populated_on_optional_checkbox_input(self): class PubForm(forms.ModelForm): class Meta: model = PublicationDefaults fields = ('active',) # Empty data doesn't use the model default because CheckboxInput # doesn't have a value in HTML form submission. 
mf1 = PubForm({}) self.assertEqual(mf1.errors, {}) m1 = mf1.save(commit=False) self.assertIs(m1.active, False) self.assertIsInstance(mf1.fields['active'].widget, forms.CheckboxInput) self.assertIs(m1._meta.get_field('active').get_default(), True) def test_default_not_populated_on_checkboxselectmultiple(self): class PubForm(forms.ModelForm): mode = forms.CharField(required=False, widget=forms.CheckboxSelectMultiple) class Meta: model = PublicationDefaults fields = ('mode',) # Empty data doesn't use the model default because an unchecked # CheckboxSelectMultiple doesn't have a value in HTML form submission. mf1 = PubForm({}) self.assertEqual(mf1.errors, {}) m1 = mf1.save(commit=False) self.assertEqual(m1.mode, '') self.assertEqual(m1._meta.get_field('mode').get_default(), 'di') def test_default_not_populated_on_selectmultiple(self): class PubForm(forms.ModelForm): mode = forms.CharField(required=False, widget=forms.SelectMultiple) class Meta: model = PublicationDefaults fields = ('mode',) # Empty data doesn't use the model default because an unselected # SelectMultiple doesn't have a value in HTML form submission. mf1 = PubForm({}) self.assertEqual(mf1.errors, {}) m1 = mf1.save(commit=False) self.assertEqual(m1.mode, '') self.assertEqual(m1._meta.get_field('mode').get_default(), 'di') def test_prefixed_form_with_default_field(self): class PubForm(forms.ModelForm): prefix = 'form-prefix' class Meta: model = PublicationDefaults fields = ('mode',) mode = 'de' self.assertNotEqual(mode, PublicationDefaults._meta.get_field('mode').get_default()) mf1 = PubForm({'form-prefix-mode': mode}) self.assertEqual(mf1.errors, {}) m1 = mf1.save(commit=False) self.assertEqual(m1.mode, mode) def test_renderer_kwarg(self): custom = object() self.assertIs(ProductForm(renderer=custom).renderer, custom) def test_default_splitdatetime_field(self): class PubForm(forms.ModelForm): datetime_published = forms.SplitDateTimeField(required=False) class Meta: model = PublicationDefaults fields = ('datetime_published',) mf1 = PubForm({}) self.assertEqual(mf1.errors, {}) m1 = mf1.save(commit=False) self.assertEqual(m1.datetime_published, datetime.datetime(2000, 1, 1)) mf2 = PubForm({'datetime_published_0': '2010-01-01', 'datetime_published_1': '0:00:00'}) self.assertEqual(mf2.errors, {}) m2 = mf2.save(commit=False) self.assertEqual(m2.datetime_published, datetime.datetime(2010, 1, 1)) def test_default_filefield(self): class PubForm(forms.ModelForm): class Meta: model = PublicationDefaults fields = ('file',) mf1 = PubForm({}) self.assertEqual(mf1.errors, {}) m1 = mf1.save(commit=False) self.assertEqual(m1.file.name, 'default.txt') mf2 = PubForm({}, {'file': SimpleUploadedFile('name', b'foo')}) self.assertEqual(mf2.errors, {}) m2 = mf2.save(commit=False) self.assertEqual(m2.file.name, 'name') def test_default_selectdatewidget(self): class PubForm(forms.ModelForm): date_published = forms.DateField(required=False, widget=forms.SelectDateWidget) class Meta: model = PublicationDefaults fields = ('date_published',) mf1 = PubForm({}) self.assertEqual(mf1.errors, {}) m1 = mf1.save(commit=False) self.assertEqual(m1.date_published, datetime.date.today()) mf2 = PubForm({'date_published_year': '2010', 'date_published_month': '1', 'date_published_day': '1'}) self.assertEqual(mf2.errors, {}) m2 = mf2.save(commit=False) self.assertEqual(m2.date_published, datetime.date(2010, 1, 1)) class FieldOverridesByFormMetaForm(forms.ModelForm): class Meta: model = Category fields = ['name', 'url', 'slug'] widgets = { 'name': forms.Textarea, 'url': 
forms.TextInput(attrs={'class': 'url'}) } labels = { 'name': 'Title', } help_texts = { 'slug': 'Watch out! Letters, numbers, underscores and hyphens only.', } error_messages = { 'slug': { 'invalid': ( "Didn't you read the help text? " "We said letters, numbers, underscores and hyphens only!" ) } } field_classes = { 'url': forms.URLField, } class TestFieldOverridesByFormMeta(SimpleTestCase): def test_widget_overrides(self): form = FieldOverridesByFormMetaForm() self.assertHTMLEqual( str(form['name']), '<textarea id="id_name" rows="10" cols="40" name="name" maxlength="20" required></textarea>', ) self.assertHTMLEqual( str(form['url']), '<input id="id_url" type="text" class="url" name="url" maxlength="40" required>', ) self.assertHTMLEqual( str(form['slug']), '<input id="id_slug" type="text" name="slug" maxlength="20" required>', ) def test_label_overrides(self): form = FieldOverridesByFormMetaForm() self.assertHTMLEqual( str(form['name'].label_tag()), '<label for="id_name">Title:</label>', ) self.assertHTMLEqual( str(form['url'].label_tag()), '<label for="id_url">The URL:</label>', ) self.assertHTMLEqual( str(form['slug'].label_tag()), '<label for="id_slug">Slug:</label>', ) def test_help_text_overrides(self): form = FieldOverridesByFormMetaForm() self.assertEqual( form['slug'].help_text, 'Watch out! Letters, numbers, underscores and hyphens only.', ) def test_error_messages_overrides(self): form = FieldOverridesByFormMetaForm(data={ 'name': 'Category', 'url': 'http://www.example.com/category/', 'slug': '!%#*@', }) form.full_clean() error = [ "Didn't you read the help text? " "We said letters, numbers, underscores and hyphens only!", ] self.assertEqual(form.errors, {'slug': error}) def test_field_type_overrides(self): form = FieldOverridesByFormMetaForm() self.assertIs(Category._meta.get_field('url').__class__, models.CharField) self.assertIsInstance(form.fields['url'], forms.URLField) class IncompleteCategoryFormWithFields(forms.ModelForm): """ A form that replaces the model's url field with a custom one. This should prevent the model field's validation from being called. """ url = forms.CharField(required=False) class Meta: fields = ('name', 'slug') model = Category class IncompleteCategoryFormWithExclude(forms.ModelForm): """ A form that replaces the model's url field with a custom one. This should prevent the model field's validation from being called. """ url = forms.CharField(required=False) class Meta: exclude = ['url'] model = Category class ValidationTest(SimpleTestCase): def test_validates_with_replaced_field_not_specified(self): form = IncompleteCategoryFormWithFields(data={'name': 'some name', 'slug': 'some-slug'}) self.assertIs(form.is_valid(), True) def test_validates_with_replaced_field_excluded(self): form = IncompleteCategoryFormWithExclude(data={'name': 'some name', 'slug': 'some-slug'}) self.assertIs(form.is_valid(), True) def test_notrequired_overrides_notblank(self): form = CustomWriterForm({}) self.assertIs(form.is_valid(), True) class UniqueTest(TestCase): """ unique/unique_together validation. 
""" @classmethod def setUpTestData(cls): cls.writer = Writer.objects.create(name='Mike Royko') def test_simple_unique(self): form = ProductForm({'slug': 'teddy-bear-blue'}) self.assertTrue(form.is_valid()) obj = form.save() form = ProductForm({'slug': 'teddy-bear-blue'}) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['slug'], ['Product with this Slug already exists.']) form = ProductForm({'slug': 'teddy-bear-blue'}, instance=obj) self.assertTrue(form.is_valid()) def test_unique_together(self): """ModelForm test of unique_together constraint""" form = PriceForm({'price': '6.00', 'quantity': '1'}) self.assertTrue(form.is_valid()) form.save() form = PriceForm({'price': '6.00', 'quantity': '1'}) self.assertFalse(form.is_valid()) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['__all__'], ['Price with this Price and Quantity already exists.']) def test_unique_together_exclusion(self): """ Forms don't validate unique_together constraints when only part of the constraint is included in the form's fields. This allows using form.save(commit=False) and then assigning the missing field(s) to the model instance. """ class BookForm(forms.ModelForm): class Meta: model = DerivedBook fields = ('isbn', 'suffix1') # The unique_together is on suffix1/suffix2 but only suffix1 is part # of the form. The fields must have defaults, otherwise they'll be # skipped by other logic. self.assertEqual(DerivedBook._meta.unique_together, (('suffix1', 'suffix2'),)) for name in ('suffix1', 'suffix2'): with self.subTest(name=name): field = DerivedBook._meta.get_field(name) self.assertEqual(field.default, 0) # The form fails validation with "Derived book with this Suffix1 and # Suffix2 already exists." if the unique_together validation isn't # skipped. DerivedBook.objects.create(isbn='12345') form = BookForm({'isbn': '56789', 'suffix1': '0'}) self.assertTrue(form.is_valid(), form.errors) def test_multiple_field_unique_together(self): """ When the same field is involved in multiple unique_together constraints, we need to make sure we don't remove the data for it before doing all the validation checking (not just failing after the first one). 
""" class TripleForm(forms.ModelForm): class Meta: model = Triple fields = '__all__' Triple.objects.create(left=1, middle=2, right=3) form = TripleForm({'left': '1', 'middle': '2', 'right': '3'}) self.assertFalse(form.is_valid()) form = TripleForm({'left': '1', 'middle': '3', 'right': '1'}) self.assertTrue(form.is_valid()) @skipUnlessDBFeature('supports_nullable_unique_constraints') def test_unique_null(self): title = 'I May Be Wrong But I Doubt It' form = BookForm({'title': title, 'author': self.writer.pk}) self.assertTrue(form.is_valid()) form.save() form = BookForm({'title': title, 'author': self.writer.pk}) self.assertFalse(form.is_valid()) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.']) form = BookForm({'title': title}) self.assertTrue(form.is_valid()) form.save() form = BookForm({'title': title}) self.assertTrue(form.is_valid()) def test_inherited_unique(self): title = 'Boss' Book.objects.create(title=title, author=self.writer, special_id=1) form = DerivedBookForm({'title': 'Other', 'author': self.writer.pk, 'special_id': '1', 'isbn': '12345'}) self.assertFalse(form.is_valid()) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['special_id'], ['Book with this Special id already exists.']) def test_inherited_unique_together(self): title = 'Boss' form = BookForm({'title': title, 'author': self.writer.pk}) self.assertTrue(form.is_valid()) form.save() form = DerivedBookForm({'title': title, 'author': self.writer.pk, 'isbn': '12345'}) self.assertFalse(form.is_valid()) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['__all__'], ['Book with this Title and Author already exists.']) def test_abstract_inherited_unique(self): title = 'Boss' isbn = '12345' DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn) form = DerivedBookForm({ 'title': 'Other', 'author': self.writer.pk, 'isbn': isbn, 'suffix1': '1', 'suffix2': '2', }) self.assertFalse(form.is_valid()) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['isbn'], ['Derived book with this Isbn already exists.']) def test_abstract_inherited_unique_together(self): title = 'Boss' isbn = '12345' DerivedBook.objects.create(title=title, author=self.writer, isbn=isbn) form = DerivedBookForm({ 'title': 'Other', 'author': self.writer.pk, 'isbn': '9876', 'suffix1': '0', 'suffix2': '0' }) self.assertFalse(form.is_valid()) self.assertEqual(len(form.errors), 1) self.assertEqual( form.errors['__all__'], ['Derived book with this Suffix1 and Suffix2 already exists.'], ) def test_explicitpk_unspecified(self): """Test for primary_key being in the form and failing validation.""" form = ExplicitPKForm({'key': '', 'desc': ''}) self.assertFalse(form.is_valid()) def test_explicitpk_unique(self): """Ensure keys and blank character strings are tested for uniqueness.""" form = ExplicitPKForm({'key': 'key1', 'desc': ''}) self.assertTrue(form.is_valid()) form.save() form = ExplicitPKForm({'key': 'key1', 'desc': ''}) self.assertFalse(form.is_valid()) if connection.features.interprets_empty_strings_as_nulls: self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['key'], ['Explicit pk with this Key already exists.']) else: self.assertEqual(len(form.errors), 3) self.assertEqual(form.errors['__all__'], ['Explicit pk with this Key and Desc already exists.']) self.assertEqual(form.errors['desc'], ['Explicit pk with this Desc already exists.']) self.assertEqual(form.errors['key'], ['Explicit pk with this Key already 
exists.']) def test_unique_for_date(self): p = Post.objects.create( title="Django 1.0 is released", slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3), ) form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'}) self.assertFalse(form.is_valid()) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.']) form = PostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'}) self.assertTrue(form.is_valid()) form = PostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'}) self.assertTrue(form.is_valid()) form = PostForm({'slug': "Django 1.0", 'posted': '2008-01-01'}) self.assertFalse(form.is_valid()) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.']) form = PostForm({'subtitle': "Finally", 'posted': '2008-09-30'}) self.assertFalse(form.is_valid()) self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.']) data = {'subtitle': "Finally", "title": "Django 1.0 is released", "slug": "Django 1.0", 'posted': '2008-09-03'} form = PostForm(data, instance=p) self.assertTrue(form.is_valid()) form = PostForm({'title': "Django 1.0 is released"}) self.assertFalse(form.is_valid()) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['posted'], ['This field is required.']) def test_unique_for_date_in_exclude(self): """ If the date for unique_for_* constraints is excluded from the ModelForm (in this case 'posted' has editable=False, then the constraint should be ignored. """ class DateTimePostForm(forms.ModelForm): class Meta: model = DateTimePost fields = '__all__' DateTimePost.objects.create( title="Django 1.0 is released", slug="Django 1.0", subtitle="Finally", posted=datetime.datetime(2008, 9, 3, 10, 10, 1), ) # 'title' has unique_for_date='posted' form = DateTimePostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'}) self.assertTrue(form.is_valid()) # 'slug' has unique_for_year='posted' form = DateTimePostForm({'slug': "Django 1.0", 'posted': '2008-01-01'}) self.assertTrue(form.is_valid()) # 'subtitle' has unique_for_month='posted' form = DateTimePostForm({'subtitle': "Finally", 'posted': '2008-09-30'}) self.assertTrue(form.is_valid()) def test_inherited_unique_for_date(self): p = Post.objects.create( title="Django 1.0 is released", slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3), ) form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'}) self.assertFalse(form.is_valid()) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['title'], ['Title must be unique for Posted date.']) form = DerivedPostForm({'title': "Work on Django 1.1 begins", 'posted': '2008-09-03'}) self.assertTrue(form.is_valid()) form = DerivedPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-04'}) self.assertTrue(form.is_valid()) form = DerivedPostForm({'slug': "Django 1.0", 'posted': '2008-01-01'}) self.assertFalse(form.is_valid()) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['slug'], ['Slug must be unique for Posted year.']) form = DerivedPostForm({'subtitle': "Finally", 'posted': '2008-09-30'}) self.assertFalse(form.is_valid()) self.assertEqual(form.errors['subtitle'], ['Subtitle must be unique for Posted month.']) data = {'subtitle': "Finally", "title": "Django 1.0 is released", "slug": "Django 1.0", 'posted': '2008-09-03'} form = DerivedPostForm(data, instance=p) 
self.assertTrue(form.is_valid()) def test_unique_for_date_with_nullable_date(self): class FlexDatePostForm(forms.ModelForm): class Meta: model = FlexibleDatePost fields = '__all__' p = FlexibleDatePost.objects.create( title="Django 1.0 is released", slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3), ) form = FlexDatePostForm({'title': "Django 1.0 is released"}) self.assertTrue(form.is_valid()) form = FlexDatePostForm({'slug': "Django 1.0"}) self.assertTrue(form.is_valid()) form = FlexDatePostForm({'subtitle': "Finally"}) self.assertTrue(form.is_valid()) data = {'subtitle': "Finally", "title": "Django 1.0 is released", "slug": "Django 1.0"} form = FlexDatePostForm(data, instance=p) self.assertTrue(form.is_valid()) def test_override_unique_message(self): class CustomProductForm(ProductForm): class Meta(ProductForm.Meta): error_messages = { 'slug': { 'unique': "%(model_name)s's %(field_label)s not unique.", } } Product.objects.create(slug='teddy-bear-blue') form = CustomProductForm({'slug': 'teddy-bear-blue'}) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['slug'], ["Product's Slug not unique."]) def test_override_unique_together_message(self): class CustomPriceForm(PriceForm): class Meta(PriceForm.Meta): error_messages = { NON_FIELD_ERRORS: { 'unique_together': "%(model_name)s's %(field_labels)s not unique.", } } Price.objects.create(price=6.00, quantity=1) form = CustomPriceForm({'price': '6.00', 'quantity': '1'}) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors[NON_FIELD_ERRORS], ["Price's Price and Quantity not unique."]) def test_override_unique_for_date_message(self): class CustomPostForm(PostForm): class Meta(PostForm.Meta): error_messages = { 'title': { 'unique_for_date': ( "%(model_name)s's %(field_label)s not unique " "for %(date_field_label)s date." 
), } } Post.objects.create( title="Django 1.0 is released", slug="Django 1.0", subtitle="Finally", posted=datetime.date(2008, 9, 3), ) form = CustomPostForm({'title': "Django 1.0 is released", 'posted': '2008-09-03'}) self.assertEqual(len(form.errors), 1) self.assertEqual(form.errors['title'], ["Post's Title not unique for Posted date."]) class ModelFormBasicTests(TestCase): def create_basic_data(self): self.c1 = Category.objects.create(name='Entertainment', slug='entertainment', url='entertainment') self.c2 = Category.objects.create(name="It's a test", slug='its-test', url='test') self.c3 = Category.objects.create(name='Third test', slug='third-test', url='third') self.w_royko = Writer.objects.create(name='Mike Royko') self.w_woodward = Writer.objects.create(name='Bob Woodward') def test_base_form(self): self.assertEqual(Category.objects.count(), 0) f = BaseCategoryForm() self.assertHTMLEqual( str(f), """<tr><th><label for="id_name">Name:</label></th> <td><input id="id_name" type="text" name="name" maxlength="20" required></td></tr> <tr><th><label for="id_slug">Slug:</label></th> <td><input id="id_slug" type="text" name="slug" maxlength="20" required></td></tr> <tr><th><label for="id_url">The URL:</label></th> <td><input id="id_url" type="text" name="url" maxlength="40" required></td></tr>""" ) self.assertHTMLEqual( str(f.as_ul()), """<li><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="20" required></li> <li><label for="id_slug">Slug:</label> <input id="id_slug" type="text" name="slug" maxlength="20" required></li> <li><label for="id_url">The URL:</label> <input id="id_url" type="text" name="url" maxlength="40" required></li>""" ) self.assertHTMLEqual( str(f["name"]), """<input id="id_name" type="text" name="name" maxlength="20" required>""") def test_auto_id(self): f = BaseCategoryForm(auto_id=False) self.assertHTMLEqual( str(f.as_ul()), """<li>Name: <input type="text" name="name" maxlength="20" required></li> <li>Slug: <input type="text" name="slug" maxlength="20" required></li> <li>The URL: <input type="text" name="url" maxlength="40" required></li>""" ) def test_initial_values(self): self.create_basic_data() # Initial values can be provided for model forms f = ArticleForm( auto_id=False, initial={ 'headline': 'Your headline here', 'categories': [str(self.c1.id), str(self.c2.id)] }) self.assertHTMLEqual( f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="Your headline here" maxlength="50" required></li> <li>Slug: <input type="text" name="slug" maxlength="50" required></li> <li>Pub date: <input type="text" name="pub_date" required></li> <li>Writer: <select name="writer" required> <option value="" selected>---------</option> <option value="%s">Bob Woodward</option> <option value="%s">Mike Royko</option> </select></li> <li>Article: <textarea rows="10" cols="40" name="article" required></textarea></li> <li>Categories: <select multiple name="categories"> <option value="%s" selected>Entertainment</option> <option value="%s" selected>It&#x27;s a test</option> <option value="%s">Third test</option> </select></li> <li>Status: <select name="status"> <option value="" selected>---------</option> <option value="1">Draft</option> <option value="2">Pending</option> <option value="3">Live</option> </select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk)) # When the ModelForm is passed an instance, that instance's current values are # inserted as 'initial' data in each Field. 
f = RoykoForm(auto_id=False, instance=self.w_royko) self.assertHTMLEqual( str(f), '''<tr><th>Name:</th><td><input type="text" name="name" value="Mike Royko" maxlength="50" required><br> <span class="helptext">Use both first and last names.</span></td></tr>''' ) art = Article.objects.create( headline='Test article', slug='test-article', pub_date=datetime.date(1988, 1, 4), writer=self.w_royko, article='Hello.' ) art_id_1 = art.id f = ArticleForm(auto_id=False, instance=art) self.assertHTMLEqual( f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="Test article" maxlength="50" required></li> <li>Slug: <input type="text" name="slug" value="test-article" maxlength="50" required></li> <li>Pub date: <input type="text" name="pub_date" value="1988-01-04" required></li> <li>Writer: <select name="writer" required> <option value="">---------</option> <option value="%s">Bob Woodward</option> <option value="%s" selected>Mike Royko</option> </select></li> <li>Article: <textarea rows="10" cols="40" name="article" required>Hello.</textarea></li> <li>Categories: <select multiple name="categories"> <option value="%s">Entertainment</option> <option value="%s">It&#x27;s a test</option> <option value="%s">Third test</option> </select></li> <li>Status: <select name="status"> <option value="" selected>---------</option> <option value="1">Draft</option> <option value="2">Pending</option> <option value="3">Live</option> </select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk)) f = ArticleForm({ 'headline': 'Test headline', 'slug': 'test-headline', 'pub_date': '1984-02-06', 'writer': str(self.w_royko.pk), 'article': 'Hello.' }, instance=art) self.assertEqual(f.errors, {}) self.assertTrue(f.is_valid()) test_art = f.save() self.assertEqual(test_art.id, art_id_1) test_art = Article.objects.get(id=art_id_1) self.assertEqual(test_art.headline, 'Test headline') def test_m2m_initial_callable(self): """ Regression for #10349: A callable can be provided as the initial value for an m2m field """ self.maxDiff = 1200 self.create_basic_data() # Set up a callable initial value def formfield_for_dbfield(db_field, **kwargs): if db_field.name == 'categories': kwargs['initial'] = lambda: Category.objects.all().order_by('name')[:2] return db_field.formfield(**kwargs) # Create a ModelForm, instantiate it, and check that the output is as expected ModelForm = modelform_factory( Article, fields=['headline', 'categories'], formfield_callback=formfield_for_dbfield, ) form = ModelForm() self.assertHTMLEqual( form.as_ul(), """<li><label for="id_headline">Headline:</label> <input id="id_headline" type="text" name="headline" maxlength="50" required></li> <li><label for="id_categories">Categories:</label> <select multiple name="categories" id="id_categories"> <option value="%d" selected>Entertainment</option> <option value="%d" selected>It&#x27;s a test</option> <option value="%d">Third test</option> </select></li>""" % (self.c1.pk, self.c2.pk, self.c3.pk)) def test_basic_creation(self): self.assertEqual(Category.objects.count(), 0) f = BaseCategoryForm({ 'name': 'Entertainment', 'slug': 'entertainment', 'url': 'entertainment', }) self.assertTrue(f.is_valid()) self.assertEqual(f.cleaned_data['name'], 'Entertainment') self.assertEqual(f.cleaned_data['slug'], 'entertainment') self.assertEqual(f.cleaned_data['url'], 'entertainment') c1 = f.save() # Testing whether the same object is returned from the # ORM... not the fastest way... 
self.assertEqual(Category.objects.count(), 1) self.assertEqual(c1, Category.objects.all()[0]) self.assertEqual(c1.name, "Entertainment") def test_save_commit_false(self): # If you call save() with commit=False, then it will return an object that # hasn't yet been saved to the database. In this case, it's up to you to call # save() on the resulting model instance. f = BaseCategoryForm({'name': 'Third test', 'slug': 'third-test', 'url': 'third'}) self.assertTrue(f.is_valid()) c1 = f.save(commit=False) self.assertEqual(c1.name, "Third test") self.assertEqual(Category.objects.count(), 0) c1.save() self.assertEqual(Category.objects.count(), 1) def test_save_with_data_errors(self): # If you call save() with invalid data, you'll get a ValueError. f = BaseCategoryForm({'name': '', 'slug': 'not a slug!', 'url': 'foo'}) self.assertEqual(f.errors['name'], ['This field is required.']) self.assertEqual( f.errors['slug'], ['Enter a valid “slug” consisting of letters, numbers, underscores or hyphens.'] ) self.assertEqual(f.cleaned_data, {'url': 'foo'}) msg = "The Category could not be created because the data didn't validate." with self.assertRaisesMessage(ValueError, msg): f.save() f = BaseCategoryForm({'name': '', 'slug': '', 'url': 'foo'}) with self.assertRaisesMessage(ValueError, msg): f.save() def test_multi_fields(self): self.create_basic_data() self.maxDiff = None # ManyToManyFields are represented by a MultipleChoiceField, ForeignKeys and any # fields with the 'choices' attribute are represented by a ChoiceField. f = ArticleForm(auto_id=False) self.assertHTMLEqual( str(f), '''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" required></td></tr> <tr><th>Slug:</th><td><input type="text" name="slug" maxlength="50" required></td></tr> <tr><th>Pub date:</th><td><input type="text" name="pub_date" required></td></tr> <tr><th>Writer:</th><td><select name="writer" required> <option value="" selected>---------</option> <option value="%s">Bob Woodward</option> <option value="%s">Mike Royko</option> </select></td></tr> <tr><th>Article:</th><td><textarea rows="10" cols="40" name="article" required></textarea></td></tr> <tr><th>Categories:</th><td><select multiple name="categories"> <option value="%s">Entertainment</option> <option value="%s">It&#x27;s a test</option> <option value="%s">Third test</option> </select></td></tr> <tr><th>Status:</th><td><select name="status"> <option value="" selected>---------</option> <option value="1">Draft</option> <option value="2">Pending</option> <option value="3">Live</option> </select></td></tr>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk)) # Add some categories and test the many-to-many form output. 
new_art = Article.objects.create( article="Hello.", headline="New headline", slug="new-headline", pub_date=datetime.date(1988, 1, 4), writer=self.w_royko) new_art.categories.add(Category.objects.get(name='Entertainment')) self.assertQuerysetEqual(new_art.categories.all(), ["Entertainment"]) f = ArticleForm(auto_id=False, instance=new_art) self.assertHTMLEqual( f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" required></li> <li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" required></li> <li>Pub date: <input type="text" name="pub_date" value="1988-01-04" required></li> <li>Writer: <select name="writer" required> <option value="">---------</option> <option value="%s">Bob Woodward</option> <option value="%s" selected>Mike Royko</option> </select></li> <li>Article: <textarea rows="10" cols="40" name="article" required>Hello.</textarea></li> <li>Categories: <select multiple name="categories"> <option value="%s" selected>Entertainment</option> <option value="%s">It&#x27;s a test</option> <option value="%s">Third test</option> </select></li> <li>Status: <select name="status"> <option value="" selected>---------</option> <option value="1">Draft</option> <option value="2">Pending</option> <option value="3">Live</option> </select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk)) def test_subset_fields(self): # You can restrict a form to a subset of the complete list of fields # by providing a 'fields' argument. If you try to save a # model created with such a form, you need to ensure that the fields # that are _not_ on the form have default values, or are allowed to have # a value of None. If a field isn't specified on a form, the object created # from the form can't provide a value for that field! class PartialArticleForm(forms.ModelForm): class Meta: model = Article fields = ('headline', 'pub_date') f = PartialArticleForm(auto_id=False) self.assertHTMLEqual( str(f), '''<tr><th>Headline:</th><td><input type="text" name="headline" maxlength="50" required></td></tr> <tr><th>Pub date:</th><td><input type="text" name="pub_date" required></td></tr>''') class PartialArticleFormWithSlug(forms.ModelForm): class Meta: model = Article fields = ('headline', 'slug', 'pub_date') w_royko = Writer.objects.create(name='Mike Royko') art = Article.objects.create( article="Hello.", headline="New headline", slug="new-headline", pub_date=datetime.date(1988, 1, 4), writer=w_royko) f = PartialArticleFormWithSlug({ 'headline': 'New headline', 'slug': 'new-headline', 'pub_date': '1988-01-04' }, auto_id=False, instance=art) self.assertHTMLEqual( f.as_ul(), '''<li>Headline: <input type="text" name="headline" value="New headline" maxlength="50" required></li> <li>Slug: <input type="text" name="slug" value="new-headline" maxlength="50" required></li> <li>Pub date: <input type="text" name="pub_date" value="1988-01-04" required></li>''' ) self.assertTrue(f.is_valid()) new_art = f.save() self.assertEqual(new_art.id, art.id) new_art = Article.objects.get(id=art.id) self.assertEqual(new_art.headline, 'New headline') def test_m2m_editing(self): self.create_basic_data() form_data = { 'headline': 'New headline', 'slug': 'new-headline', 'pub_date': '1988-01-04', 'writer': str(self.w_royko.pk), 'article': 'Hello.', 'categories': [str(self.c1.id), str(self.c2.id)] } # Create a new article, with categories, via the form. 
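        # With the default commit=True, ModelForm.save() persists the
        # instance and its many-to-many data in a single call.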
f = ArticleForm(form_data) new_art = f.save() new_art = Article.objects.get(id=new_art.id) art_id_1 = new_art.id self.assertQuerysetEqual(new_art.categories.order_by('name'), ["Entertainment", "It's a test"]) # Now, submit form data with no categories. This deletes the existing categories. form_data['categories'] = [] f = ArticleForm(form_data, instance=new_art) new_art = f.save() self.assertEqual(new_art.id, art_id_1) new_art = Article.objects.get(id=art_id_1) self.assertQuerysetEqual(new_art.categories.all(), []) # Create a new article, with no categories, via the form. f = ArticleForm(form_data) new_art = f.save() art_id_2 = new_art.id self.assertNotIn(art_id_2, (None, art_id_1)) new_art = Article.objects.get(id=art_id_2) self.assertQuerysetEqual(new_art.categories.all(), []) # Create a new article, with categories, via the form, but use commit=False. # The m2m data won't be saved until save_m2m() is invoked on the form. form_data['categories'] = [str(self.c1.id), str(self.c2.id)] f = ArticleForm(form_data) new_art = f.save(commit=False) # Manually save the instance new_art.save() art_id_3 = new_art.id self.assertNotIn(art_id_3, (None, art_id_1, art_id_2)) # The instance doesn't have m2m data yet new_art = Article.objects.get(id=art_id_3) self.assertQuerysetEqual(new_art.categories.all(), []) # Save the m2m data on the form f.save_m2m() self.assertQuerysetEqual(new_art.categories.order_by('name'), ["Entertainment", "It's a test"]) def test_custom_form_fields(self): # Here, we define a custom ModelForm. Because it happens to have the same fields as # the Category model, we can just call the form's save() to apply its changes to an # existing Category instance. class ShortCategory(forms.ModelForm): name = forms.CharField(max_length=5) slug = forms.CharField(max_length=5) url = forms.CharField(max_length=3) class Meta: model = Category fields = '__all__' cat = Category.objects.create(name='Third test') form = ShortCategory({'name': 'Third', 'slug': 'third', 'url': '3rd'}, instance=cat) self.assertEqual(form.save().name, 'Third') self.assertEqual(Category.objects.get(id=cat.id).name, 'Third') def test_runtime_choicefield_populated(self): self.maxDiff = None # Here, we demonstrate that choices for a ForeignKey ChoiceField are determined # at runtime, based on the data in the database when the form is displayed, not # the data in the database when the form is instantiated. 
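        # The field iterates its queryset lazily each time its choices are
        # rendered, which is why the second rendering below picks up rows
        # created after the form was built.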
self.create_basic_data() f = ArticleForm(auto_id=False) self.assertHTMLEqual( f.as_ul(), '''<li>Headline: <input type="text" name="headline" maxlength="50" required></li> <li>Slug: <input type="text" name="slug" maxlength="50" required></li> <li>Pub date: <input type="text" name="pub_date" required></li> <li>Writer: <select name="writer" required> <option value="" selected>---------</option> <option value="%s">Bob Woodward</option> <option value="%s">Mike Royko</option> </select></li> <li>Article: <textarea rows="10" cols="40" name="article" required></textarea></li> <li>Categories: <select multiple name="categories"> <option value="%s">Entertainment</option> <option value="%s">It&#x27;s a test</option> <option value="%s">Third test</option> </select> </li> <li>Status: <select name="status"> <option value="" selected>---------</option> <option value="1">Draft</option> <option value="2">Pending</option> <option value="3">Live</option> </select></li>''' % (self.w_woodward.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk)) c4 = Category.objects.create(name='Fourth', url='4th') w_bernstein = Writer.objects.create(name='Carl Bernstein') self.assertHTMLEqual( f.as_ul(), '''<li>Headline: <input type="text" name="headline" maxlength="50" required></li> <li>Slug: <input type="text" name="slug" maxlength="50" required></li> <li>Pub date: <input type="text" name="pub_date" required></li> <li>Writer: <select name="writer" required> <option value="" selected>---------</option> <option value="%s">Bob Woodward</option> <option value="%s">Carl Bernstein</option> <option value="%s">Mike Royko</option> </select></li> <li>Article: <textarea rows="10" cols="40" name="article" required></textarea></li> <li>Categories: <select multiple name="categories"> <option value="%s">Entertainment</option> <option value="%s">It&#x27;s a test</option> <option value="%s">Third test</option> <option value="%s">Fourth</option> </select></li> <li>Status: <select name="status"> <option value="" selected>---------</option> <option value="1">Draft</option> <option value="2">Pending</option> <option value="3">Live</option> </select></li>''' % (self.w_woodward.pk, w_bernstein.pk, self.w_royko.pk, self.c1.pk, self.c2.pk, self.c3.pk, c4.pk)) def test_recleaning_model_form_instance(self): """ Re-cleaning an instance that was added via a ModelForm shouldn't raise a pk uniqueness error. """ class AuthorForm(forms.ModelForm): class Meta: model = Author fields = '__all__' form = AuthorForm({'full_name': 'Bob'}) self.assertTrue(form.is_valid()) obj = form.save() obj.name = 'Alice' obj.full_clean() def test_validate_foreign_key_uses_default_manager(self): class MyForm(forms.ModelForm): class Meta: model = Article fields = '__all__' # Archived writers are filtered out by the default manager. w = Writer.objects.create(name='Randy', archived=True) data = { 'headline': 'My Article', 'slug': 'my-article', 'pub_date': datetime.date.today(), 'writer': w.pk, 'article': 'lorem ipsum', } form = MyForm(data) self.assertIs(form.is_valid(), False) self.assertEqual( form.errors, {'writer': ['Select a valid choice. That choice is not one of the available choices.']}, ) def test_validate_foreign_key_to_model_with_overridden_manager(self): class MyForm(forms.ModelForm): class Meta: model = Article fields = '__all__' def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Allow archived authors. 
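                # _base_manager is unfiltered, so it bypasses WriterManager's
                # archived=False filter and makes archived writers valid
                # choices for this form.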
self.fields['writer'].queryset = Writer._base_manager.all() w = Writer.objects.create(name='Randy', archived=True) data = { 'headline': 'My Article', 'slug': 'my-article', 'pub_date': datetime.date.today(), 'writer': w.pk, 'article': 'lorem ipsum', } form = MyForm(data) self.assertIs(form.is_valid(), True) article = form.save() self.assertEqual(article.writer, w) class ModelMultipleChoiceFieldTests(TestCase): @classmethod def setUpTestData(cls): cls.c1 = Category.objects.create(name='Entertainment', slug='entertainment', url='entertainment') cls.c2 = Category.objects.create(name="It's a test", slug='its-test', url='test') cls.c3 = Category.objects.create(name='Third', slug='third-test', url='third') def test_model_multiple_choice_field(self): f = forms.ModelMultipleChoiceField(Category.objects.all()) self.assertEqual(list(f.choices), [ (self.c1.pk, 'Entertainment'), (self.c2.pk, "It's a test"), (self.c3.pk, 'Third')]) with self.assertRaises(ValidationError): f.clean(None) with self.assertRaises(ValidationError): f.clean([]) self.assertQuerysetEqual(f.clean([self.c1.id]), ["Entertainment"]) self.assertQuerysetEqual(f.clean([self.c2.id]), ["It's a test"]) self.assertQuerysetEqual(f.clean([str(self.c1.id)]), ["Entertainment"]) self.assertQuerysetEqual( f.clean([str(self.c1.id), str(self.c2.id)]), ["Entertainment", "It's a test"], ordered=False ) self.assertQuerysetEqual( f.clean([self.c1.id, str(self.c2.id)]), ["Entertainment", "It's a test"], ordered=False ) self.assertQuerysetEqual( f.clean((self.c1.id, str(self.c2.id))), ["Entertainment", "It's a test"], ordered=False ) with self.assertRaises(ValidationError): f.clean(['100']) with self.assertRaises(ValidationError): f.clean('hello') with self.assertRaises(ValidationError): f.clean(['fail']) # Invalid types that require TypeError to be caught (#22808). with self.assertRaises(ValidationError): f.clean([['fail']]) with self.assertRaises(ValidationError): f.clean([{'foo': 'bar'}]) # Add a Category object *after* the ModelMultipleChoiceField has already been # instantiated. This proves clean() checks the database during clean() rather # than caching it at time of instantiation. # Note, we are using an id of 1006 here since tests that run before # this may create categories with primary keys up to 6. Use # a number that will not conflict. c6 = Category.objects.create(id=1006, name='Sixth', url='6th') self.assertQuerysetEqual(f.clean([c6.id]), ["Sixth"]) # Delete a Category object *after* the ModelMultipleChoiceField has already been # instantiated. This proves clean() checks the database during clean() rather # than caching it at time of instantiation. Category.objects.get(url='6th').delete() with self.assertRaises(ValidationError): f.clean([c6.id]) def test_model_multiple_choice_required_false(self): f = forms.ModelMultipleChoiceField(Category.objects.all(), required=False) self.assertIsInstance(f.clean([]), EmptyQuerySet) self.assertIsInstance(f.clean(()), EmptyQuerySet) with self.assertRaises(ValidationError): f.clean(['0']) with self.assertRaises(ValidationError): f.clean([str(self.c3.id), '0']) with self.assertRaises(ValidationError): f.clean([str(self.c1.id), '0']) # queryset can be changed after the field is created. 
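        # Reassigning .queryset updates both the rendered choices and the
        # values that clean() accepts.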
        f.queryset = Category.objects.exclude(name='Third')
        self.assertEqual(list(f.choices), [
            (self.c1.pk, 'Entertainment'),
            (self.c2.pk, "It's a test")])
        self.assertQuerysetEqual(f.clean([self.c2.id]), ["It's a test"])
        with self.assertRaises(ValidationError):
            f.clean([self.c3.id])
        with self.assertRaises(ValidationError):
            f.clean([str(self.c2.id), str(self.c3.id)])

        f.queryset = Category.objects.all()
        f.label_from_instance = lambda obj: "multicategory " + str(obj)
        self.assertEqual(list(f.choices), [
            (self.c1.pk, 'multicategory Entertainment'),
            (self.c2.pk, "multicategory It's a test"),
            (self.c3.pk, 'multicategory Third')])

    def test_model_multiple_choice_number_of_queries(self):
        """
        ModelMultipleChoiceField does O(1) queries instead of O(n) (#10156).
        """
        persons = [Writer.objects.create(name="Person %s" % i) for i in range(30)]

        f = forms.ModelMultipleChoiceField(queryset=Writer.objects.all())
        self.assertNumQueries(1, f.clean, [p.pk for p in persons[1:11:2]])

    def test_model_multiple_choice_run_validators(self):
        """
        ModelMultipleChoiceField runs the given validators (#14144).
        """
        for i in range(30):
            Writer.objects.create(name="Person %s" % i)

        self._validator_run = False

        def my_validator(value):
            self._validator_run = True

        f = forms.ModelMultipleChoiceField(queryset=Writer.objects.all(), validators=[my_validator])
        f.clean([p.pk for p in Writer.objects.all()[8:9]])
        self.assertTrue(self._validator_run)

    def test_model_multiple_choice_show_hidden_initial(self):
        """
        Test support of show_hidden_initial by ModelMultipleChoiceField.
        """
        class WriterForm(forms.Form):
            persons = forms.ModelMultipleChoiceField(show_hidden_initial=True, queryset=Writer.objects.all())

        person1 = Writer.objects.create(name="Person 1")
        person2 = Writer.objects.create(name="Person 2")

        form = WriterForm(
            initial={'persons': [person1, person2]},
            data={
                'initial-persons': [str(person1.pk), str(person2.pk)],
                'persons': [str(person1.pk), str(person2.pk)],
            },
        )
        self.assertTrue(form.is_valid())
        self.assertFalse(form.has_changed())

        form = WriterForm(
            initial={'persons': [person1, person2]},
            data={
                'initial-persons': [str(person1.pk), str(person2.pk)],
                'persons': [str(person2.pk)],
            },
        )
        self.assertTrue(form.is_valid())
        self.assertTrue(form.has_changed())

    def test_model_multiple_choice_field_22745(self):
        """
        #22745 -- Make sure that ModelMultipleChoiceField with
        CheckboxSelectMultiple widget doesn't produce unnecessary db queries
        when accessing its BoundField's attrs.
""" class ModelMultipleChoiceForm(forms.Form): categories = forms.ModelMultipleChoiceField(Category.objects.all(), widget=forms.CheckboxSelectMultiple) form = ModelMultipleChoiceForm() field = form['categories'] # BoundField template = Template('{{ field.name }}{{ field }}{{ field.help_text }}') with self.assertNumQueries(1): template.render(Context({'field': field})) def test_show_hidden_initial_changed_queries_efficiently(self): class WriterForm(forms.Form): persons = forms.ModelMultipleChoiceField( show_hidden_initial=True, queryset=Writer.objects.all()) writers = (Writer.objects.create(name=str(x)) for x in range(0, 50)) writer_pks = tuple(x.pk for x in writers) form = WriterForm(data={'initial-persons': writer_pks}) with self.assertNumQueries(1): self.assertTrue(form.has_changed()) def test_clean_does_deduplicate_values(self): class PersonForm(forms.Form): persons = forms.ModelMultipleChoiceField(queryset=Person.objects.all()) person1 = Person.objects.create(name='Person 1') form = PersonForm(data={}) queryset = form.fields['persons'].clean([str(person1.pk)] * 50) sql, params = queryset.query.sql_with_params() self.assertEqual(len(params), 1) def test_to_field_name_with_initial_data(self): class ArticleCategoriesForm(forms.ModelForm): categories = forms.ModelMultipleChoiceField(Category.objects.all(), to_field_name='slug') class Meta: model = Article fields = ['categories'] article = Article.objects.create( headline='Test article', slug='test-article', pub_date=datetime.date(1988, 1, 4), writer=Writer.objects.create(name='Test writer'), article='Hello.', ) article.categories.add(self.c2, self.c3) form = ArticleCategoriesForm(instance=article) self.assertCountEqual(form['categories'].value(), [self.c2.slug, self.c3.slug]) class ModelOneToOneFieldTests(TestCase): def test_modelform_onetoonefield(self): class ImprovedArticleForm(forms.ModelForm): class Meta: model = ImprovedArticle fields = '__all__' class ImprovedArticleWithParentLinkForm(forms.ModelForm): class Meta: model = ImprovedArticleWithParentLink fields = '__all__' self.assertEqual(list(ImprovedArticleForm.base_fields), ['article']) self.assertEqual(list(ImprovedArticleWithParentLinkForm.base_fields), []) def test_modelform_subclassed_model(self): class BetterWriterForm(forms.ModelForm): class Meta: # BetterWriter model is a subclass of Writer with an additional `score` field model = BetterWriter fields = '__all__' bw = BetterWriter.objects.create(name='Joe Better', score=10) self.assertEqual(sorted(model_to_dict(bw)), ['id', 'name', 'score', 'writer_ptr']) self.assertEqual(sorted(model_to_dict(bw, fields=[])), []) self.assertEqual(sorted(model_to_dict(bw, fields=['id', 'name'])), ['id', 'name']) self.assertEqual(sorted(model_to_dict(bw, exclude=[])), ['id', 'name', 'score', 'writer_ptr']) self.assertEqual(sorted(model_to_dict(bw, exclude=['id', 'name'])), ['score', 'writer_ptr']) form = BetterWriterForm({'name': 'Some Name', 'score': 12}) self.assertTrue(form.is_valid()) bw2 = form.save() self.assertEqual(bw2.score, 12) def test_onetoonefield(self): class WriterProfileForm(forms.ModelForm): class Meta: # WriterProfile has a OneToOneField to Writer model = WriterProfile fields = '__all__' self.w_royko = Writer.objects.create(name='Mike Royko') self.w_woodward = Writer.objects.create(name='Bob Woodward') form = WriterProfileForm() self.assertHTMLEqual( form.as_p(), '''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer" required> <option value="" selected>---------</option> <option value="%s">Bob 
Woodward</option> <option value="%s">Mike Royko</option> </select></p> <p><label for="id_age">Age:</label> <input type="number" name="age" id="id_age" min="0" required></p>''' % ( self.w_woodward.pk, self.w_royko.pk, ) ) data = { 'writer': str(self.w_woodward.pk), 'age': '65', } form = WriterProfileForm(data) instance = form.save() self.assertEqual(str(instance), 'Bob Woodward is 65') form = WriterProfileForm(instance=instance) self.assertHTMLEqual( form.as_p(), '''<p><label for="id_writer">Writer:</label> <select name="writer" id="id_writer" required> <option value="">---------</option> <option value="%s" selected>Bob Woodward</option> <option value="%s">Mike Royko</option> </select></p> <p><label for="id_age">Age:</label> <input type="number" name="age" value="65" id="id_age" min="0" required></p>''' % ( self.w_woodward.pk, self.w_royko.pk, ) ) def test_assignment_of_none(self): class AuthorForm(forms.ModelForm): class Meta: model = Author fields = ['publication', 'full_name'] publication = Publication.objects.create(title="Pravda", date_published=datetime.date(1991, 8, 22)) author = Author.objects.create(publication=publication, full_name='John Doe') form = AuthorForm({'publication': '', 'full_name': 'John Doe'}, instance=author) self.assertTrue(form.is_valid()) self.assertIsNone(form.cleaned_data['publication']) author = form.save() # author object returned from form still retains original publication object # that's why we need to retrieve it from database again new_author = Author.objects.get(pk=author.pk) self.assertIsNone(new_author.publication) def test_assignment_of_none_null_false(self): class AuthorForm(forms.ModelForm): class Meta: model = Author1 fields = ['publication', 'full_name'] publication = Publication.objects.create(title="Pravda", date_published=datetime.date(1991, 8, 22)) author = Author1.objects.create(publication=publication, full_name='John Doe') form = AuthorForm({'publication': '', 'full_name': 'John Doe'}, instance=author) self.assertFalse(form.is_valid()) class FileAndImageFieldTests(TestCase): def test_clean_false(self): """ If the ``clean`` method on a non-required FileField receives False as the data (meaning clear the field value), it returns False, regardless of the value of ``initial``. """ f = forms.FileField(required=False) self.assertIs(f.clean(False), False) self.assertIs(f.clean(False, 'initial'), False) def test_clean_false_required(self): """ If the ``clean`` method on a required FileField receives False as the data, it has the same effect as None: initial is returned if non-empty, otherwise the validation catches the lack of a required value. """ f = forms.FileField(required=True) self.assertEqual(f.clean(False, 'initial'), 'initial') with self.assertRaises(ValidationError): f.clean(False) def test_full_clear(self): """ Integration happy-path test that a model FileField can actually be set and cleared via a ModelForm. 
""" class DocumentForm(forms.ModelForm): class Meta: model = Document fields = '__all__' form = DocumentForm() self.assertIn('name="myfile"', str(form)) self.assertNotIn('myfile-clear', str(form)) form = DocumentForm(files={'myfile': SimpleUploadedFile('something.txt', b'content')}) self.assertTrue(form.is_valid()) doc = form.save(commit=False) self.assertEqual(doc.myfile.name, 'something.txt') form = DocumentForm(instance=doc) self.assertIn('myfile-clear', str(form)) form = DocumentForm(instance=doc, data={'myfile-clear': 'true'}) doc = form.save(commit=False) self.assertFalse(doc.myfile) def test_clear_and_file_contradiction(self): """ If the user submits a new file upload AND checks the clear checkbox, they get a validation error, and the bound redisplay of the form still includes the current file and the clear checkbox. """ class DocumentForm(forms.ModelForm): class Meta: model = Document fields = '__all__' form = DocumentForm(files={'myfile': SimpleUploadedFile('something.txt', b'content')}) self.assertTrue(form.is_valid()) doc = form.save(commit=False) form = DocumentForm( instance=doc, files={'myfile': SimpleUploadedFile('something.txt', b'content')}, data={'myfile-clear': 'true'}, ) self.assertTrue(not form.is_valid()) self.assertEqual(form.errors['myfile'], ['Please either submit a file or check the clear checkbox, not both.']) rendered = str(form) self.assertIn('something.txt', rendered) self.assertIn('myfile-clear', rendered) def test_render_empty_file_field(self): class DocumentForm(forms.ModelForm): class Meta: model = Document fields = '__all__' doc = Document.objects.create() form = DocumentForm(instance=doc) self.assertHTMLEqual( str(form['myfile']), '<input id="id_myfile" name="myfile" type="file">' ) def test_file_field_data(self): # Test conditions when files is either not given or empty. f = TextFileForm(data={'description': 'Assistance'}) self.assertFalse(f.is_valid()) f = TextFileForm(data={'description': 'Assistance'}, files={}) self.assertFalse(f.is_valid()) # Upload a file and ensure it all works as expected. f = TextFileForm( data={'description': 'Assistance'}, files={'file': SimpleUploadedFile('test1.txt', b'hello world')}, ) self.assertTrue(f.is_valid()) self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile) instance = f.save() self.assertEqual(instance.file.name, 'tests/test1.txt') instance.file.delete() # If the previous file has been deleted, the file name can be reused f = TextFileForm( data={'description': 'Assistance'}, files={'file': SimpleUploadedFile('test1.txt', b'hello world')}, ) self.assertTrue(f.is_valid()) self.assertEqual(type(f.cleaned_data['file']), SimpleUploadedFile) instance = f.save() self.assertEqual(instance.file.name, 'tests/test1.txt') # Check if the max_length attribute has been inherited from the model. f = TextFileForm( data={'description': 'Assistance'}, files={'file': SimpleUploadedFile('test-maxlength.txt', b'hello world')}, ) self.assertFalse(f.is_valid()) # Edit an instance that already has the file defined in the model. This will not # save the file again, but leave it exactly as it is. f = TextFileForm({'description': 'Assistance'}, instance=instance) self.assertTrue(f.is_valid()) self.assertEqual(f.cleaned_data['file'].name, 'tests/test1.txt') instance = f.save() self.assertEqual(instance.file.name, 'tests/test1.txt') # Delete the current file since this is not done by Django. instance.file.delete() # Override the file by uploading a new one. 
f = TextFileForm( data={'description': 'Assistance'}, files={'file': SimpleUploadedFile('test2.txt', b'hello world')}, instance=instance, ) self.assertTrue(f.is_valid()) instance = f.save() self.assertEqual(instance.file.name, 'tests/test2.txt') # Delete the current file since this is not done by Django. instance.file.delete() instance.delete() def test_filefield_required_false(self): # Test the non-required FileField f = TextFileForm(data={'description': 'Assistance'}) f.fields['file'].required = False self.assertTrue(f.is_valid()) instance = f.save() self.assertEqual(instance.file.name, '') f = TextFileForm( data={'description': 'Assistance'}, files={'file': SimpleUploadedFile('test3.txt', b'hello world')}, instance=instance, ) self.assertTrue(f.is_valid()) instance = f.save() self.assertEqual(instance.file.name, 'tests/test3.txt') # Instance can be edited w/out re-uploading the file and existing file should be preserved. f = TextFileForm({'description': 'New Description'}, instance=instance) f.fields['file'].required = False self.assertTrue(f.is_valid()) instance = f.save() self.assertEqual(instance.description, 'New Description') self.assertEqual(instance.file.name, 'tests/test3.txt') # Delete the current file since this is not done by Django. instance.file.delete() instance.delete() def test_custom_file_field_save(self): """ Regression for #11149: save_form_data should be called only once """ class CFFForm(forms.ModelForm): class Meta: model = CustomFF fields = '__all__' # It's enough that the form saves without error -- the custom save routine will # generate an AssertionError if it is called more than once during save. form = CFFForm(data={'f': None}) form.save() def test_file_field_multiple_save(self): """ Simulate a file upload and check how many times Model.save() gets called. Test for bug #639. """ class PhotoForm(forms.ModelForm): class Meta: model = Photo fields = '__all__' # Grab an image for testing. filename = os.path.join(os.path.dirname(__file__), 'test.png') with open(filename, "rb") as fp: img = fp.read() # Fake a POST QueryDict and FILES MultiValueDict. data = {'title': 'Testing'} files = {"image": SimpleUploadedFile('test.png', img, 'image/png')} form = PhotoForm(data=data, files=files) p = form.save() try: # Check the savecount stored on the object (see the model). self.assertEqual(p._savecount, 1) finally: # Delete the "uploaded" file to avoid clogging /tmp. p = Photo.objects.get() p.image.delete(save=False) def test_file_path_field_blank(self): """FilePathField(blank=True) includes the empty option.""" class FPForm(forms.ModelForm): class Meta: model = FilePathModel fields = '__all__' form = FPForm() self.assertEqual([name for _, name in form['path'].field.choices], ['---------', 'models.py']) @skipUnless(test_images, "Pillow not installed") def test_image_field(self): # ImageField and FileField are nearly identical, but they differ slightly when # it comes to validation. This specifically tests that #6302 is fixed for # both file fields and image fields. 
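        # (ImageField relies on Pillow to validate uploads and to read image
        # dimensions, which is why this test is skipped without Pillow.)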
with open(os.path.join(os.path.dirname(__file__), 'test.png'), 'rb') as fp: image_data = fp.read() with open(os.path.join(os.path.dirname(__file__), 'test2.png'), 'rb') as fp: image_data2 = fp.read() f = ImageFileForm( data={'description': 'An image'}, files={'image': SimpleUploadedFile('test.png', image_data)}, ) self.assertTrue(f.is_valid()) self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile) instance = f.save() self.assertEqual(instance.image.name, 'tests/test.png') self.assertEqual(instance.width, 16) self.assertEqual(instance.height, 16) # Delete the current file since this is not done by Django, but don't save # because the dimension fields are not null=True. instance.image.delete(save=False) f = ImageFileForm( data={'description': 'An image'}, files={'image': SimpleUploadedFile('test.png', image_data)}, ) self.assertTrue(f.is_valid()) self.assertEqual(type(f.cleaned_data['image']), SimpleUploadedFile) instance = f.save() self.assertEqual(instance.image.name, 'tests/test.png') self.assertEqual(instance.width, 16) self.assertEqual(instance.height, 16) # Edit an instance that already has the (required) image defined in the model. This will not # save the image again, but leave it exactly as it is. f = ImageFileForm(data={'description': 'Look, it changed'}, instance=instance) self.assertTrue(f.is_valid()) self.assertEqual(f.cleaned_data['image'].name, 'tests/test.png') instance = f.save() self.assertEqual(instance.image.name, 'tests/test.png') self.assertEqual(instance.height, 16) self.assertEqual(instance.width, 16) # Delete the current file since this is not done by Django, but don't save # because the dimension fields are not null=True. instance.image.delete(save=False) # Override the file by uploading a new one. f = ImageFileForm( data={'description': 'Changed it'}, files={'image': SimpleUploadedFile('test2.png', image_data2)}, instance=instance, ) self.assertTrue(f.is_valid()) instance = f.save() self.assertEqual(instance.image.name, 'tests/test2.png') self.assertEqual(instance.height, 32) self.assertEqual(instance.width, 48) # Delete the current file since this is not done by Django, but don't save # because the dimension fields are not null=True. instance.image.delete(save=False) instance.delete() f = ImageFileForm( data={'description': 'Changed it'}, files={'image': SimpleUploadedFile('test2.png', image_data2)}, ) self.assertTrue(f.is_valid()) instance = f.save() self.assertEqual(instance.image.name, 'tests/test2.png') self.assertEqual(instance.height, 32) self.assertEqual(instance.width, 48) # Delete the current file since this is not done by Django, but don't save # because the dimension fields are not null=True. instance.image.delete(save=False) instance.delete() # Test the non-required ImageField # Note: In Oracle, we expect a null ImageField to return '' instead of # None. 
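        # Checking the feature flag keeps the assertion backend-agnostic
        # instead of hard-coding the Oracle behavior by name.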
if connection.features.interprets_empty_strings_as_nulls: expected_null_imagefield_repr = '' else: expected_null_imagefield_repr = None f = OptionalImageFileForm(data={'description': 'Test'}) self.assertTrue(f.is_valid()) instance = f.save() self.assertEqual(instance.image.name, expected_null_imagefield_repr) self.assertIsNone(instance.width) self.assertIsNone(instance.height) f = OptionalImageFileForm( data={'description': 'And a final one'}, files={'image': SimpleUploadedFile('test3.png', image_data)}, instance=instance, ) self.assertTrue(f.is_valid()) instance = f.save() self.assertEqual(instance.image.name, 'tests/test3.png') self.assertEqual(instance.width, 16) self.assertEqual(instance.height, 16) # Editing the instance without re-uploading the image should not affect # the image or its width/height properties. f = OptionalImageFileForm({'description': 'New Description'}, instance=instance) self.assertTrue(f.is_valid()) instance = f.save() self.assertEqual(instance.description, 'New Description') self.assertEqual(instance.image.name, 'tests/test3.png') self.assertEqual(instance.width, 16) self.assertEqual(instance.height, 16) # Delete the current file since this is not done by Django. instance.image.delete() instance.delete() f = OptionalImageFileForm( data={'description': 'And a final one'}, files={'image': SimpleUploadedFile('test4.png', image_data2)} ) self.assertTrue(f.is_valid()) instance = f.save() self.assertEqual(instance.image.name, 'tests/test4.png') self.assertEqual(instance.width, 48) self.assertEqual(instance.height, 32) instance.delete() # Test callable upload_to behavior that's dependent on the value of another field in the model f = ImageFileForm( data={'description': 'And a final one', 'path': 'foo'}, files={'image': SimpleUploadedFile('test4.png', image_data)}, ) self.assertTrue(f.is_valid()) instance = f.save() self.assertEqual(instance.image.name, 'foo/test4.png') instance.delete() # Editing an instance that has an image without an extension shouldn't # fail validation. 
First create: f = NoExtensionImageFileForm( data={'description': 'An image'}, files={'image': SimpleUploadedFile('test.png', image_data)}, ) self.assertTrue(f.is_valid()) instance = f.save() self.assertEqual(instance.image.name, 'tests/no_extension') # Then edit: f = NoExtensionImageFileForm(data={'description': 'Edited image'}, instance=instance) self.assertTrue(f.is_valid()) class ModelOtherFieldTests(SimpleTestCase): def test_big_integer_field(self): bif = BigIntForm({'biggie': '-9223372036854775808'}) self.assertTrue(bif.is_valid()) bif = BigIntForm({'biggie': '-9223372036854775809'}) self.assertFalse(bif.is_valid()) self.assertEqual( bif.errors, {'biggie': ['Ensure this value is greater than or equal to -9223372036854775808.']} ) bif = BigIntForm({'biggie': '9223372036854775807'}) self.assertTrue(bif.is_valid()) bif = BigIntForm({'biggie': '9223372036854775808'}) self.assertFalse(bif.is_valid()) self.assertEqual(bif.errors, {'biggie': ['Ensure this value is less than or equal to 9223372036854775807.']}) def test_url_on_modelform(self): "Check basic URL field validation on model forms" class HomepageForm(forms.ModelForm): class Meta: model = Homepage fields = '__all__' self.assertFalse(HomepageForm({'url': 'foo'}).is_valid()) self.assertFalse(HomepageForm({'url': 'http://'}).is_valid()) self.assertFalse(HomepageForm({'url': 'http://example'}).is_valid()) self.assertFalse(HomepageForm({'url': 'http://example.'}).is_valid()) self.assertFalse(HomepageForm({'url': 'http://com.'}).is_valid()) self.assertTrue(HomepageForm({'url': 'http://localhost'}).is_valid()) self.assertTrue(HomepageForm({'url': 'http://example.com'}).is_valid()) self.assertTrue(HomepageForm({'url': 'http://www.example.com'}).is_valid()) self.assertTrue(HomepageForm({'url': 'http://www.example.com:8000'}).is_valid()) self.assertTrue(HomepageForm({'url': 'http://www.example.com/test'}).is_valid()) self.assertTrue(HomepageForm({'url': 'http://www.example.com:8000/test'}).is_valid()) self.assertTrue(HomepageForm({'url': 'http://example.com/foo/bar'}).is_valid()) def test_modelform_non_editable_field(self): """ When explicitly including a non-editable field in a ModelForm, the error message should be explicit. """ # 'created', non-editable, is excluded by default self.assertNotIn('created', ArticleForm().fields) msg = "'created' cannot be specified for Article model form as it is a non-editable field" with self.assertRaisesMessage(FieldError, msg): class InvalidArticleForm(forms.ModelForm): class Meta: model = Article fields = ('headline', 'created') def test_http_prefixing(self): """ If the http:// prefix is omitted on form input, the field adds it again. (Refs #13613) """ class HomepageForm(forms.ModelForm): class Meta: model = Homepage fields = '__all__' form = HomepageForm({'url': 'example.com'}) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['url'], 'http://example.com') form = HomepageForm({'url': 'example.com/test'}) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['url'], 'http://example.com/test') class OtherModelFormTests(TestCase): def test_media_on_modelform(self): # Similar to a regular Form class you can define custom media to be used on # the ModelForm. 
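        # The css/js declared on the form's inner Media class are rendered
        # through form.media, as checked below.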
f = ModelFormWithMedia() self.assertHTMLEqual( str(f.media), '''<link href="/some/form/css" type="text/css" media="all" rel="stylesheet"> <script src="/some/form/javascript"></script>''' ) def test_choices_type(self): # Choices on CharField and IntegerField f = ArticleForm() with self.assertRaises(ValidationError): f.fields['status'].clean('42') f = ArticleStatusForm() with self.assertRaises(ValidationError): f.fields['status'].clean('z') def test_prefetch_related_queryset(self): """ ModelChoiceField should respect a prefetch_related() on its queryset. """ blue = Colour.objects.create(name='blue') red = Colour.objects.create(name='red') multicolor_item = ColourfulItem.objects.create() multicolor_item.colours.add(blue, red) red_item = ColourfulItem.objects.create() red_item.colours.add(red) class ColorModelChoiceField(forms.ModelChoiceField): def label_from_instance(self, obj): return ', '.join(c.name for c in obj.colours.all()) field = ColorModelChoiceField(ColourfulItem.objects.prefetch_related('colours')) with self.assertNumQueries(3): # would be 4 if prefetch is ignored self.assertEqual(tuple(field.choices), ( ('', '---------'), (multicolor_item.pk, 'blue, red'), (red_item.pk, 'red'), )) def test_foreignkeys_which_use_to_field(self): apple = Inventory.objects.create(barcode=86, name='Apple') Inventory.objects.create(barcode=22, name='Pear') core = Inventory.objects.create(barcode=87, name='Core', parent=apple) field = forms.ModelChoiceField(Inventory.objects.all(), to_field_name='barcode') self.assertEqual(tuple(field.choices), ( ('', '---------'), (86, 'Apple'), (87, 'Core'), (22, 'Pear'))) form = InventoryForm(instance=core) self.assertHTMLEqual(str(form['parent']), '''<select name="parent" id="id_parent"> <option value="">---------</option> <option value="86" selected>Apple</option> <option value="87">Core</option> <option value="22">Pear</option> </select>''') data = model_to_dict(core) data['parent'] = '22' form = InventoryForm(data=data, instance=core) core = form.save() self.assertEqual(core.parent.name, 'Pear') class CategoryForm(forms.ModelForm): description = forms.CharField() class Meta: model = Category fields = ['description', 'url'] self.assertEqual(list(CategoryForm.base_fields), ['description', 'url']) self.assertHTMLEqual( str(CategoryForm()), '''<tr><th><label for="id_description">Description:</label></th> <td><input type="text" name="description" id="id_description" required></td></tr> <tr><th><label for="id_url">The URL:</label></th> <td><input id="id_url" type="text" name="url" maxlength="40" required></td></tr>''' ) # to_field_name should also work on ModelMultipleChoiceField ################## field = forms.ModelMultipleChoiceField(Inventory.objects.all(), to_field_name='barcode') self.assertEqual(tuple(field.choices), ((86, 'Apple'), (87, 'Core'), (22, 'Pear'))) self.assertQuerysetEqual(field.clean([86]), ['Apple']) form = SelectInventoryForm({'items': [87, 22]}) self.assertTrue(form.is_valid()) self.assertEqual(len(form.cleaned_data), 1) self.assertQuerysetEqual(form.cleaned_data['items'], ['Core', 'Pear']) def test_model_field_that_returns_none_to_exclude_itself_with_explicit_fields(self): self.assertEqual(list(CustomFieldForExclusionForm.base_fields), ['name']) self.assertHTMLEqual( str(CustomFieldForExclusionForm()), '''<tr><th><label for="id_name">Name:</label></th> <td><input id="id_name" type="text" name="name" maxlength="10" required></td></tr>''' ) def test_iterable_model_m2m(self): class ColourfulItemForm(forms.ModelForm): class Meta: model = 
ColourfulItem fields = '__all__' colour = Colour.objects.create(name='Blue') form = ColourfulItemForm() self.maxDiff = 1024 self.assertHTMLEqual( form.as_p(), """<p><label for="id_name">Name:</label> <input id="id_name" type="text" name="name" maxlength="50" required></p> <p><label for="id_colours">Colours:</label> <select multiple name="colours" id="id_colours" required> <option value="%(blue_pk)s">Blue</option> </select></p>""" % {'blue_pk': colour.pk}) def test_callable_field_default(self): class PublicationDefaultsForm(forms.ModelForm): class Meta: model = PublicationDefaults fields = ('title', 'date_published', 'mode', 'category') self.maxDiff = 2000 form = PublicationDefaultsForm() today_str = str(datetime.date.today()) self.assertHTMLEqual( form.as_p(), """ <p><label for="id_title">Title:</label> <input id="id_title" maxlength="30" name="title" type="text" required></p> <p><label for="id_date_published">Date published:</label> <input id="id_date_published" name="date_published" type="text" value="{0}" required> <input id="initial-id_date_published" name="initial-date_published" type="hidden" value="{0}"></p> <p><label for="id_mode">Mode:</label> <select id="id_mode" name="mode"> <option value="di" selected>direct</option> <option value="de">delayed</option></select> <input id="initial-id_mode" name="initial-mode" type="hidden" value="di"></p> <p><label for="id_category">Category:</label> <select id="id_category" name="category"> <option value="1">Games</option> <option value="2">Comics</option> <option value="3" selected>Novel</option></select> <input id="initial-id_category" name="initial-category" type="hidden" value="3"> """.format(today_str) ) empty_data = { 'title': '', 'date_published': today_str, 'initial-date_published': today_str, 'mode': 'di', 'initial-mode': 'di', 'category': '3', 'initial-category': '3', } bound_form = PublicationDefaultsForm(empty_data) self.assertFalse(bound_form.has_changed()) class ModelFormCustomErrorTests(SimpleTestCase): def test_custom_error_messages(self): data = {'name1': '@#$!!**@#$', 'name2': '@#$!!**@#$'} errors = CustomErrorMessageForm(data).errors self.assertHTMLEqual( str(errors['name1']), '<ul class="errorlist"><li>Form custom error message.</li></ul>' ) self.assertHTMLEqual( str(errors['name2']), '<ul class="errorlist"><li>Model custom error message.</li></ul>' ) def test_model_clean_error_messages(self): data = {'name1': 'FORBIDDEN_VALUE', 'name2': 'ABC'} form = CustomErrorMessageForm(data) self.assertFalse(form.is_valid()) self.assertHTMLEqual( str(form.errors['name1']), '<ul class="errorlist"><li>Model.clean() error messages.</li></ul>' ) data = {'name1': 'FORBIDDEN_VALUE2', 'name2': 'ABC'} form = CustomErrorMessageForm(data) self.assertFalse(form.is_valid()) self.assertHTMLEqual( str(form.errors['name1']), '<ul class="errorlist"><li>Model.clean() error messages (simpler syntax).</li></ul>' ) data = {'name1': 'GLOBAL_ERROR', 'name2': 'ABC'} form = CustomErrorMessageForm(data) self.assertFalse(form.is_valid()) self.assertEqual(form.errors['__all__'], ['Global error message.']) class CustomCleanTests(TestCase): def test_override_clean(self): """ Regression for #12596: Calling super from ModelForm.clean() should be optional. 
""" class TripleFormWithCleanOverride(forms.ModelForm): class Meta: model = Triple fields = '__all__' def clean(self): if not self.cleaned_data['left'] == self.cleaned_data['right']: raise ValidationError('Left and right should be equal') return self.cleaned_data form = TripleFormWithCleanOverride({'left': 1, 'middle': 2, 'right': 1}) self.assertTrue(form.is_valid()) # form.instance.left will be None if the instance was not constructed # by form.full_clean(). self.assertEqual(form.instance.left, 1) def test_model_form_clean_applies_to_model(self): """ Regression test for #12960. Make sure the cleaned_data returned from ModelForm.clean() is applied to the model instance. """ class CategoryForm(forms.ModelForm): class Meta: model = Category fields = '__all__' def clean(self): self.cleaned_data['name'] = self.cleaned_data['name'].upper() return self.cleaned_data data = {'name': 'Test', 'slug': 'test', 'url': '/test'} form = CategoryForm(data) category = form.save() self.assertEqual(category.name, 'TEST') class ModelFormInheritanceTests(SimpleTestCase): def test_form_subclass_inheritance(self): class Form(forms.Form): age = forms.IntegerField() class ModelForm(forms.ModelForm, Form): class Meta: model = Writer fields = '__all__' self.assertEqual(list(ModelForm().fields), ['name', 'age']) def test_field_removal(self): class ModelForm(forms.ModelForm): class Meta: model = Writer fields = '__all__' class Mixin: age = None class Form(forms.Form): age = forms.IntegerField() class Form2(forms.Form): foo = forms.IntegerField() self.assertEqual(list(ModelForm().fields), ['name']) self.assertEqual(list(type('NewForm', (Mixin, Form), {})().fields), []) self.assertEqual(list(type('NewForm', (Form2, Mixin, Form), {})().fields), ['foo']) self.assertEqual(list(type('NewForm', (Mixin, ModelForm, Form), {})().fields), ['name']) self.assertEqual(list(type('NewForm', (ModelForm, Mixin, Form), {})().fields), ['name']) self.assertEqual(list(type('NewForm', (ModelForm, Form, Mixin), {})().fields), ['name', 'age']) self.assertEqual(list(type('NewForm', (ModelForm, Form), {'age': None})().fields), ['name']) def test_field_removal_name_clashes(self): """ Form fields can be removed in subclasses by setting them to None (#22510). """ class MyForm(forms.ModelForm): media = forms.CharField() class Meta: model = Writer fields = '__all__' class SubForm(MyForm): media = None self.assertIn('media', MyForm().fields) self.assertNotIn('media', SubForm().fields) self.assertTrue(hasattr(MyForm, 'media')) self.assertTrue(hasattr(SubForm, 'media')) class StumpJokeForm(forms.ModelForm): class Meta: model = StumpJoke fields = '__all__' class CustomFieldWithQuerysetButNoLimitChoicesTo(forms.Field): queryset = 42 class StumpJokeWithCustomFieldForm(forms.ModelForm): custom = CustomFieldWithQuerysetButNoLimitChoicesTo() class Meta: model = StumpJoke fields = () class LimitChoicesToTests(TestCase): """ Tests the functionality of ``limit_choices_to``. """ @classmethod def setUpTestData(cls): cls.threepwood = Character.objects.create( username='threepwood', last_action=datetime.datetime.today() + datetime.timedelta(days=1), ) cls.marley = Character.objects.create( username='marley', last_action=datetime.datetime.today() - datetime.timedelta(days=1), ) def test_limit_choices_to_callable_for_fk_rel(self): """ A ForeignKey can use limit_choices_to as a callable (#2554). 
""" stumpjokeform = StumpJokeForm() self.assertSequenceEqual(stumpjokeform.fields['most_recently_fooled'].queryset, [self.threepwood]) def test_limit_choices_to_callable_for_m2m_rel(self): """ A ManyToManyField can use limit_choices_to as a callable (#2554). """ stumpjokeform = StumpJokeForm() self.assertSequenceEqual(stumpjokeform.fields['most_recently_fooled'].queryset, [self.threepwood]) def test_custom_field_with_queryset_but_no_limit_choices_to(self): """ A custom field with a `queryset` attribute but no `limit_choices_to` works (#23795). """ f = StumpJokeWithCustomFieldForm() self.assertEqual(f.fields['custom'].queryset, 42) def test_fields_for_model_applies_limit_choices_to(self): fields = fields_for_model(StumpJoke, ['has_fooled_today']) self.assertSequenceEqual(fields['has_fooled_today'].queryset, [self.threepwood]) def test_callable_called_each_time_form_is_instantiated(self): field = StumpJokeForm.base_fields['most_recently_fooled'] with mock.patch.object(field, 'limit_choices_to') as today_callable_dict: StumpJokeForm() self.assertEqual(today_callable_dict.call_count, 1) StumpJokeForm() self.assertEqual(today_callable_dict.call_count, 2) StumpJokeForm() self.assertEqual(today_callable_dict.call_count, 3) class FormFieldCallbackTests(SimpleTestCase): def test_baseform_with_widgets_in_meta(self): """Regression for #13095: Using base forms with widgets defined in Meta should not raise errors.""" widget = forms.Textarea() class BaseForm(forms.ModelForm): class Meta: model = Person widgets = {'name': widget} fields = "__all__" Form = modelform_factory(Person, form=BaseForm) self.assertIsInstance(Form.base_fields['name'].widget, forms.Textarea) def test_factory_with_widget_argument(self): """ Regression for #15315: modelform_factory should accept widgets argument """ widget = forms.Textarea() # Without a widget should not set the widget to textarea Form = modelform_factory(Person, fields="__all__") self.assertNotEqual(Form.base_fields['name'].widget.__class__, forms.Textarea) # With a widget should not set the widget to textarea Form = modelform_factory(Person, fields="__all__", widgets={'name': widget}) self.assertEqual(Form.base_fields['name'].widget.__class__, forms.Textarea) def test_modelform_factory_without_fields(self): """ Regression for #19733 """ message = ( "Calling modelform_factory without defining 'fields' or 'exclude' " "explicitly is prohibited." 
) with self.assertRaisesMessage(ImproperlyConfigured, message): modelform_factory(Person) def test_modelform_factory_with_all_fields(self): """ Regression for #19733 """ form = modelform_factory(Person, fields="__all__") self.assertEqual(list(form.base_fields), ["name"]) def test_custom_callback(self): """A custom formfield_callback is used if provided""" callback_args = [] def callback(db_field, **kwargs): callback_args.append((db_field, kwargs)) return db_field.formfield(**kwargs) widget = forms.Textarea() class BaseForm(forms.ModelForm): class Meta: model = Person widgets = {'name': widget} fields = "__all__" modelform_factory(Person, form=BaseForm, formfield_callback=callback) id_field, name_field = Person._meta.fields self.assertEqual(callback_args, [(id_field, {}), (name_field, {'widget': widget})]) def test_bad_callback(self): # A bad callback provided by user still gives an error with self.assertRaises(TypeError): modelform_factory(Person, fields="__all__", formfield_callback='not a function or callable') def test_inherit_after_custom_callback(self): def callback(db_field, **kwargs): if isinstance(db_field, models.CharField): return forms.CharField(widget=forms.Textarea) return db_field.formfield(**kwargs) class BaseForm(forms.ModelForm): class Meta: model = Person fields = '__all__' NewForm = modelform_factory(Person, form=BaseForm, formfield_callback=callback) class InheritedForm(NewForm): pass for name in NewForm.base_fields: self.assertEqual( type(InheritedForm.base_fields[name].widget), type(NewForm.base_fields[name].widget) ) class LocalizedModelFormTest(TestCase): def test_model_form_applies_localize_to_some_fields(self): class PartiallyLocalizedTripleForm(forms.ModelForm): class Meta: model = Triple localized_fields = ('left', 'right',) fields = '__all__' f = PartiallyLocalizedTripleForm({'left': 10, 'middle': 10, 'right': 10}) self.assertTrue(f.is_valid()) self.assertTrue(f.fields['left'].localize) self.assertFalse(f.fields['middle'].localize) self.assertTrue(f.fields['right'].localize) def test_model_form_applies_localize_to_all_fields(self): class FullyLocalizedTripleForm(forms.ModelForm): class Meta: model = Triple localized_fields = '__all__' fields = '__all__' f = FullyLocalizedTripleForm({'left': 10, 'middle': 10, 'right': 10}) self.assertTrue(f.is_valid()) self.assertTrue(f.fields['left'].localize) self.assertTrue(f.fields['middle'].localize) self.assertTrue(f.fields['right'].localize) def test_model_form_refuses_arbitrary_string(self): msg = ( "BrokenLocalizedTripleForm.Meta.localized_fields " "cannot be a string. Did you mean to type: ('foo',)?" ) with self.assertRaisesMessage(TypeError, msg): class BrokenLocalizedTripleForm(forms.ModelForm): class Meta: model = Triple localized_fields = "foo" class CustomMetaclass(ModelFormMetaclass): def __new__(cls, name, bases, attrs): new = super().__new__(cls, name, bases, attrs) new.base_fields = {} return new class CustomMetaclassForm(forms.ModelForm, metaclass=CustomMetaclass): pass class CustomMetaclassTestCase(SimpleTestCase): def test_modelform_factory_metaclass(self): new_cls = modelform_factory(Person, fields="__all__", form=CustomMetaclassForm) self.assertEqual(new_cls.base_fields, {}) class StrictAssignmentTests(SimpleTestCase): """ Should a model do anything special with __setattr__() or descriptors which raise a ValidationError, a model form should catch the error (#24706). 
""" def test_setattr_raises_validation_error_field_specific(self): """ A model ValidationError using the dict form should put the error message into the correct key of form.errors. """ form_class = modelform_factory(model=StrictAssignmentFieldSpecific, fields=['title']) form = form_class(data={'title': 'testing setattr'}, files=None) # This line turns on the ValidationError; it avoids the model erroring # when its own __init__() is called when creating form.instance. form.instance._should_error = True self.assertFalse(form.is_valid()) self.assertEqual(form.errors, { 'title': ['Cannot set attribute', 'This field cannot be blank.'] }) def test_setattr_raises_validation_error_non_field(self): """ A model ValidationError not using the dict form should put the error message into __all__ (i.e. non-field errors) on the form. """ form_class = modelform_factory(model=StrictAssignmentAll, fields=['title']) form = form_class(data={'title': 'testing setattr'}, files=None) # This line turns on the ValidationError; it avoids the model erroring # when its own __init__() is called when creating form.instance. form.instance._should_error = True self.assertFalse(form.is_valid()) self.assertEqual(form.errors, { '__all__': ['Cannot set attribute'], 'title': ['This field cannot be blank.'] }) class ModelToDictTests(TestCase): def test_many_to_many(self): """Data for a ManyToManyField is a list rather than a lazy QuerySet.""" blue = Colour.objects.create(name='blue') red = Colour.objects.create(name='red') item = ColourfulItem.objects.create() item.colours.set([blue]) data = model_to_dict(item)['colours'] self.assertEqual(data, [blue]) item.colours.set([red]) # If data were a QuerySet, it would be reevaluated here and give "red" # instead of the original value. self.assertEqual(data, [blue])
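# The model definitions used by the tests above follow.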
import datetime import os import tempfile import uuid from django.core import validators from django.core.exceptions import ValidationError from django.core.files.storage import FileSystemStorage from django.db import models temp_storage_dir = tempfile.mkdtemp() temp_storage = FileSystemStorage(temp_storage_dir) class Person(models.Model): name = models.CharField(max_length=100) class Category(models.Model): name = models.CharField(max_length=20) slug = models.SlugField(max_length=20) url = models.CharField('The URL', max_length=40) def __str__(self): return self.name def __repr__(self): return self.__str__() class WriterManager(models.Manager): def get_queryset(self): qs = super().get_queryset() return qs.filter(archived=False) class Writer(models.Model): name = models.CharField(max_length=50, help_text='Use both first and last names.') archived = models.BooleanField(default=False, editable=False) objects = WriterManager() class Meta: ordering = ('name',) def __str__(self): return self.name class Article(models.Model): ARTICLE_STATUS = ( (1, 'Draft'), (2, 'Pending'), (3, 'Live'), ) headline = models.CharField(max_length=50) slug = models.SlugField() pub_date = models.DateField() created = models.DateField(editable=False) writer = models.ForeignKey(Writer, models.CASCADE) article = models.TextField() categories = models.ManyToManyField(Category, blank=True) status = models.PositiveIntegerField(choices=ARTICLE_STATUS, blank=True, null=True) def save(self, *args, **kwargs): if not self.id: self.created = datetime.date.today() return super().save(*args, **kwargs) def __str__(self): return self.headline class ImprovedArticle(models.Model): article = models.OneToOneField(Article, models.CASCADE) class ImprovedArticleWithParentLink(models.Model): article = models.OneToOneField(Article, models.CASCADE, parent_link=True) class BetterWriter(Writer): score = models.IntegerField() class Publication(models.Model): title = models.CharField(max_length=30) date_published = models.DateField() def __str__(self): return self.title def default_mode(): return 'di' def default_category(): return 3 class PublicationDefaults(models.Model): MODE_CHOICES = (('di', 'direct'), ('de', 'delayed')) CATEGORY_CHOICES = ((1, 'Games'), (2, 'Comics'), (3, 'Novel')) title = models.CharField(max_length=30) date_published = models.DateField(default=datetime.date.today) datetime_published = models.DateTimeField(default=datetime.datetime(2000, 1, 1)) mode = models.CharField(max_length=2, choices=MODE_CHOICES, default=default_mode) category = models.IntegerField(choices=CATEGORY_CHOICES, default=default_category) active = models.BooleanField(default=True) file = models.FileField(default='default.txt') class Author(models.Model): publication = models.OneToOneField(Publication, models.SET_NULL, null=True, blank=True) full_name = models.CharField(max_length=255) class Author1(models.Model): publication = models.OneToOneField(Publication, models.CASCADE, null=False) full_name = models.CharField(max_length=255) class WriterProfile(models.Model): writer = models.OneToOneField(Writer, models.CASCADE, primary_key=True) age = models.PositiveIntegerField() def __str__(self): return "%s is %s" % (self.writer, self.age) class Document(models.Model): myfile = models.FileField(upload_to='unused', blank=True) class TextFile(models.Model): description = models.CharField(max_length=20) file = models.FileField(storage=temp_storage, upload_to='tests', max_length=15) def __str__(self): return self.description class CustomFileField(models.FileField): 
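    # save_form_data() is the hook a ModelForm uses to copy its cleaned value
    # onto the model instance; the assertion below guards against it being
    # called more than once (regression #11149).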
def save_form_data(self, instance, data): been_here = getattr(self, 'been_saved', False) assert not been_here, "save_form_data called more than once" setattr(self, 'been_saved', True) class CustomFF(models.Model): f = CustomFileField(upload_to='unused', blank=True) class FilePathModel(models.Model): path = models.FilePathField(path=os.path.dirname(__file__), match='models.py', blank=True) try: from PIL import Image # NOQA: detect if Pillow is installed test_images = True class ImageFile(models.Model): def custom_upload_path(self, filename): path = self.path or 'tests' return '%s/%s' % (path, filename) description = models.CharField(max_length=20) # Deliberately put the image field *after* the width/height fields to # trigger the bug in #10404 with width/height not getting assigned. width = models.IntegerField(editable=False) height = models.IntegerField(editable=False) image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path, width_field='width', height_field='height') path = models.CharField(max_length=16, blank=True, default='') def __str__(self): return self.description class OptionalImageFile(models.Model): def custom_upload_path(self, filename): path = self.path or 'tests' return '%s/%s' % (path, filename) description = models.CharField(max_length=20) image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path, width_field='width', height_field='height', blank=True, null=True) width = models.IntegerField(editable=False, null=True) height = models.IntegerField(editable=False, null=True) path = models.CharField(max_length=16, blank=True, default='') def __str__(self): return self.description class NoExtensionImageFile(models.Model): def upload_to(self, filename): return 'tests/no_extension' description = models.CharField(max_length=20) image = models.ImageField(storage=temp_storage, upload_to=upload_to) def __str__(self): return self.description except ImportError: test_images = False class Homepage(models.Model): url = models.URLField() class Product(models.Model): slug = models.SlugField(unique=True) def __str__(self): return self.slug class Price(models.Model): price = models.DecimalField(max_digits=10, decimal_places=2) quantity = models.PositiveIntegerField() class Meta: unique_together = (('price', 'quantity'),) def __str__(self): return "%s for %s" % (self.quantity, self.price) class Triple(models.Model): left = models.IntegerField() middle = models.IntegerField() right = models.IntegerField() class Meta: unique_together = (('left', 'middle'), ('middle', 'right')) class ArticleStatus(models.Model): ARTICLE_STATUS_CHAR = ( ('d', 'Draft'), ('p', 'Pending'), ('l', 'Live'), ) status = models.CharField(max_length=2, choices=ARTICLE_STATUS_CHAR, blank=True, null=True) class Inventory(models.Model): barcode = models.PositiveIntegerField(unique=True) parent = models.ForeignKey('self', models.SET_NULL, to_field='barcode', blank=True, null=True) name = models.CharField(blank=False, max_length=20) class Meta: ordering = ('name',) def __str__(self): return self.name def __repr__(self): return self.__str__() class Book(models.Model): title = models.CharField(max_length=40) author = models.ForeignKey(Writer, models.SET_NULL, blank=True, null=True) special_id = models.IntegerField(blank=True, null=True, unique=True) class Meta: unique_together = ('title', 'author') class BookXtra(models.Model): isbn = models.CharField(max_length=16, unique=True) suffix1 = models.IntegerField(blank=True, default=0) suffix2 = models.IntegerField(blank=True, default=0) class 
Meta: unique_together = (('suffix1', 'suffix2')) abstract = True class DerivedBook(Book, BookXtra): pass class ExplicitPK(models.Model): key = models.CharField(max_length=20, primary_key=True) desc = models.CharField(max_length=20, blank=True, unique=True) class Meta: unique_together = ('key', 'desc') def __str__(self): return self.key class Post(models.Model): title = models.CharField(max_length=50, unique_for_date='posted', blank=True) slug = models.CharField(max_length=50, unique_for_year='posted', blank=True) subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True) posted = models.DateField() def __str__(self): return self.title class DateTimePost(models.Model): title = models.CharField(max_length=50, unique_for_date='posted', blank=True) slug = models.CharField(max_length=50, unique_for_year='posted', blank=True) subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True) posted = models.DateTimeField(editable=False) def __str__(self): return self.title class DerivedPost(Post): pass class BigInt(models.Model): biggie = models.BigIntegerField() def __str__(self): return str(self.biggie) class MarkupField(models.CharField): def __init__(self, *args, **kwargs): kwargs["max_length"] = 20 super().__init__(*args, **kwargs) def formfield(self, **kwargs): # don't allow this field to be used in form (real use-case might be # that you know the markup will always be X, but it is among an app # that allows the user to say it could be something else) # regressed at r10062 return None class CustomFieldForExclusionModel(models.Model): name = models.CharField(max_length=10) markup = MarkupField() class FlexibleDatePost(models.Model): title = models.CharField(max_length=50, unique_for_date='posted', blank=True) slug = models.CharField(max_length=50, unique_for_year='posted', blank=True) subtitle = models.CharField(max_length=50, unique_for_month='posted', blank=True) posted = models.DateField(blank=True, null=True) class Colour(models.Model): name = models.CharField(max_length=50) def __iter__(self): yield from range(5) def __str__(self): return self.name class ColourfulItem(models.Model): name = models.CharField(max_length=50) colours = models.ManyToManyField(Colour) class CustomErrorMessage(models.Model): name1 = models.CharField( max_length=50, validators=[validators.validate_slug], error_messages={'invalid': 'Model custom error message.'}, ) name2 = models.CharField( max_length=50, validators=[validators.validate_slug], error_messages={'invalid': 'Model custom error message.'}, ) def clean(self): if self.name1 == 'FORBIDDEN_VALUE': raise ValidationError({'name1': [ValidationError('Model.clean() error messages.')]}) elif self.name1 == 'FORBIDDEN_VALUE2': raise ValidationError({'name1': 'Model.clean() error messages (simpler syntax).'}) elif self.name1 == 'GLOBAL_ERROR': raise ValidationError("Global error message.") def today_callable_dict(): return {"last_action__gte": datetime.datetime.today()} def today_callable_q(): return models.Q(last_action__gte=datetime.datetime.today()) class Character(models.Model): username = models.CharField(max_length=100) last_action = models.DateTimeField() def __str__(self): return self.username class StumpJoke(models.Model): most_recently_fooled = models.ForeignKey( Character, models.CASCADE, limit_choices_to=today_callable_dict, related_name="+", ) has_fooled_today = models.ManyToManyField(Character, limit_choices_to=today_callable_q, related_name="+") # Model for #13776 class Student(models.Model): character = 
models.ForeignKey(Character, models.CASCADE) study = models.CharField(max_length=30) # Model for #639 class Photo(models.Model): title = models.CharField(max_length=30) image = models.FileField(storage=temp_storage, upload_to='tests') # Support code for the tests; this keeps track of how many times save() # gets called on each instance. def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._savecount = 0 def save(self, force_insert=False, force_update=False): super().save(force_insert, force_update) self._savecount += 1 class UUIDPK(models.Model): uuid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) name = models.CharField(max_length=30) # Models for #24706 class StrictAssignmentFieldSpecific(models.Model): title = models.CharField(max_length=30) _should_error = False def __setattr__(self, key, value): if self._should_error is True: raise ValidationError(message={key: "Cannot set attribute"}, code='invalid') super().__setattr__(key, value) class StrictAssignmentAll(models.Model): title = models.CharField(max_length=30) _should_error = False def __setattr__(self, key, value): if self._should_error is True: raise ValidationError(message="Cannot set attribute", code='invalid') super().__setattr__(key, value) # A model with ForeignKey(blank=False, null=True) class Award(models.Model): name = models.CharField(max_length=30) character = models.ForeignKey(Character, models.SET_NULL, blank=False, null=True) class NullableUniqueCharFieldModel(models.Model): codename = models.CharField(max_length=50, blank=True, null=True, unique=True)
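# --- Illustrative sketch (added; not part of the original fixtures) ---
# A minimal ModelForm over ExplicitPK above, showing how the primary-key,
# unique, and unique_together declarations on the model surface as form
# validation. The form name and field list are assumptions for illustration.
def get_explicit_pk_form():
    from django import forms

    class ExplicitPKForm(forms.ModelForm):
        class Meta:
            model = ExplicitPK
            fields = ('key', 'desc')

    return ExplicitPKForm

# Usage (assuming Django is configured and the table exists):
#   form = get_explicit_pk_form()(data={'key': 'k1', 'desc': 'd1'})
#   form.is_valid()  # runs the unique and unique_together checks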
from enum import Enum from django.db.models.query_utils import Q from django.db.models.sql.query import Query __all__ = ['CheckConstraint', 'Deferrable', 'UniqueConstraint'] class BaseConstraint: def __init__(self, name): self.name = name def constraint_sql(self, model, schema_editor): raise NotImplementedError('This method must be implemented by a subclass.') def create_sql(self, model, schema_editor): raise NotImplementedError('This method must be implemented by a subclass.') def remove_sql(self, model, schema_editor): raise NotImplementedError('This method must be implemented by a subclass.') def deconstruct(self): path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__) path = path.replace('django.db.models.constraints', 'django.db.models') return (path, (), {'name': self.name}) def clone(self): _, args, kwargs = self.deconstruct() return self.__class__(*args, **kwargs) class CheckConstraint(BaseConstraint): def __init__(self, *, check, name): self.check = check if not getattr(check, 'conditional', False): raise TypeError( 'CheckConstraint.check must be a Q instance or boolean ' 'expression.' ) super().__init__(name) def _get_check_sql(self, model, schema_editor): query = Query(model=model, alias_cols=False) where = query.build_where(self.check) compiler = query.get_compiler(connection=schema_editor.connection) sql, params = where.as_sql(compiler, schema_editor.connection) return sql % tuple(schema_editor.quote_value(p) for p in params) def constraint_sql(self, model, schema_editor): check = self._get_check_sql(model, schema_editor) return schema_editor._check_sql(self.name, check) def create_sql(self, model, schema_editor): check = self._get_check_sql(model, schema_editor) return schema_editor._create_check_sql(model, self.name, check) def remove_sql(self, model, schema_editor): return schema_editor._delete_check_sql(model, self.name) def __repr__(self): return "<%s: check='%s' name=%r>" % (self.__class__.__name__, self.check, self.name) def __eq__(self, other): if isinstance(other, CheckConstraint): return self.name == other.name and self.check == other.check return super().__eq__(other) def deconstruct(self): path, args, kwargs = super().deconstruct() kwargs['check'] = self.check return path, args, kwargs class Deferrable(Enum): DEFERRED = 'deferred' IMMEDIATE = 'immediate' class UniqueConstraint(BaseConstraint): def __init__( self, *, fields, name, condition=None, deferrable=None, include=None, opclasses=(), ): if not fields: raise ValueError('At least one field is required to define a unique constraint.') if not isinstance(condition, (type(None), Q)): raise ValueError('UniqueConstraint.condition must be a Q instance.') if condition and deferrable: raise ValueError( 'UniqueConstraint with conditions cannot be deferred.' ) if include and deferrable: raise ValueError( 'UniqueConstraint with include fields cannot be deferred.' ) if opclasses and deferrable: raise ValueError( 'UniqueConstraint with opclasses cannot be deferred.' ) if not isinstance(deferrable, (type(None), Deferrable)): raise ValueError( 'UniqueConstraint.deferrable must be a Deferrable instance.' ) if not isinstance(include, (type(None), list, tuple)): raise ValueError('UniqueConstraint.include must be a list or tuple.') if not isinstance(opclasses, (list, tuple)): raise ValueError('UniqueConstraint.opclasses must be a list or tuple.') if opclasses and len(fields) != len(opclasses): raise ValueError( 'UniqueConstraint.fields and UniqueConstraint.opclasses must ' 'have the same number of elements.' 
) self.fields = tuple(fields) self.condition = condition self.deferrable = deferrable self.include = tuple(include) if include else () self.opclasses = opclasses super().__init__(name) def _get_condition_sql(self, model, schema_editor): if self.condition is None: return None query = Query(model=model, alias_cols=False) where = query.build_where(self.condition) compiler = query.get_compiler(connection=schema_editor.connection) sql, params = where.as_sql(compiler, schema_editor.connection) return sql % tuple(schema_editor.quote_value(p) for p in params) def constraint_sql(self, model, schema_editor): fields = [model._meta.get_field(field_name).column for field_name in self.fields] include = [model._meta.get_field(field_name).column for field_name in self.include] condition = self._get_condition_sql(model, schema_editor) return schema_editor._unique_sql( model, fields, self.name, condition=condition, deferrable=self.deferrable, include=include, opclasses=self.opclasses, ) def create_sql(self, model, schema_editor): fields = [model._meta.get_field(field_name).column for field_name in self.fields] include = [model._meta.get_field(field_name).column for field_name in self.include] condition = self._get_condition_sql(model, schema_editor) return schema_editor._create_unique_sql( model, fields, self.name, condition=condition, deferrable=self.deferrable, include=include, opclasses=self.opclasses, ) def remove_sql(self, model, schema_editor): condition = self._get_condition_sql(model, schema_editor) include = [model._meta.get_field(field_name).column for field_name in self.include] return schema_editor._delete_unique_sql( model, self.name, condition=condition, deferrable=self.deferrable, include=include, opclasses=self.opclasses, ) def __repr__(self): return '<%s: fields=%r name=%r%s%s%s%s>' % ( self.__class__.__name__, self.fields, self.name, '' if self.condition is None else ' condition=%s' % self.condition, '' if self.deferrable is None else ' deferrable=%s' % self.deferrable, '' if not self.include else ' include=%s' % repr(self.include), '' if not self.opclasses else ' opclasses=%s' % repr(self.opclasses), ) def __eq__(self, other): if isinstance(other, UniqueConstraint): return ( self.name == other.name and self.fields == other.fields and self.condition == other.condition and self.deferrable == other.deferrable and self.include == other.include and self.opclasses == other.opclasses ) return super().__eq__(other) def deconstruct(self): path, args, kwargs = super().deconstruct() kwargs['fields'] = self.fields if self.condition: kwargs['condition'] = self.condition if self.deferrable: kwargs['deferrable'] = self.deferrable if self.include: kwargs['include'] = self.include if self.opclasses: kwargs['opclasses'] = self.opclasses return path, args, kwargs
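# --- Illustrative sketch (added; not part of the module) ---
# Constructing the constraint objects above directly. Both are plain Python
# objects until a schema editor renders them to SQL, so they can be built
# and deconstructed without touching a database. The field and constraint
# names used here are hypothetical.
def _example_constraints():
    price_positive = CheckConstraint(check=Q(price__gt=0), name='price_gt_0')
    unique_owner_name = UniqueConstraint(
        fields=['owner', 'name'],
        name='unique_owner_name',
        deferrable=Deferrable.DEFERRED,
    )
    # deconstruct() is what migrations serialize, e.g.
    # ('django.db.models.CheckConstraint', (), {'name': ..., 'check': ...}).
    return price_positive.deconstruct(), unique_owner_name.deconstruct()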
import functools import inspect from functools import partial from django import forms from django.apps import apps from django.conf import SettingsReference, settings from django.core import checks, exceptions from django.db import connection, router from django.db.backends import utils from django.db.models import Q from django.db.models.constants import LOOKUP_SEP from django.db.models.deletion import CASCADE, SET_DEFAULT, SET_NULL from django.db.models.query_utils import PathInfo from django.db.models.utils import make_model_tuple from django.utils.functional import cached_property from django.utils.translation import gettext_lazy as _ from . import Field from .mixins import FieldCacheMixin from .related_descriptors import ( ForeignKeyDeferredAttribute, ForwardManyToOneDescriptor, ForwardOneToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from .related_lookups import ( RelatedExact, RelatedGreaterThan, RelatedGreaterThanOrEqual, RelatedIn, RelatedIsNull, RelatedLessThan, RelatedLessThanOrEqual, ) from .reverse_related import ( ForeignObjectRel, ManyToManyRel, ManyToOneRel, OneToOneRel, ) RECURSIVE_RELATIONSHIP_CONSTANT = 'self' def resolve_relation(scope_model, relation): """ Transform relation into a model or fully-qualified model string of the form "app_label.ModelName", relative to scope_model. The relation argument can be: * RECURSIVE_RELATIONSHIP_CONSTANT, i.e. the string "self", in which case the model argument will be returned. * A bare model name without an app_label, in which case scope_model's app_label will be prepended. * An "app_label.ModelName" string. * A model class, which will be returned unchanged. """ # Check for recursive relations if relation == RECURSIVE_RELATIONSHIP_CONSTANT: relation = scope_model # Look for an "app.Model" relation if isinstance(relation, str): if "." not in relation: relation = "%s.%s" % (scope_model._meta.app_label, relation) return relation def lazy_related_operation(function, model, *related_models, **kwargs): """ Schedule `function` to be called once `model` and all `related_models` have been imported and registered with the app registry. `function` will be called with the newly-loaded model classes as its positional arguments, plus any optional keyword arguments. The `model` argument must be a model class. Each subsequent positional argument is another model, or a reference to another model - see `resolve_relation()` for the various forms these may take. Any relative references will be resolved relative to `model`. This is a convenience wrapper for `Apps.lazy_model_operation` - the app registry model used is the one found in `model._meta.apps`. """ models = [model] + [resolve_relation(model, rel) for rel in related_models] model_keys = (make_model_tuple(m) for m in models) apps = model._meta.apps return apps.lazy_model_operation(partial(function, **kwargs), *model_keys) class RelatedField(FieldCacheMixin, Field): """Base class that all relational fields inherit from.""" # Field flags one_to_many = False one_to_one = False many_to_many = False many_to_one = False @cached_property def related_model(self): # Can't cache this property until all the models are loaded. 
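        # check_models_ready() raises AppRegistryNotReady while the app
        # registry is still loading, so premature access fails loudly
        # instead of caching an unresolved (string) model reference.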
apps.check_models_ready() return self.remote_field.model def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_related_name_is_valid(), *self._check_related_query_name_is_valid(), *self._check_relation_model_exists(), *self._check_referencing_to_swapped_model(), *self._check_clashes(), ] def _check_related_name_is_valid(self): import keyword related_name = self.remote_field.related_name if related_name is None: return [] is_valid_id = not keyword.iskeyword(related_name) and related_name.isidentifier() if not (is_valid_id or related_name.endswith('+')): return [ checks.Error( "The name '%s' is invalid related_name for field %s.%s" % (self.remote_field.related_name, self.model._meta.object_name, self.name), hint="Related name must be a valid Python identifier or end with a '+'", obj=self, id='fields.E306', ) ] return [] def _check_related_query_name_is_valid(self): if self.remote_field.is_hidden(): return [] rel_query_name = self.related_query_name() errors = [] if rel_query_name.endswith('_'): errors.append( checks.Error( "Reverse query name '%s' must not end with an underscore." % rel_query_name, hint=("Add or change a related_name or related_query_name " "argument for this field."), obj=self, id='fields.E308', ) ) if LOOKUP_SEP in rel_query_name: errors.append( checks.Error( "Reverse query name '%s' must not contain '%s'." % (rel_query_name, LOOKUP_SEP), hint=("Add or change a related_name or related_query_name " "argument for this field."), obj=self, id='fields.E309', ) ) return errors def _check_relation_model_exists(self): rel_is_missing = self.remote_field.model not in self.opts.apps.get_models() rel_is_string = isinstance(self.remote_field.model, str) model_name = self.remote_field.model if rel_is_string else self.remote_field.model._meta.object_name if rel_is_missing and (rel_is_string or not self.remote_field.model._meta.swapped): return [ checks.Error( "Field defines a relation with model '%s', which is either " "not installed, or is abstract." % model_name, obj=self, id='fields.E300', ) ] return [] def _check_referencing_to_swapped_model(self): if (self.remote_field.model not in self.opts.apps.get_models() and not isinstance(self.remote_field.model, str) and self.remote_field.model._meta.swapped): model = "%s.%s" % ( self.remote_field.model._meta.app_label, self.remote_field.model._meta.object_name ) return [ checks.Error( "Field defines a relation with the model '%s', which has " "been swapped out." % model, hint="Update the relation to point at 'settings.%s'." % self.remote_field.model._meta.swappable, obj=self, id='fields.E301', ) ] return [] def _check_clashes(self): """Check accessor and reverse query name clashes.""" from django.db.models.base import ModelBase errors = [] opts = self.model._meta # `f.remote_field.model` may be a string instead of a model. Skip if model name is # not resolved. if not isinstance(self.remote_field.model, ModelBase): return [] # Consider that we are checking field `Model.foreign` and the models # are: # # class Target(models.Model): # model = models.IntegerField() # model_set = models.IntegerField() # # class Model(models.Model): # foreign = models.ForeignKey(Target) # m2m = models.ManyToManyField(Target) # rel_opts.object_name == "Target" rel_opts = self.remote_field.model._meta # If the field doesn't install a backward relation on the target model # (so `is_hidden` returns True), then there are no clashes to check # and we can skip these fields. 
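        # A relation is hidden when its related_name ends with '+' (e.g.
        # related_name='+'), in which case no reverse accessor is created.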
rel_is_hidden = self.remote_field.is_hidden() rel_name = self.remote_field.get_accessor_name() # i. e. "model_set" rel_query_name = self.related_query_name() # i. e. "model" field_name = "%s.%s" % (opts.object_name, self.name) # i. e. "Model.field" # Check clashes between accessor or reverse query name of `field` # and any other field name -- i.e. accessor for Model.foreign is # model_set and it clashes with Target.model_set. potential_clashes = rel_opts.fields + rel_opts.many_to_many for clash_field in potential_clashes: clash_name = "%s.%s" % (rel_opts.object_name, clash_field.name) # i.e. "Target.model_set" if not rel_is_hidden and clash_field.name == rel_name: errors.append( checks.Error( "Reverse accessor for '%s' clashes with field name '%s'." % (field_name, clash_name), hint=("Rename field '%s', or add/change a related_name " "argument to the definition for field '%s'.") % (clash_name, field_name), obj=self, id='fields.E302', ) ) if clash_field.name == rel_query_name: errors.append( checks.Error( "Reverse query name for '%s' clashes with field name '%s'." % (field_name, clash_name), hint=("Rename field '%s', or add/change a related_name " "argument to the definition for field '%s'.") % (clash_name, field_name), obj=self, id='fields.E303', ) ) # Check clashes between accessors/reverse query names of `field` and # any other field accessor -- i. e. Model.foreign accessor clashes with # Model.m2m accessor. potential_clashes = (r for r in rel_opts.related_objects if r.field is not self) for clash_field in potential_clashes: clash_name = "%s.%s" % ( # i. e. "Model.m2m" clash_field.related_model._meta.object_name, clash_field.field.name) if not rel_is_hidden and clash_field.get_accessor_name() == rel_name: errors.append( checks.Error( "Reverse accessor for '%s' clashes with reverse accessor for '%s'." % (field_name, clash_name), hint=("Add or change a related_name argument " "to the definition for '%s' or '%s'.") % (field_name, clash_name), obj=self, id='fields.E304', ) ) if clash_field.get_accessor_name() == rel_query_name: errors.append( checks.Error( "Reverse query name for '%s' clashes with reverse query name for '%s'." % (field_name, clash_name), hint=("Add or change a related_name argument " "to the definition for '%s' or '%s'.") % (field_name, clash_name), obj=self, id='fields.E305', ) ) return errors def db_type(self, connection): # By default related field will not have a column as it relates to # columns from another table. 
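        # Subclasses that do store a column (e.g. ForeignKey below)
        # override this and delegate to the target field's rel_db_type().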
return None def contribute_to_class(self, cls, name, private_only=False, **kwargs): super().contribute_to_class(cls, name, private_only=private_only, **kwargs) self.opts = cls._meta if not cls._meta.abstract: if self.remote_field.related_name: related_name = self.remote_field.related_name else: related_name = self.opts.default_related_name if related_name: related_name = related_name % { 'class': cls.__name__.lower(), 'model_name': cls._meta.model_name.lower(), 'app_label': cls._meta.app_label.lower() } self.remote_field.related_name = related_name if self.remote_field.related_query_name: related_query_name = self.remote_field.related_query_name % { 'class': cls.__name__.lower(), 'app_label': cls._meta.app_label.lower(), } self.remote_field.related_query_name = related_query_name def resolve_related_class(model, related, field): field.remote_field.model = related field.do_related_class(related, model) lazy_related_operation(resolve_related_class, cls, self.remote_field.model, field=self) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.remote_field.limit_choices_to: kwargs['limit_choices_to'] = self.remote_field.limit_choices_to if self.remote_field.related_name is not None: kwargs['related_name'] = self.remote_field.related_name if self.remote_field.related_query_name is not None: kwargs['related_query_name'] = self.remote_field.related_query_name return name, path, args, kwargs def get_forward_related_filter(self, obj): """ Return the keyword arguments that when supplied to self.model.object.filter(), would select all instances related through this field to the remote obj. This is used to build the querysets returned by related descriptors. obj is an instance of self.related_field.model. """ return { '%s__%s' % (self.name, rh_field.name): getattr(obj, rh_field.attname) for _, rh_field in self.related_fields } def get_reverse_related_filter(self, obj): """ Complement to get_forward_related_filter(). Return the keyword arguments that when passed to self.related_field.model.object.filter() select all instances of self.related_field.model related through this field to obj. obj is an instance of self.model. """ base_filter = { rh_field.attname: getattr(obj, lh_field.attname) for lh_field, rh_field in self.related_fields } descriptor_filter = self.get_extra_descriptor_filter(obj) base_q = Q(**base_filter) if isinstance(descriptor_filter, dict): return base_q & Q(**descriptor_filter) elif descriptor_filter: return base_q & descriptor_filter return base_q @property def swappable_setting(self): """ Get the setting that this is powered from for swapping, or None if it's not swapped in / marked with swappable=False. """ if self.swappable: # Work out string form of "to" if isinstance(self.remote_field.model, str): to_string = self.remote_field.model else: to_string = self.remote_field.model._meta.label return apps.get_swappable_settings_name(to_string) return None def set_attributes_from_rel(self): self.name = ( self.name or (self.remote_field.model._meta.model_name + '_' + self.remote_field.model._meta.pk.name) ) if self.verbose_name is None: self.verbose_name = self.remote_field.model._meta.verbose_name self.remote_field.set_field_name() def do_related_class(self, other, cls): self.set_attributes_from_rel() self.contribute_to_related_class(other, self.remote_field) def get_limit_choices_to(self): """ Return ``limit_choices_to`` for this model field. If it is a callable, it will be invoked and the result will be returned. 
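        For example, a callable ``limit_choices_to`` may return a fresh Q
        object on each call, so the restriction is evaluated when a form is
        rendered rather than when the model class is defined.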
""" if callable(self.remote_field.limit_choices_to): return self.remote_field.limit_choices_to() return self.remote_field.limit_choices_to def formfield(self, **kwargs): """ Pass ``limit_choices_to`` to the field being constructed. Only passes it if there is a type that supports related fields. This is a similar strategy used to pass the ``queryset`` to the field being constructed. """ defaults = {} if hasattr(self.remote_field, 'get_related_field'): # If this is a callable, do not invoke it here. Just pass # it in the defaults for when the form class will later be # instantiated. limit_choices_to = self.remote_field.limit_choices_to defaults.update({ 'limit_choices_to': limit_choices_to, }) defaults.update(kwargs) return super().formfield(**defaults) def related_query_name(self): """ Define the name that can be used to identify this related object in a table-spanning query. """ return self.remote_field.related_query_name or self.remote_field.related_name or self.opts.model_name @property def target_field(self): """ When filtering against this relation, return the field on the remote model against which the filtering should happen. """ target_fields = self.get_path_info()[-1].target_fields if len(target_fields) > 1: raise exceptions.FieldError( "The relation has multiple target fields, but only single target field was asked for") return target_fields[0] def get_cache_name(self): return self.name class ForeignObject(RelatedField): """ Abstraction of the ForeignKey relation to support multi-column relations. """ # Field flags many_to_many = False many_to_one = True one_to_many = False one_to_one = False requires_unique_target = True related_accessor_class = ReverseManyToOneDescriptor forward_related_accessor_class = ForwardManyToOneDescriptor rel_class = ForeignObjectRel def __init__(self, to, on_delete, from_fields, to_fields, rel=None, related_name=None, related_query_name=None, limit_choices_to=None, parent_link=False, swappable=True, **kwargs): if rel is None: rel = self.rel_class( self, to, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, parent_link=parent_link, on_delete=on_delete, ) super().__init__(rel=rel, **kwargs) self.from_fields = from_fields self.to_fields = to_fields self.swappable = swappable def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_to_fields_exist(), *self._check_unique_target(), ] def _check_to_fields_exist(self): # Skip nonexistent models. if isinstance(self.remote_field.model, str): return [] errors = [] for to_field in self.to_fields: if to_field: try: self.remote_field.model._meta.get_field(to_field) except exceptions.FieldDoesNotExist: errors.append( checks.Error( "The to_field '%s' doesn't exist on the related " "model '%s'." 
% (to_field, self.remote_field.model._meta.label), obj=self, id='fields.E312', ) ) return errors def _check_unique_target(self): rel_is_string = isinstance(self.remote_field.model, str) if rel_is_string or not self.requires_unique_target: return [] try: self.foreign_related_fields except exceptions.FieldDoesNotExist: return [] if not self.foreign_related_fields: return [] unique_foreign_fields = { frozenset([f.name]) for f in self.remote_field.model._meta.get_fields() if getattr(f, 'unique', False) } unique_foreign_fields.update({ frozenset(ut) for ut in self.remote_field.model._meta.unique_together }) unique_foreign_fields.update({ frozenset(uc.fields) for uc in self.remote_field.model._meta.total_unique_constraints }) foreign_fields = {f.name for f in self.foreign_related_fields} has_unique_constraint = any(u <= foreign_fields for u in unique_foreign_fields) if not has_unique_constraint and len(self.foreign_related_fields) > 1: field_combination = ', '.join( "'%s'" % rel_field.name for rel_field in self.foreign_related_fields ) model_name = self.remote_field.model.__name__ return [ checks.Error( "No subset of the fields %s on model '%s' is unique." % (field_combination, model_name), hint=( 'Mark a single field as unique=True or add a set of ' 'fields to a unique constraint (via unique_together ' 'or a UniqueConstraint (without condition) in the ' 'model Meta.constraints).' ), obj=self, id='fields.E310', ) ] elif not has_unique_constraint: field_name = self.foreign_related_fields[0].name model_name = self.remote_field.model.__name__ return [ checks.Error( "'%s.%s' must be unique because it is referenced by " "a foreign key." % (model_name, field_name), hint=( 'Add unique=True to this field or add a ' 'UniqueConstraint (without condition) in the model ' 'Meta.constraints.' ), obj=self, id='fields.E311', ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() kwargs['on_delete'] = self.remote_field.on_delete kwargs['from_fields'] = self.from_fields kwargs['to_fields'] = self.to_fields if self.remote_field.parent_link: kwargs['parent_link'] = self.remote_field.parent_link if isinstance(self.remote_field.model, str): if '.' in self.remote_field.model: app_label, model_name = self.remote_field.model.split('.') kwargs['to'] = '%s.%s' % (app_label, model_name.lower()) else: kwargs['to'] = self.remote_field.model.lower() else: kwargs['to'] = self.remote_field.model._meta.label_lower # If swappable is True, then see if we're actually pointing to the target # of a swap. 
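        # e.g. a ForeignKey to the user model deconstructs to a
        # SettingsReference on settings.AUTH_USER_MODEL rather than a
        # hardcoded "auth.user" label, keeping migrations portable across
        # swapped models.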
swappable_setting = self.swappable_setting if swappable_setting is not None: # If it's already a settings reference, error if hasattr(kwargs['to'], "setting_name"): if kwargs['to'].setting_name != swappable_setting: raise ValueError( "Cannot deconstruct a ForeignKey pointing to a model " "that is swapped in place of more than one model (%s and %s)" % (kwargs['to'].setting_name, swappable_setting) ) # Set it kwargs['to'] = SettingsReference( kwargs['to'], swappable_setting, ) return name, path, args, kwargs def resolve_related_fields(self): if not self.from_fields or len(self.from_fields) != len(self.to_fields): raise ValueError('Foreign Object from and to fields must be the same non-zero length') if isinstance(self.remote_field.model, str): raise ValueError('Related model %r cannot be resolved' % self.remote_field.model) related_fields = [] for index in range(len(self.from_fields)): from_field_name = self.from_fields[index] to_field_name = self.to_fields[index] from_field = ( self if from_field_name == RECURSIVE_RELATIONSHIP_CONSTANT else self.opts.get_field(from_field_name) ) to_field = (self.remote_field.model._meta.pk if to_field_name is None else self.remote_field.model._meta.get_field(to_field_name)) related_fields.append((from_field, to_field)) return related_fields @cached_property def related_fields(self): return self.resolve_related_fields() @cached_property def reverse_related_fields(self): return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields] @cached_property def local_related_fields(self): return tuple(lhs_field for lhs_field, rhs_field in self.related_fields) @cached_property def foreign_related_fields(self): return tuple(rhs_field for lhs_field, rhs_field in self.related_fields if rhs_field) def get_local_related_value(self, instance): return self.get_instance_value_for_fields(instance, self.local_related_fields) def get_foreign_related_value(self, instance): return self.get_instance_value_for_fields(instance, self.foreign_related_fields) @staticmethod def get_instance_value_for_fields(instance, fields): ret = [] opts = instance._meta for field in fields: # Gotcha: in some cases (like fixture loading) a model can have # different values in parent_ptr_id and parent's id. So, use # instance.pk (that is, parent_ptr_id) when asked for instance.id. if field.primary_key: possible_parent_link = opts.get_ancestor_link(field.model) if (not possible_parent_link or possible_parent_link.primary_key or possible_parent_link.model._meta.abstract): ret.append(instance.pk) continue ret.append(getattr(instance, field.attname)) return tuple(ret) def get_attname_column(self): attname, column = super().get_attname_column() return attname, None def get_joining_columns(self, reverse_join=False): source = self.reverse_related_fields if reverse_join else self.related_fields return tuple((lhs_field.column, rhs_field.column) for lhs_field, rhs_field in source) def get_reverse_joining_columns(self): return self.get_joining_columns(reverse_join=True) def get_extra_descriptor_filter(self, instance): """ Return an extra filter condition for related object fetching when user does 'instance.fieldname', that is the extra filter is used in the descriptor of the field. The filter should be either a dict usable in .filter(**kwargs) call or a Q-object. The condition will be ANDed together with the relation's joining columns. A parallel method is get_extra_restriction() which is used in JOIN and subquery conditions. 
""" return {} def get_extra_restriction(self, where_class, alias, related_alias): """ Return a pair condition used for joining and subquery pushdown. The condition is something that responds to as_sql(compiler, connection) method. Note that currently referring both the 'alias' and 'related_alias' will not work in some conditions, like subquery pushdown. A parallel method is get_extra_descriptor_filter() which is used in instance.fieldname related object fetching. """ return None def get_path_info(self, filtered_relation=None): """Get path from this field to the related model.""" opts = self.remote_field.model._meta from_opts = self.model._meta return [PathInfo( from_opts=from_opts, to_opts=opts, target_fields=self.foreign_related_fields, join_field=self, m2m=False, direct=True, filtered_relation=filtered_relation, )] def get_reverse_path_info(self, filtered_relation=None): """Get path from the related model to this field's model.""" opts = self.model._meta from_opts = self.remote_field.model._meta return [PathInfo( from_opts=from_opts, to_opts=opts, target_fields=(opts.pk,), join_field=self.remote_field, m2m=not self.unique, direct=False, filtered_relation=filtered_relation, )] @classmethod @functools.lru_cache(maxsize=None) def get_lookups(cls): bases = inspect.getmro(cls) bases = bases[:bases.index(ForeignObject) + 1] class_lookups = [parent.__dict__.get('class_lookups', {}) for parent in bases] return cls.merge_dicts(class_lookups) def contribute_to_class(self, cls, name, private_only=False, **kwargs): super().contribute_to_class(cls, name, private_only=private_only, **kwargs) setattr(cls, self.name, self.forward_related_accessor_class(self)) def contribute_to_related_class(self, cls, related): # Internal FK's - i.e., those with a related name ending with '+' - # and swapped models don't get a related descriptor. if not self.remote_field.is_hidden() and not related.related_model._meta.swapped: setattr(cls._meta.concrete_model, related.get_accessor_name(), self.related_accessor_class(related)) # While 'limit_choices_to' might be a callable, simply pass # it along for later - this is too early because it's still # model load time. if self.remote_field.limit_choices_to: cls._meta.related_fkey_lookups.append(self.remote_field.limit_choices_to) ForeignObject.register_lookup(RelatedIn) ForeignObject.register_lookup(RelatedExact) ForeignObject.register_lookup(RelatedLessThan) ForeignObject.register_lookup(RelatedGreaterThan) ForeignObject.register_lookup(RelatedGreaterThanOrEqual) ForeignObject.register_lookup(RelatedLessThanOrEqual) ForeignObject.register_lookup(RelatedIsNull) class ForeignKey(ForeignObject): """ Provide a many-to-one relation by adding a column to the local model to hold the remote value. By default ForeignKey will target the pk of the remote model but this behavior can be changed by using the ``to_field`` argument. """ descriptor_class = ForeignKeyDeferredAttribute # Field flags many_to_many = False many_to_one = True one_to_many = False one_to_one = False rel_class = ManyToOneRel empty_strings_allowed = False default_error_messages = { 'invalid': _('%(model)s instance with %(field)s %(value)r does not exist.') } description = _("Foreign Key (type determined by related field)") def __init__(self, to, on_delete, related_name=None, related_query_name=None, limit_choices_to=None, parent_link=False, to_field=None, db_constraint=True, **kwargs): try: to._meta.model_name except AttributeError: assert isinstance(to, str), ( "%s(%r) is invalid. 
First parameter to ForeignKey must be " "either a model, a model name, or the string %r" % ( self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT, ) ) else: # For backwards compatibility purposes, we need to *try* and set # the to_field during FK construction. It won't be guaranteed to # be correct until contribute_to_class is called. Refs #12190. to_field = to_field or (to._meta.pk and to._meta.pk.name) if not callable(on_delete): raise TypeError('on_delete must be callable.') kwargs['rel'] = self.rel_class( self, to, to_field, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, parent_link=parent_link, on_delete=on_delete, ) kwargs.setdefault('db_index', True) super().__init__( to, on_delete, from_fields=[RECURSIVE_RELATIONSHIP_CONSTANT], to_fields=[to_field], **kwargs, ) self.db_constraint = db_constraint def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_on_delete(), *self._check_unique(), ] def _check_on_delete(self): on_delete = getattr(self.remote_field, 'on_delete', None) if on_delete == SET_NULL and not self.null: return [ checks.Error( 'Field specifies on_delete=SET_NULL, but cannot be null.', hint='Set null=True argument on the field, or change the on_delete rule.', obj=self, id='fields.E320', ) ] elif on_delete == SET_DEFAULT and not self.has_default(): return [ checks.Error( 'Field specifies on_delete=SET_DEFAULT, but has no default value.', hint='Set a default value, or change the on_delete rule.', obj=self, id='fields.E321', ) ] else: return [] def _check_unique(self, **kwargs): return [ checks.Warning( 'Setting unique=True on a ForeignKey has the same effect as using a OneToOneField.', hint='ForeignKey(unique=True) is usually better served by a OneToOneField.', obj=self, id='fields.W342', ) ] if self.unique else [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs['to_fields'] del kwargs['from_fields'] # Handle the simpler arguments if self.db_index: del kwargs['db_index'] else: kwargs['db_index'] = False if self.db_constraint is not True: kwargs['db_constraint'] = self.db_constraint # Rel needs more work. 
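        # Only serialize to_field when it differs from the remote model's
        # primary key; the common ForeignKey(SomeModel) case stays compact.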
to_meta = getattr(self.remote_field.model, "_meta", None) if self.remote_field.field_name and ( not to_meta or (to_meta.pk and self.remote_field.field_name != to_meta.pk.name)): kwargs['to_field'] = self.remote_field.field_name return name, path, args, kwargs def to_python(self, value): return self.target_field.to_python(value) @property def target_field(self): return self.foreign_related_fields[0] def get_reverse_path_info(self, filtered_relation=None): """Get path from the related model to this field's model.""" opts = self.model._meta from_opts = self.remote_field.model._meta return [PathInfo( from_opts=from_opts, to_opts=opts, target_fields=(opts.pk,), join_field=self.remote_field, m2m=not self.unique, direct=False, filtered_relation=filtered_relation, )] def validate(self, value, model_instance): if self.remote_field.parent_link: return super().validate(value, model_instance) if value is None: return using = router.db_for_read(self.remote_field.model, instance=model_instance) qs = self.remote_field.model._base_manager.using(using).filter( **{self.remote_field.field_name: value} ) qs = qs.complex_filter(self.get_limit_choices_to()) if not qs.exists(): raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={ 'model': self.remote_field.model._meta.verbose_name, 'pk': value, 'field': self.remote_field.field_name, 'value': value, }, # 'pk' is included for backwards compatibility ) def resolve_related_fields(self): related_fields = super().resolve_related_fields() for from_field, to_field in related_fields: if to_field and to_field.model != self.remote_field.model._meta.concrete_model: raise exceptions.FieldError( "'%s.%s' refers to field '%s' which is not local to model " "'%s'." % ( self.model._meta.label, self.name, to_field.name, self.remote_field.model._meta.concrete_model._meta.label, ) ) return related_fields def get_attname(self): return '%s_id' % self.name def get_attname_column(self): attname = self.get_attname() column = self.db_column or attname return attname, column def get_default(self): """Return the to_field if the default value is an object.""" field_default = super().get_default() if isinstance(field_default, self.remote_field.model): return getattr(field_default, self.target_field.attname) return field_default def get_db_prep_save(self, value, connection): if value is None or (value == '' and (not self.target_field.empty_strings_allowed or connection.features.interprets_empty_strings_as_nulls)): return None else: return self.target_field.get_db_prep_save(value, connection=connection) def get_db_prep_value(self, value, connection, prepared=False): return self.target_field.get_db_prep_value(value, connection, prepared) def get_prep_value(self, value): return self.target_field.get_prep_value(value) def contribute_to_related_class(self, cls, related): super().contribute_to_related_class(cls, related) if self.remote_field.field_name is None: self.remote_field.field_name = cls._meta.pk.name def formfield(self, *, using=None, **kwargs): if isinstance(self.remote_field.model, str): raise ValueError("Cannot create form field for %r yet, because " "its related model %r has not been loaded yet" % (self.name, self.remote_field.model)) return super().formfield(**{ 'form_class': forms.ModelChoiceField, 'queryset': self.remote_field.model._default_manager.using(using), 'to_field_name': self.remote_field.field_name, **kwargs, 'blank': self.blank, }) def db_check(self, connection): return [] def db_type(self, connection): return 
self.target_field.rel_db_type(connection=connection) def db_parameters(self, connection): return {"type": self.db_type(connection), "check": self.db_check(connection)} def convert_empty_strings(self, value, expression, connection): if (not value) and isinstance(value, str): return None return value def get_db_converters(self, connection): converters = super().get_db_converters(connection) if connection.features.interprets_empty_strings_as_nulls: converters += [self.convert_empty_strings] return converters def get_col(self, alias, output_field=None): if output_field is None: output_field = self.target_field while isinstance(output_field, ForeignKey): output_field = output_field.target_field if output_field is self: raise ValueError('Cannot resolve output_field.') return super().get_col(alias, output_field) class OneToOneField(ForeignKey): """ A OneToOneField is essentially the same as a ForeignKey, with the exception that it always carries a "unique" constraint with it and the reverse relation always returns the object pointed to (since there will only ever be one), rather than returning a list. """ # Field flags many_to_many = False many_to_one = False one_to_many = False one_to_one = True related_accessor_class = ReverseOneToOneDescriptor forward_related_accessor_class = ForwardOneToOneDescriptor rel_class = OneToOneRel description = _("One-to-one relationship") def __init__(self, to, on_delete, to_field=None, **kwargs): kwargs['unique'] = True super().__init__(to, on_delete, to_field=to_field, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if "unique" in kwargs: del kwargs['unique'] return name, path, args, kwargs def formfield(self, **kwargs): if self.remote_field.parent_link: return None return super().formfield(**kwargs) def save_form_data(self, instance, data): if isinstance(data, self.remote_field.model): setattr(instance, self.name, data) else: setattr(instance, self.attname, data) # Remote field object must be cleared otherwise Model.save() # will reassign attname using the related object pk. if data is None: setattr(instance, self.name, data) def _check_unique(self, **kwargs): # Override ForeignKey since check isn't applicable here. return [] def create_many_to_many_intermediary_model(field, klass): from django.db import models def set_managed(model, related, through): through._meta.managed = model._meta.managed or related._meta.managed to_model = resolve_relation(klass, field.remote_field.model) name = '%s_%s' % (klass._meta.object_name, field.name) lazy_related_operation(set_managed, klass, to_model, name) to = make_model_tuple(to_model)[1] from_ = klass._meta.model_name if to == from_: to = 'to_%s' % to from_ = 'from_%s' % from_ meta = type('Meta', (), { 'db_table': field._get_m2m_db_table(klass._meta), 'auto_created': klass, 'app_label': klass._meta.app_label, 'db_tablespace': klass._meta.db_tablespace, 'unique_together': (from_, to), 'verbose_name': _('%(from)s-%(to)s relationship') % {'from': from_, 'to': to}, 'verbose_name_plural': _('%(from)s-%(to)s relationships') % {'from': from_, 'to': to}, 'apps': field.model._meta.apps, }) # Construct and return the new class. 
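    # For a field like ``categories = ManyToManyField(Category)`` on Article,
    # this builds a hidden "Article_categories" model with ``article`` and
    # ``category`` foreign keys (the concrete names come from the code above).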
return type(name, (models.Model,), { 'Meta': meta, '__module__': klass.__module__, from_: models.ForeignKey( klass, related_name='%s+' % name, db_tablespace=field.db_tablespace, db_constraint=field.remote_field.db_constraint, on_delete=CASCADE, ), to: models.ForeignKey( to_model, related_name='%s+' % name, db_tablespace=field.db_tablespace, db_constraint=field.remote_field.db_constraint, on_delete=CASCADE, ) }) class ManyToManyField(RelatedField): """ Provide a many-to-many relation by using an intermediary model that holds two ForeignKey fields pointed at the two sides of the relation. Unless a ``through`` model was provided, ManyToManyField will use the create_many_to_many_intermediary_model factory to automatically generate the intermediary model. """ # Field flags many_to_many = True many_to_one = False one_to_many = False one_to_one = False rel_class = ManyToManyRel description = _("Many-to-many relationship") def __init__(self, to, related_name=None, related_query_name=None, limit_choices_to=None, symmetrical=None, through=None, through_fields=None, db_constraint=True, db_table=None, swappable=True, **kwargs): try: to._meta except AttributeError: assert isinstance(to, str), ( "%s(%r) is invalid. First parameter to ManyToManyField must be " "either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT) ) if symmetrical is None: symmetrical = (to == RECURSIVE_RELATIONSHIP_CONSTANT) if through is not None: assert db_table is None, ( "Cannot specify a db_table if an intermediary model is used." ) kwargs['rel'] = self.rel_class( self, to, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, symmetrical=symmetrical, through=through, through_fields=through_fields, db_constraint=db_constraint, ) self.has_null_arg = 'null' in kwargs super().__init__(**kwargs) self.db_table = db_table self.swappable = swappable def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_unique(**kwargs), *self._check_relationship_model(**kwargs), *self._check_ignored_options(**kwargs), *self._check_table_uniqueness(**kwargs), ] def _check_unique(self, **kwargs): if self.unique: return [ checks.Error( 'ManyToManyFields cannot be unique.', obj=self, id='fields.E330', ) ] return [] def _check_ignored_options(self, **kwargs): warnings = [] if self.has_null_arg: warnings.append( checks.Warning( 'null has no effect on ManyToManyField.', obj=self, id='fields.W340', ) ) if self._validators: warnings.append( checks.Warning( 'ManyToManyField does not support validators.', obj=self, id='fields.W341', ) ) if (self.remote_field.limit_choices_to and self.remote_field.through and not self.remote_field.through._meta.auto_created): warnings.append( checks.Warning( 'limit_choices_to has no effect on ManyToManyField ' 'with a through model.', obj=self, id='fields.W343', ) ) return warnings def _check_relationship_model(self, from_model=None, **kwargs): if hasattr(self.remote_field.through, '_meta'): qualified_model_name = "%s.%s" % ( self.remote_field.through._meta.app_label, self.remote_field.through.__name__) else: qualified_model_name = self.remote_field.through errors = [] if self.remote_field.through not in self.opts.apps.get_models(include_auto_created=True): # The relationship model is not installed. errors.append( checks.Error( "Field specifies a many-to-many relation through model " "'%s', which has not been installed." 
% qualified_model_name, obj=self, id='fields.E331', ) ) else: assert from_model is not None, ( "ManyToManyField with intermediate " "tables cannot be checked if you don't pass the model " "where the field is attached to." ) # Set some useful local variables to_model = resolve_relation(from_model, self.remote_field.model) from_model_name = from_model._meta.object_name if isinstance(to_model, str): to_model_name = to_model else: to_model_name = to_model._meta.object_name relationship_model_name = self.remote_field.through._meta.object_name self_referential = from_model == to_model # Count foreign keys in intermediate model if self_referential: seen_self = sum( from_model == getattr(field.remote_field, 'model', None) for field in self.remote_field.through._meta.fields ) if seen_self > 2 and not self.remote_field.through_fields: errors.append( checks.Error( "The model is used as an intermediate model by " "'%s', but it has more than two foreign keys " "to '%s', which is ambiguous. You must specify " "which two foreign keys Django should use via the " "through_fields keyword argument." % (self, from_model_name), hint="Use through_fields to specify which two foreign keys Django should use.", obj=self.remote_field.through, id='fields.E333', ) ) else: # Count foreign keys in relationship model seen_from = sum( from_model == getattr(field.remote_field, 'model', None) for field in self.remote_field.through._meta.fields ) seen_to = sum( to_model == getattr(field.remote_field, 'model', None) for field in self.remote_field.through._meta.fields ) if seen_from > 1 and not self.remote_field.through_fields: errors.append( checks.Error( ("The model is used as an intermediate model by " "'%s', but it has more than one foreign key " "from '%s', which is ambiguous. You must specify " "which foreign key Django should use via the " "through_fields keyword argument.") % (self, from_model_name), hint=( 'If you want to create a recursive relationship, ' 'use ManyToManyField("%s", through="%s").' ) % ( RECURSIVE_RELATIONSHIP_CONSTANT, relationship_model_name, ), obj=self, id='fields.E334', ) ) if seen_to > 1 and not self.remote_field.through_fields: errors.append( checks.Error( "The model is used as an intermediate model by " "'%s', but it has more than one foreign key " "to '%s', which is ambiguous. You must specify " "which foreign key Django should use via the " "through_fields keyword argument." % (self, to_model_name), hint=( 'If you want to create a recursive relationship, ' 'use ManyToManyField("%s", through="%s").' ) % ( RECURSIVE_RELATIONSHIP_CONSTANT, relationship_model_name, ), obj=self, id='fields.E335', ) ) if seen_from == 0 or seen_to == 0: errors.append( checks.Error( "The model is used as an intermediate model by " "'%s', but it does not have a foreign key to '%s' or '%s'." % ( self, from_model_name, to_model_name ), obj=self.remote_field.through, id='fields.E336', ) ) # Validate `through_fields`. if self.remote_field.through_fields is not None: # Validate that we're given an iterable of at least two items # and that none of them is "falsy". if not (len(self.remote_field.through_fields) >= 2 and self.remote_field.through_fields[0] and self.remote_field.through_fields[1]): errors.append( checks.Error( "Field specifies 'through_fields' but does not provide " "the names of the two link fields that should be used " "for the relation through model '%s'." 
% qualified_model_name, hint="Make sure you specify 'through_fields' as through_fields=('field1', 'field2')", obj=self, id='fields.E337', ) ) # Validate the given through fields -- they should be actual # fields on the through model, and also be foreign keys to the # expected models. else: assert from_model is not None, ( "ManyToManyField with intermediate " "tables cannot be checked if you don't pass the model " "where the field is attached to." ) source, through, target = from_model, self.remote_field.through, self.remote_field.model source_field_name, target_field_name = self.remote_field.through_fields[:2] for field_name, related_model in ((source_field_name, source), (target_field_name, target)): possible_field_names = [] for f in through._meta.fields: if hasattr(f, 'remote_field') and getattr(f.remote_field, 'model', None) == related_model: possible_field_names.append(f.name) if possible_field_names: hint = "Did you mean one of the following foreign keys to '%s': %s?" % ( related_model._meta.object_name, ', '.join(possible_field_names), ) else: hint = None try: field = through._meta.get_field(field_name) except exceptions.FieldDoesNotExist: errors.append( checks.Error( "The intermediary model '%s' has no field '%s'." % (qualified_model_name, field_name), hint=hint, obj=self, id='fields.E338', ) ) else: if not (hasattr(field, 'remote_field') and getattr(field.remote_field, 'model', None) == related_model): errors.append( checks.Error( "'%s.%s' is not a foreign key to '%s'." % ( through._meta.object_name, field_name, related_model._meta.object_name, ), hint=hint, obj=self, id='fields.E339', ) ) return errors def _check_table_uniqueness(self, **kwargs): if isinstance(self.remote_field.through, str) or not self.remote_field.through._meta.managed: return [] registered_tables = { model._meta.db_table: model for model in self.opts.apps.get_models(include_auto_created=True) if model != self.remote_field.through and model._meta.managed } m2m_db_table = self.m2m_db_table() model = registered_tables.get(m2m_db_table) # The second condition allows multiple m2m relations on a model if # some point to a through model that proxies another through model. if model and model._meta.concrete_model != self.remote_field.through._meta.concrete_model: if model._meta.auto_created: def _get_field_name(model): for field in model._meta.auto_created._meta.many_to_many: if field.remote_field.through is model: return field.name opts = model._meta.auto_created._meta clashing_obj = '%s.%s' % (opts.label, _get_field_name(model)) else: clashing_obj = model._meta.label if settings.DATABASE_ROUTERS: error_class, error_id = checks.Warning, 'fields.W344' error_hint = ( 'You have configured settings.DATABASE_ROUTERS. Verify ' 'that the table of %r is correctly routed to a separate ' 'database.' % clashing_obj ) else: error_class, error_id = checks.Error, 'fields.E340' error_hint = None return [ error_class( "The field's intermediary table '%s' clashes with the " "table name of '%s'." % (m2m_db_table, clashing_obj), obj=self, hint=error_hint, id=error_id, ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() # Handle the simpler arguments. if self.db_table is not None: kwargs['db_table'] = self.db_table if self.remote_field.db_constraint is not True: kwargs['db_constraint'] = self.remote_field.db_constraint # Rel needs more work. 
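        # An auto-created through model is intentionally omitted below: it
        # can be rebuilt from the field itself, so only explicit ``through``
        # models are written into migrations.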
if isinstance(self.remote_field.model, str): kwargs['to'] = self.remote_field.model else: kwargs['to'] = "%s.%s" % ( self.remote_field.model._meta.app_label, self.remote_field.model._meta.object_name, ) if getattr(self.remote_field, 'through', None) is not None: if isinstance(self.remote_field.through, str): kwargs['through'] = self.remote_field.through elif not self.remote_field.through._meta.auto_created: kwargs['through'] = "%s.%s" % ( self.remote_field.through._meta.app_label, self.remote_field.through._meta.object_name, ) # If swappable is True, then see if we're actually pointing to the target # of a swap. swappable_setting = self.swappable_setting if swappable_setting is not None: # If it's already a settings reference, error. if hasattr(kwargs['to'], "setting_name"): if kwargs['to'].setting_name != swappable_setting: raise ValueError( "Cannot deconstruct a ManyToManyField pointing to a " "model that is swapped in place of more than one model " "(%s and %s)" % (kwargs['to'].setting_name, swappable_setting) ) kwargs['to'] = SettingsReference( kwargs['to'], swappable_setting, ) return name, path, args, kwargs def _get_path_info(self, direct=False, filtered_relation=None): """Called by both direct and indirect m2m traversal.""" int_model = self.remote_field.through linkfield1 = int_model._meta.get_field(self.m2m_field_name()) linkfield2 = int_model._meta.get_field(self.m2m_reverse_field_name()) if direct: join1infos = linkfield1.get_reverse_path_info() join2infos = linkfield2.get_path_info(filtered_relation) else: join1infos = linkfield2.get_reverse_path_info() join2infos = linkfield1.get_path_info(filtered_relation) # Get join infos between the last model of join 1 and the first model # of join 2. Assume the only reason these may differ is due to model # inheritance. join1_final = join1infos[-1].to_opts join2_initial = join2infos[0].from_opts if join1_final is join2_initial: intermediate_infos = [] elif issubclass(join1_final.model, join2_initial.model): intermediate_infos = join1_final.get_path_to_parent(join2_initial.model) else: intermediate_infos = join2_initial.get_path_from_parent(join1_final.model) return [*join1infos, *intermediate_infos, *join2infos] def get_path_info(self, filtered_relation=None): return self._get_path_info(direct=True, filtered_relation=filtered_relation) def get_reverse_path_info(self, filtered_relation=None): return self._get_path_info(direct=False, filtered_relation=filtered_relation) def _get_m2m_db_table(self, opts): """ Function that can be curried to provide the m2m table name for this relation. """ if self.remote_field.through is not None: return self.remote_field.through._meta.db_table elif self.db_table: return self.db_table else: m2m_table_name = '%s_%s' % (utils.strip_quotes(opts.db_table), self.name) return utils.truncate_name(m2m_table_name, connection.ops.max_name_length()) def _get_m2m_attr(self, related, attr): """ Function that can be curried to provide the source accessor or DB column name for the m2m table. 
""" cache_attr = '_m2m_%s_cache' % attr if hasattr(self, cache_attr): return getattr(self, cache_attr) if self.remote_field.through_fields is not None: link_field_name = self.remote_field.through_fields[0] else: link_field_name = None for f in self.remote_field.through._meta.fields: if (f.is_relation and f.remote_field.model == related.related_model and (link_field_name is None or link_field_name == f.name)): setattr(self, cache_attr, getattr(f, attr)) return getattr(self, cache_attr) def _get_m2m_reverse_attr(self, related, attr): """ Function that can be curried to provide the related accessor or DB column name for the m2m table. """ cache_attr = '_m2m_reverse_%s_cache' % attr if hasattr(self, cache_attr): return getattr(self, cache_attr) found = False if self.remote_field.through_fields is not None: link_field_name = self.remote_field.through_fields[1] else: link_field_name = None for f in self.remote_field.through._meta.fields: if f.is_relation and f.remote_field.model == related.model: if link_field_name is None and related.related_model == related.model: # If this is an m2m-intermediate to self, # the first foreign key you find will be # the source column. Keep searching for # the second foreign key. if found: setattr(self, cache_attr, getattr(f, attr)) break else: found = True elif link_field_name is None or link_field_name == f.name: setattr(self, cache_attr, getattr(f, attr)) break return getattr(self, cache_attr) def contribute_to_class(self, cls, name, **kwargs): # To support multiple relations to self, it's useful to have a non-None # related name on symmetrical relations for internal reasons. The # concept doesn't make a lot of sense externally ("you want me to # specify *what* on my non-reversible relation?!"), so we set it up # automatically. The funky name reduces the chance of an accidental # clash. if self.remote_field.symmetrical and ( self.remote_field.model == RECURSIVE_RELATIONSHIP_CONSTANT or self.remote_field.model == cls._meta.object_name ): self.remote_field.related_name = "%s_rel_+" % name elif self.remote_field.is_hidden(): # If the backwards relation is disabled, replace the original # related_name with one generated from the m2m field name. Django # still uses backwards relations internally and we need to avoid # clashes between multiple m2m fields with related_name == '+'. self.remote_field.related_name = "_%s_%s_+" % (cls.__name__.lower(), name) super().contribute_to_class(cls, name, **kwargs) # The intermediate m2m model is not auto created if: # 1) There is a manually specified intermediate, or # 2) The class owning the m2m field is abstract. # 3) The class owning the m2m field has been swapped out. if not cls._meta.abstract: if self.remote_field.through: def resolve_through_model(_, model, field): field.remote_field.through = model lazy_related_operation(resolve_through_model, cls, self.remote_field.through, field=self) elif not cls._meta.swapped: self.remote_field.through = create_many_to_many_intermediary_model(self, cls) # Add the descriptor for the m2m relation. setattr(cls, self.name, ManyToManyDescriptor(self.remote_field, reverse=False)) # Set up the accessor for the m2m table name for the relation. self.m2m_db_table = partial(self._get_m2m_db_table, cls._meta) def contribute_to_related_class(self, cls, related): # Internal M2Ms (i.e., those with a related name ending with '+') # and swapped models don't get a related descriptor. 
if not self.remote_field.is_hidden() and not related.related_model._meta.swapped: setattr(cls, related.get_accessor_name(), ManyToManyDescriptor(self.remote_field, reverse=True)) # Set up the accessors for the column names on the m2m table. self.m2m_column_name = partial(self._get_m2m_attr, related, 'column') self.m2m_reverse_name = partial(self._get_m2m_reverse_attr, related, 'column') self.m2m_field_name = partial(self._get_m2m_attr, related, 'name') self.m2m_reverse_field_name = partial(self._get_m2m_reverse_attr, related, 'name') get_m2m_rel = partial(self._get_m2m_attr, related, 'remote_field') self.m2m_target_field_name = lambda: get_m2m_rel().field_name get_m2m_reverse_rel = partial(self._get_m2m_reverse_attr, related, 'remote_field') self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name def set_attributes_from_rel(self): pass def value_from_object(self, obj): return [] if obj.pk is None else list(getattr(obj, self.attname).all()) def save_form_data(self, instance, data): getattr(instance, self.attname).set(data) def formfield(self, *, using=None, **kwargs): defaults = { 'form_class': forms.ModelMultipleChoiceField, 'queryset': self.remote_field.model._default_manager.using(using), **kwargs, } # If initial is passed in, it's a list of related objects, but the # MultipleChoiceField takes a list of IDs. if defaults.get('initial') is not None: initial = defaults['initial'] if callable(initial): initial = initial() defaults['initial'] = [i.pk for i in initial] return super().formfield(**defaults) def db_check(self, connection): return None def db_type(self, connection): # A ManyToManyField is not represented by a single column, # so return None. return None def db_parameters(self, connection): return {"type": None, "check": None}
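# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above; model names are
# hypothetical): the kind of user code the E333/E334/E335 checks govern.
# When the intermediate model carries extra foreign keys to either side of
# the relation, through_fields names the pair Django should use:
#
#     class Person(models.Model):
#         friends = models.ManyToManyField(
#             'self',
#             symmetrical=False,
#             through='Friendship',
#             through_fields=('from_person', 'to_person'),  # resolves fields.E333
#         )
#
#     class Friendship(models.Model):
#         from_person = models.ForeignKey(Person, models.CASCADE, related_name='+')
#         to_person = models.ForeignKey(Person, models.CASCADE, related_name='+')
#         referred_by = models.ForeignKey(Person, models.CASCADE, related_name='+')
# ---------------------------------------------------------------------------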
from django.apps.registry import Apps from django.contrib.contenttypes.fields import GenericForeignKey from django.db import models from django.db.migrations.exceptions import InvalidBasesError from django.db.migrations.operations import ( AddField, AlterField, DeleteModel, RemoveField, ) from django.db.migrations.state import ( ModelState, ProjectState, get_related_models_recursive, ) from django.test import SimpleTestCase, override_settings from django.test.utils import isolate_apps from .models import ( FoodManager, FoodQuerySet, ModelWithCustomBase, NoMigrationFoodManager, UnicodeModel, ) class StateTests(SimpleTestCase): """ Tests state construction, rendering and modification by operations. """ def test_create(self): """ Tests making a ProjectState from an Apps """ new_apps = Apps(["migrations"]) class Author(models.Model): name = models.CharField(max_length=255) bio = models.TextField() age = models.IntegerField(blank=True, null=True) class Meta: app_label = "migrations" apps = new_apps unique_together = ["name", "bio"] index_together = ["bio", "age"] class AuthorProxy(Author): class Meta: app_label = "migrations" apps = new_apps proxy = True ordering = ["name"] class SubAuthor(Author): width = models.FloatField(null=True) class Meta: app_label = "migrations" apps = new_apps class Book(models.Model): title = models.CharField(max_length=1000) author = models.ForeignKey(Author, models.CASCADE) contributors = models.ManyToManyField(Author) class Meta: app_label = "migrations" apps = new_apps verbose_name = "tome" db_table = "test_tome" indexes = [models.Index(fields=['title'])] class Food(models.Model): food_mgr = FoodManager('a', 'b') food_qs = FoodQuerySet.as_manager() food_no_mgr = NoMigrationFoodManager('x', 'y') class Meta: app_label = "migrations" apps = new_apps class FoodNoManagers(models.Model): class Meta: app_label = "migrations" apps = new_apps class FoodNoDefaultManager(models.Model): food_no_mgr = NoMigrationFoodManager('x', 'y') food_mgr = FoodManager('a', 'b') food_qs = FoodQuerySet.as_manager() class Meta: app_label = "migrations" apps = new_apps mgr1 = FoodManager('a', 'b') mgr2 = FoodManager('x', 'y', c=3, d=4) class FoodOrderedManagers(models.Model): # The managers on this model should be ordered by their creation # counter and not by the order in model body food_no_mgr = NoMigrationFoodManager('x', 'y') food_mgr2 = mgr2 food_mgr1 = mgr1 class Meta: app_label = "migrations" apps = new_apps project_state = ProjectState.from_apps(new_apps) author_state = project_state.models['migrations', 'author'] author_proxy_state = project_state.models['migrations', 'authorproxy'] sub_author_state = project_state.models['migrations', 'subauthor'] book_state = project_state.models['migrations', 'book'] food_state = project_state.models['migrations', 'food'] food_no_managers_state = project_state.models['migrations', 'foodnomanagers'] food_no_default_manager_state = project_state.models['migrations', 'foodnodefaultmanager'] food_order_manager_state = project_state.models['migrations', 'foodorderedmanagers'] book_index = models.Index(fields=['title']) book_index.set_name_with_model(Book) self.assertEqual(author_state.app_label, "migrations") self.assertEqual(author_state.name, "Author") self.assertEqual(list(author_state.fields), ["id", "name", "bio", "age"]) self.assertEqual(author_state.fields['name'].max_length, 255) self.assertIs(author_state.fields['bio'].null, False) self.assertIs(author_state.fields['age'].null, True) self.assertEqual( author_state.options, { 
"unique_together": {("name", "bio")}, "index_together": {("bio", "age")}, "indexes": [], "constraints": [], } ) self.assertEqual(author_state.bases, (models.Model,)) self.assertEqual(book_state.app_label, "migrations") self.assertEqual(book_state.name, "Book") self.assertEqual(list(book_state.fields), ["id", "title", "author", "contributors"]) self.assertEqual(book_state.fields['title'].max_length, 1000) self.assertIs(book_state.fields['author'].null, False) self.assertEqual(book_state.fields['contributors'].__class__.__name__, 'ManyToManyField') self.assertEqual( book_state.options, {"verbose_name": "tome", "db_table": "test_tome", "indexes": [book_index], "constraints": []}, ) self.assertEqual(book_state.bases, (models.Model,)) self.assertEqual(author_proxy_state.app_label, "migrations") self.assertEqual(author_proxy_state.name, "AuthorProxy") self.assertEqual(author_proxy_state.fields, {}) self.assertEqual( author_proxy_state.options, {"proxy": True, "ordering": ["name"], "indexes": [], "constraints": []}, ) self.assertEqual(author_proxy_state.bases, ("migrations.author",)) self.assertEqual(sub_author_state.app_label, "migrations") self.assertEqual(sub_author_state.name, "SubAuthor") self.assertEqual(len(sub_author_state.fields), 2) self.assertEqual(sub_author_state.bases, ("migrations.author",)) # The default manager is used in migrations self.assertEqual([name for name, mgr in food_state.managers], ['food_mgr']) self.assertTrue(all(isinstance(name, str) for name, mgr in food_state.managers)) self.assertEqual(food_state.managers[0][1].args, ('a', 'b', 1, 2)) # No explicit managers defined. Migrations will fall back to the default self.assertEqual(food_no_managers_state.managers, []) # food_mgr is used in migration but isn't the default mgr, hence add the # default self.assertEqual([name for name, mgr in food_no_default_manager_state.managers], ['food_no_mgr', 'food_mgr']) self.assertTrue(all(isinstance(name, str) for name, mgr in food_no_default_manager_state.managers)) self.assertEqual(food_no_default_manager_state.managers[0][1].__class__, models.Manager) self.assertIsInstance(food_no_default_manager_state.managers[1][1], FoodManager) self.assertEqual([name for name, mgr in food_order_manager_state.managers], ['food_mgr1', 'food_mgr2']) self.assertTrue(all(isinstance(name, str) for name, mgr in food_order_manager_state.managers)) self.assertEqual([mgr.args for name, mgr in food_order_manager_state.managers], [('a', 'b', 1, 2), ('x', 'y', 3, 4)]) def test_custom_default_manager_added_to_the_model_state(self): """ When the default manager of the model is a custom manager, it needs to be added to the model state. """ new_apps = Apps(['migrations']) custom_manager = models.Manager() class Author(models.Model): objects = models.TextField() authors = custom_manager class Meta: app_label = 'migrations' apps = new_apps project_state = ProjectState.from_apps(new_apps) author_state = project_state.models['migrations', 'author'] self.assertEqual(author_state.managers, [('authors', custom_manager)]) def test_custom_default_manager_named_objects_with_false_migration_flag(self): """ When a manager is added with a name of 'objects' but it does not have `use_in_migrations = True`, no migration should be added to the model state (#26643). 
""" new_apps = Apps(['migrations']) class Author(models.Model): objects = models.Manager() class Meta: app_label = 'migrations' apps = new_apps project_state = ProjectState.from_apps(new_apps) author_state = project_state.models['migrations', 'author'] self.assertEqual(author_state.managers, []) def test_no_duplicate_managers(self): """ When a manager is added with `use_in_migrations = True` and a parent model had a manager with the same name and `use_in_migrations = True`, the parent's manager shouldn't appear in the model state (#26881). """ new_apps = Apps(['migrations']) class PersonManager(models.Manager): use_in_migrations = True class Person(models.Model): objects = PersonManager() class Meta: abstract = True class BossManager(PersonManager): use_in_migrations = True class Boss(Person): objects = BossManager() class Meta: app_label = 'migrations' apps = new_apps project_state = ProjectState.from_apps(new_apps) boss_state = project_state.models['migrations', 'boss'] self.assertEqual(boss_state.managers, [('objects', Boss.objects)]) def test_custom_default_manager(self): new_apps = Apps(['migrations']) class Author(models.Model): manager1 = models.Manager() manager2 = models.Manager() class Meta: app_label = 'migrations' apps = new_apps default_manager_name = 'manager2' project_state = ProjectState.from_apps(new_apps) author_state = project_state.models['migrations', 'author'] self.assertEqual(author_state.options['default_manager_name'], 'manager2') self.assertEqual(author_state.managers, [('manager2', Author.manager1)]) def test_custom_base_manager(self): new_apps = Apps(['migrations']) class Author(models.Model): manager1 = models.Manager() manager2 = models.Manager() class Meta: app_label = 'migrations' apps = new_apps base_manager_name = 'manager2' class Author2(models.Model): manager1 = models.Manager() manager2 = models.Manager() class Meta: app_label = 'migrations' apps = new_apps base_manager_name = 'manager1' project_state = ProjectState.from_apps(new_apps) author_state = project_state.models['migrations', 'author'] self.assertEqual(author_state.options['base_manager_name'], 'manager2') self.assertEqual(author_state.managers, [ ('manager1', Author.manager1), ('manager2', Author.manager2), ]) author2_state = project_state.models['migrations', 'author2'] self.assertEqual(author2_state.options['base_manager_name'], 'manager1') self.assertEqual(author2_state.managers, [ ('manager1', Author2.manager1), ]) def test_apps_bulk_update(self): """ StateApps.bulk_update() should update apps.ready to False and reset the value afterwards. """ project_state = ProjectState() apps = project_state.apps with apps.bulk_update(): self.assertFalse(apps.ready) self.assertTrue(apps.ready) with self.assertRaises(ValueError): with apps.bulk_update(): self.assertFalse(apps.ready) raise ValueError() self.assertTrue(apps.ready) def test_render(self): """ Tests rendering a ProjectState into an Apps. 
""" project_state = ProjectState() project_state.add_model(ModelState( app_label="migrations", name="Tag", fields=[ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=100)), ("hidden", models.BooleanField()), ], )) project_state.add_model(ModelState( app_label="migrations", name="SubTag", fields=[ ('tag_ptr', models.OneToOneField( 'migrations.Tag', models.CASCADE, auto_created=True, parent_link=True, primary_key=True, to_field='id', serialize=False, )), ("awesome", models.BooleanField()), ], bases=("migrations.Tag",), )) base_mgr = models.Manager() mgr1 = FoodManager('a', 'b') mgr2 = FoodManager('x', 'y', c=3, d=4) project_state.add_model(ModelState( app_label="migrations", name="Food", fields=[ ("id", models.AutoField(primary_key=True)), ], managers=[ # The ordering we really want is objects, mgr1, mgr2 ('default', base_mgr), ('food_mgr2', mgr2), ('food_mgr1', mgr1), ] )) new_apps = project_state.apps self.assertEqual(new_apps.get_model("migrations", "Tag")._meta.get_field("name").max_length, 100) self.assertIs(new_apps.get_model("migrations", "Tag")._meta.get_field("hidden").null, False) self.assertEqual(len(new_apps.get_model("migrations", "SubTag")._meta.local_fields), 2) Food = new_apps.get_model("migrations", "Food") self.assertEqual([mgr.name for mgr in Food._meta.managers], ['default', 'food_mgr1', 'food_mgr2']) self.assertTrue(all(isinstance(mgr.name, str) for mgr in Food._meta.managers)) self.assertEqual([mgr.__class__ for mgr in Food._meta.managers], [models.Manager, FoodManager, FoodManager]) def test_render_model_inheritance(self): class Book(models.Model): title = models.CharField(max_length=1000) class Meta: app_label = "migrations" apps = Apps() class Novel(Book): class Meta: app_label = "migrations" apps = Apps() # First, test rendering individually apps = Apps(["migrations"]) # We shouldn't be able to render yet ms = ModelState.from_model(Novel) with self.assertRaises(InvalidBasesError): ms.render(apps) # Once the parent model is in the app registry, it should be fine ModelState.from_model(Book).render(apps) ModelState.from_model(Novel).render(apps) def test_render_model_with_multiple_inheritance(self): class Foo(models.Model): class Meta: app_label = "migrations" apps = Apps() class Bar(models.Model): class Meta: app_label = "migrations" apps = Apps() class FooBar(Foo, Bar): class Meta: app_label = "migrations" apps = Apps() class AbstractSubFooBar(FooBar): class Meta: abstract = True apps = Apps() class SubFooBar(AbstractSubFooBar): class Meta: app_label = "migrations" apps = Apps() apps = Apps(["migrations"]) # We shouldn't be able to render yet ms = ModelState.from_model(FooBar) with self.assertRaises(InvalidBasesError): ms.render(apps) # Once the parent models are in the app registry, it should be fine ModelState.from_model(Foo).render(apps) self.assertSequenceEqual(ModelState.from_model(Foo).bases, [models.Model]) ModelState.from_model(Bar).render(apps) self.assertSequenceEqual(ModelState.from_model(Bar).bases, [models.Model]) ModelState.from_model(FooBar).render(apps) self.assertSequenceEqual(ModelState.from_model(FooBar).bases, ['migrations.foo', 'migrations.bar']) ModelState.from_model(SubFooBar).render(apps) self.assertSequenceEqual(ModelState.from_model(SubFooBar).bases, ['migrations.foobar']) def test_render_project_dependencies(self): """ The ProjectState render method correctly renders models to account for inter-model base dependencies. 
""" new_apps = Apps() class A(models.Model): class Meta: app_label = "migrations" apps = new_apps class B(A): class Meta: app_label = "migrations" apps = new_apps class C(B): class Meta: app_label = "migrations" apps = new_apps class D(A): class Meta: app_label = "migrations" apps = new_apps class E(B): class Meta: app_label = "migrations" apps = new_apps proxy = True class F(D): class Meta: app_label = "migrations" apps = new_apps proxy = True # Make a ProjectState and render it project_state = ProjectState() project_state.add_model(ModelState.from_model(A)) project_state.add_model(ModelState.from_model(B)) project_state.add_model(ModelState.from_model(C)) project_state.add_model(ModelState.from_model(D)) project_state.add_model(ModelState.from_model(E)) project_state.add_model(ModelState.from_model(F)) final_apps = project_state.apps self.assertEqual(len(final_apps.get_models()), 6) # Now make an invalid ProjectState and make sure it fails project_state = ProjectState() project_state.add_model(ModelState.from_model(A)) project_state.add_model(ModelState.from_model(B)) project_state.add_model(ModelState.from_model(C)) project_state.add_model(ModelState.from_model(F)) with self.assertRaises(InvalidBasesError): project_state.apps def test_render_unique_app_labels(self): """ The ProjectState render method doesn't raise an ImproperlyConfigured exception about unique labels if two dotted app names have the same last part. """ class A(models.Model): class Meta: app_label = "django.contrib.auth" class B(models.Model): class Meta: app_label = "vendor.auth" # Make a ProjectState and render it project_state = ProjectState() project_state.add_model(ModelState.from_model(A)) project_state.add_model(ModelState.from_model(B)) self.assertEqual(len(project_state.apps.get_models()), 2) def test_reload_related_model_on_non_relational_fields(self): """ The model is reloaded even on changes that are not involved in relations. Other models pointing to or from it are also reloaded. """ project_state = ProjectState() project_state.apps # Render project state. 
project_state.add_model(ModelState('migrations', 'A', [])) project_state.add_model(ModelState('migrations', 'B', [ ('a', models.ForeignKey('A', models.CASCADE)), ])) project_state.add_model(ModelState('migrations', 'C', [ ('b', models.ForeignKey('B', models.CASCADE)), ('name', models.TextField()), ])) project_state.add_model(ModelState('migrations', 'D', [ ('a', models.ForeignKey('A', models.CASCADE)), ])) operation = AlterField( model_name='C', name='name', field=models.TextField(blank=True), ) operation.state_forwards('migrations', project_state) project_state.reload_model('migrations', 'a', delay=True) A = project_state.apps.get_model('migrations.A') B = project_state.apps.get_model('migrations.B') D = project_state.apps.get_model('migrations.D') self.assertIs(B._meta.get_field('a').related_model, A) self.assertIs(D._meta.get_field('a').related_model, A) def test_reload_model_relationship_consistency(self): project_state = ProjectState() project_state.add_model(ModelState('migrations', 'A', [])) project_state.add_model(ModelState('migrations', 'B', [ ('a', models.ForeignKey('A', models.CASCADE)), ])) project_state.add_model(ModelState('migrations', 'C', [ ('b', models.ForeignKey('B', models.CASCADE)), ])) A = project_state.apps.get_model('migrations.A') B = project_state.apps.get_model('migrations.B') C = project_state.apps.get_model('migrations.C') self.assertEqual([r.related_model for r in A._meta.related_objects], [B]) self.assertEqual([r.related_model for r in B._meta.related_objects], [C]) self.assertEqual([r.related_model for r in C._meta.related_objects], []) project_state.reload_model('migrations', 'a', delay=True) A = project_state.apps.get_model('migrations.A') B = project_state.apps.get_model('migrations.B') C = project_state.apps.get_model('migrations.C') self.assertEqual([r.related_model for r in A._meta.related_objects], [B]) self.assertEqual([r.related_model for r in B._meta.related_objects], [C]) self.assertEqual([r.related_model for r in C._meta.related_objects], []) def test_add_relations(self): """ #24573 - Adding relations to existing models should reload the referenced models too. 
""" new_apps = Apps() class A(models.Model): class Meta: app_label = 'something' apps = new_apps class B(A): class Meta: app_label = 'something' apps = new_apps class C(models.Model): class Meta: app_label = 'something' apps = new_apps project_state = ProjectState() project_state.add_model(ModelState.from_model(A)) project_state.add_model(ModelState.from_model(B)) project_state.add_model(ModelState.from_model(C)) project_state.apps # We need to work with rendered models old_state = project_state.clone() model_a_old = old_state.apps.get_model('something', 'A') model_b_old = old_state.apps.get_model('something', 'B') model_c_old = old_state.apps.get_model('something', 'C') # The relations between the old models are correct self.assertIs(model_a_old._meta.get_field('b').related_model, model_b_old) self.assertIs(model_b_old._meta.get_field('a_ptr').related_model, model_a_old) operation = AddField('c', 'to_a', models.OneToOneField( 'something.A', models.CASCADE, related_name='from_c', )) operation.state_forwards('something', project_state) model_a_new = project_state.apps.get_model('something', 'A') model_b_new = project_state.apps.get_model('something', 'B') model_c_new = project_state.apps.get_model('something', 'C') # All models have changed self.assertIsNot(model_a_old, model_a_new) self.assertIsNot(model_b_old, model_b_new) self.assertIsNot(model_c_old, model_c_new) # The relations between the old models still hold self.assertIs(model_a_old._meta.get_field('b').related_model, model_b_old) self.assertIs(model_b_old._meta.get_field('a_ptr').related_model, model_a_old) # The relations between the new models correct self.assertIs(model_a_new._meta.get_field('b').related_model, model_b_new) self.assertIs(model_b_new._meta.get_field('a_ptr').related_model, model_a_new) self.assertIs(model_a_new._meta.get_field('from_c').related_model, model_c_new) self.assertIs(model_c_new._meta.get_field('to_a').related_model, model_a_new) def test_remove_relations(self): """ #24225 - Relations between models are updated while remaining the relations and references for models of an old state. 
""" new_apps = Apps() class A(models.Model): class Meta: app_label = "something" apps = new_apps class B(models.Model): to_a = models.ForeignKey(A, models.CASCADE) class Meta: app_label = "something" apps = new_apps def get_model_a(state): return [mod for mod in state.apps.get_models() if mod._meta.model_name == 'a'][0] project_state = ProjectState() project_state.add_model(ModelState.from_model(A)) project_state.add_model(ModelState.from_model(B)) self.assertEqual(len(get_model_a(project_state)._meta.related_objects), 1) old_state = project_state.clone() operation = RemoveField("b", "to_a") operation.state_forwards("something", project_state) # Model from old_state still has the relation model_a_old = get_model_a(old_state) model_a_new = get_model_a(project_state) self.assertIsNot(model_a_old, model_a_new) self.assertEqual(len(model_a_old._meta.related_objects), 1) self.assertEqual(len(model_a_new._meta.related_objects), 0) # Same test for deleted model project_state = ProjectState() project_state.add_model(ModelState.from_model(A)) project_state.add_model(ModelState.from_model(B)) old_state = project_state.clone() operation = DeleteModel("b") operation.state_forwards("something", project_state) model_a_old = get_model_a(old_state) model_a_new = get_model_a(project_state) self.assertIsNot(model_a_old, model_a_new) self.assertEqual(len(model_a_old._meta.related_objects), 1) self.assertEqual(len(model_a_new._meta.related_objects), 0) def test_self_relation(self): """ #24513 - Modifying an object pointing to itself would cause it to be rendered twice and thus breaking its related M2M through objects. """ class A(models.Model): to_a = models.ManyToManyField('something.A', symmetrical=False) class Meta: app_label = "something" def get_model_a(state): return [mod for mod in state.apps.get_models() if mod._meta.model_name == 'a'][0] project_state = ProjectState() project_state.add_model(ModelState.from_model(A)) self.assertEqual(len(get_model_a(project_state)._meta.related_objects), 1) old_state = project_state.clone() operation = AlterField( model_name="a", name="to_a", field=models.ManyToManyField("something.A", symmetrical=False, blank=True) ) # At this point the model would be rendered twice causing its related # M2M through objects to point to an old copy and thus breaking their # attribute lookup. operation.state_forwards("something", project_state) model_a_old = get_model_a(old_state) model_a_new = get_model_a(project_state) self.assertIsNot(model_a_old, model_a_new) # The old model's _meta is still consistent field_to_a_old = model_a_old._meta.get_field("to_a") self.assertEqual(field_to_a_old.m2m_field_name(), "from_a") self.assertEqual(field_to_a_old.m2m_reverse_field_name(), "to_a") self.assertIs(field_to_a_old.related_model, model_a_old) self.assertIs(field_to_a_old.remote_field.through._meta.get_field('to_a').related_model, model_a_old) self.assertIs(field_to_a_old.remote_field.through._meta.get_field('from_a').related_model, model_a_old) # The new model's _meta is still consistent field_to_a_new = model_a_new._meta.get_field("to_a") self.assertEqual(field_to_a_new.m2m_field_name(), "from_a") self.assertEqual(field_to_a_new.m2m_reverse_field_name(), "to_a") self.assertIs(field_to_a_new.related_model, model_a_new) self.assertIs(field_to_a_new.remote_field.through._meta.get_field('to_a').related_model, model_a_new) self.assertIs(field_to_a_new.remote_field.through._meta.get_field('from_a').related_model, model_a_new) def test_equality(self): """ == and != are implemented correctly. 
""" # Test two things that should be equal project_state = ProjectState() project_state.add_model(ModelState( "migrations", "Tag", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=100)), ("hidden", models.BooleanField()), ], {}, None, )) project_state.apps # Fill the apps cached property other_state = project_state.clone() self.assertEqual(project_state, project_state) self.assertEqual(project_state, other_state) self.assertIs(project_state != project_state, False) self.assertIs(project_state != other_state, False) self.assertNotEqual(project_state.apps, other_state.apps) # Make a very small change (max_len 99) and see if that affects it project_state = ProjectState() project_state.add_model(ModelState( "migrations", "Tag", [ ("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=99)), ("hidden", models.BooleanField()), ], {}, None, )) self.assertNotEqual(project_state, other_state) self.assertIs(project_state == other_state, False) def test_dangling_references_throw_error(self): new_apps = Apps() class Author(models.Model): name = models.TextField() class Meta: app_label = "migrations" apps = new_apps class Publisher(models.Model): name = models.TextField() class Meta: app_label = "migrations" apps = new_apps class Book(models.Model): author = models.ForeignKey(Author, models.CASCADE) publisher = models.ForeignKey(Publisher, models.CASCADE) class Meta: app_label = "migrations" apps = new_apps class Magazine(models.Model): authors = models.ManyToManyField(Author) class Meta: app_label = "migrations" apps = new_apps # Make a valid ProjectState and render it project_state = ProjectState() project_state.add_model(ModelState.from_model(Author)) project_state.add_model(ModelState.from_model(Publisher)) project_state.add_model(ModelState.from_model(Book)) project_state.add_model(ModelState.from_model(Magazine)) self.assertEqual(len(project_state.apps.get_models()), 4) # now make an invalid one with a ForeignKey project_state = ProjectState() project_state.add_model(ModelState.from_model(Book)) msg = ( "The field migrations.Book.author was declared with a lazy reference " "to 'migrations.author', but app 'migrations' doesn't provide model 'author'.\n" "The field migrations.Book.publisher was declared with a lazy reference " "to 'migrations.publisher', but app 'migrations' doesn't provide model 'publisher'." ) with self.assertRaisesMessage(ValueError, msg): project_state.apps # And another with ManyToManyField. project_state = ProjectState() project_state.add_model(ModelState.from_model(Magazine)) msg = ( "The field migrations.Magazine.authors was declared with a lazy reference " "to 'migrations.author\', but app 'migrations' doesn't provide model 'author'.\n" "The field migrations.Magazine_authors.author was declared with a lazy reference " "to \'migrations.author\', but app 'migrations' doesn't provide model 'author'." ) with self.assertRaisesMessage(ValueError, msg): project_state.apps # And now with multiple models and multiple fields. 
project_state.add_model(ModelState.from_model(Book)) msg = ( "The field migrations.Book.author was declared with a lazy reference " "to 'migrations.author', but app 'migrations' doesn't provide model 'author'.\n" "The field migrations.Book.publisher was declared with a lazy reference " "to 'migrations.publisher', but app 'migrations' doesn't provide model 'publisher'.\n" "The field migrations.Magazine.authors was declared with a lazy reference " "to 'migrations.author', but app 'migrations' doesn't provide model 'author'.\n" "The field migrations.Magazine_authors.author was declared with a lazy reference " "to 'migrations.author', but app 'migrations' doesn't provide model 'author'." ) with self.assertRaisesMessage(ValueError, msg): project_state.apps def test_reference_mixed_case_app_label(self): new_apps = Apps() class Author(models.Model): class Meta: app_label = 'MiXedCase_migrations' apps = new_apps class Book(models.Model): author = models.ForeignKey(Author, models.CASCADE) class Meta: app_label = 'MiXedCase_migrations' apps = new_apps class Magazine(models.Model): authors = models.ManyToManyField(Author) class Meta: app_label = 'MiXedCase_migrations' apps = new_apps project_state = ProjectState() project_state.add_model(ModelState.from_model(Author)) project_state.add_model(ModelState.from_model(Book)) project_state.add_model(ModelState.from_model(Magazine)) self.assertEqual(len(project_state.apps.get_models()), 3) def test_real_apps(self): """ Including real apps can resolve dangling FK errors. This test relies on the fact that contenttypes is always loaded. """ new_apps = Apps() class TestModel(models.Model): ct = models.ForeignKey("contenttypes.ContentType", models.CASCADE) class Meta: app_label = "migrations" apps = new_apps # If we just stick it into an empty state it should fail project_state = ProjectState() project_state.add_model(ModelState.from_model(TestModel)) with self.assertRaises(ValueError): project_state.apps # If we include the real app it should succeed project_state = ProjectState(real_apps=["contenttypes"]) project_state.add_model(ModelState.from_model(TestModel)) rendered_state = project_state.apps self.assertEqual( len([x for x in rendered_state.get_models() if x._meta.app_label == "migrations"]), 1, ) def test_ignore_order_wrt(self): """ Makes sure ProjectState doesn't include OrderWrt fields when making from existing models. 
""" new_apps = Apps() class Author(models.Model): name = models.TextField() class Meta: app_label = "migrations" apps = new_apps class Book(models.Model): author = models.ForeignKey(Author, models.CASCADE) class Meta: app_label = "migrations" apps = new_apps order_with_respect_to = "author" # Make a valid ProjectState and render it project_state = ProjectState() project_state.add_model(ModelState.from_model(Author)) project_state.add_model(ModelState.from_model(Book)) self.assertEqual( list(project_state.models['migrations', 'book'].fields), ["id", "author"], ) def test_manager_refer_correct_model_version(self): """ #24147 - Managers refer to the correct version of a historical model """ project_state = ProjectState() project_state.add_model(ModelState( app_label="migrations", name="Tag", fields=[ ("id", models.AutoField(primary_key=True)), ("hidden", models.BooleanField()), ], managers=[ ('food_mgr', FoodManager('a', 'b')), ('food_qs', FoodQuerySet.as_manager()), ] )) old_model = project_state.apps.get_model('migrations', 'tag') new_state = project_state.clone() operation = RemoveField("tag", "hidden") operation.state_forwards("migrations", new_state) new_model = new_state.apps.get_model('migrations', 'tag') self.assertIsNot(old_model, new_model) self.assertIs(old_model, old_model.food_mgr.model) self.assertIs(old_model, old_model.food_qs.model) self.assertIs(new_model, new_model.food_mgr.model) self.assertIs(new_model, new_model.food_qs.model) self.assertIsNot(old_model.food_mgr, new_model.food_mgr) self.assertIsNot(old_model.food_qs, new_model.food_qs) self.assertIsNot(old_model.food_mgr.model, new_model.food_mgr.model) self.assertIsNot(old_model.food_qs.model, new_model.food_qs.model) def test_choices_iterator(self): """ #24483 - ProjectState.from_apps should not destructively consume Field.choices iterators. """ new_apps = Apps(["migrations"]) choices = [('a', 'A'), ('b', 'B')] class Author(models.Model): name = models.CharField(max_length=255) choice = models.CharField(max_length=255, choices=iter(choices)) class Meta: app_label = "migrations" apps = new_apps ProjectState.from_apps(new_apps) choices_field = Author._meta.get_field('choice') self.assertEqual(list(choices_field.choices), choices) class ModelStateTests(SimpleTestCase): def test_custom_model_base(self): state = ModelState.from_model(ModelWithCustomBase) self.assertEqual(state.bases, (models.Model,)) def test_bound_field_sanity_check(self): field = models.CharField(max_length=1) field.model = models.Model with self.assertRaisesMessage(ValueError, 'ModelState.fields cannot be bound to a model - "field" is.'): ModelState('app', 'Model', [('field', field)]) def test_sanity_check_to(self): field = models.ForeignKey(UnicodeModel, models.CASCADE) with self.assertRaisesMessage( ValueError, 'ModelState.fields cannot refer to a model class - "field.to" does. ' 'Use a string reference instead.' ): ModelState('app', 'Model', [('field', field)]) def test_sanity_check_through(self): field = models.ManyToManyField('UnicodeModel') field.remote_field.through = UnicodeModel with self.assertRaisesMessage( ValueError, 'ModelState.fields cannot refer to a model class - "field.through" does. ' 'Use a string reference instead.' ): ModelState('app', 'Model', [('field', field)]) def test_sanity_index_name(self): field = models.IntegerField() options = {'indexes': [models.Index(fields=['field'])]} msg = "Indexes passed to ModelState require a name attribute. <Index: fields='field'> doesn't have one." 
        with self.assertRaisesMessage(ValueError, msg):
            ModelState('app', 'Model', [('field', field)], options=options)

    def test_fields_immutability(self):
        """
        Rendering a model state doesn't alter its internal fields.
        """
        apps = Apps()
        field = models.CharField(max_length=1)
        state = ModelState('app', 'Model', [('name', field)])
        Model = state.render(apps)
        self.assertNotEqual(Model._meta.get_field('name'), field)

    def test_repr(self):
        field = models.CharField(max_length=1)
        state = ModelState('app', 'Model', [('name', field)], bases=['app.A', 'app.B', 'app.C'])
        self.assertEqual(repr(state), "<ModelState: 'app.Model'>")

        project_state = ProjectState()
        project_state.add_model(state)
        with self.assertRaisesMessage(InvalidBasesError, "Cannot resolve bases for [<ModelState: 'app.Model'>]"):
            project_state.apps

    def test_fields_ordering_equality(self):
        state = ModelState(
            'migrations',
            'Tag',
            [
                ('id', models.AutoField(primary_key=True)),
                ('name', models.CharField(max_length=100)),
                ('hidden', models.BooleanField()),
            ],
        )
        reordered_state = ModelState(
            'migrations',
            'Tag',
            [
                ('id', models.AutoField(primary_key=True)),
                # Purposely re-ordered.
                ('hidden', models.BooleanField()),
                ('name', models.CharField(max_length=100)),
            ],
        )
        self.assertEqual(state, reordered_state)

    @override_settings(TEST_SWAPPABLE_MODEL='migrations.SomeFakeModel')
    def test_create_swappable(self):
        """
        Tests making a ProjectState from an Apps with a swappable model
        """
        new_apps = Apps(['migrations'])

        class Author(models.Model):
            name = models.CharField(max_length=255)
            bio = models.TextField()
            age = models.IntegerField(blank=True, null=True)

            class Meta:
                app_label = 'migrations'
                apps = new_apps
                swappable = 'TEST_SWAPPABLE_MODEL'

        author_state = ModelState.from_model(Author)
        self.assertEqual(author_state.app_label, 'migrations')
        self.assertEqual(author_state.name, 'Author')
        self.assertEqual(list(author_state.fields), ['id', 'name', 'bio', 'age'])
        self.assertEqual(author_state.fields['name'].max_length, 255)
        self.assertIs(author_state.fields['bio'].null, False)
        self.assertIs(author_state.fields['age'].null, True)
        self.assertEqual(author_state.options, {'swappable': 'TEST_SWAPPABLE_MODEL', 'indexes': [], "constraints": []})
        self.assertEqual(author_state.bases, (models.Model,))
        self.assertEqual(author_state.managers, [])

    @override_settings(TEST_SWAPPABLE_MODEL='migrations.SomeFakeModel')
    def test_create_swappable_from_abstract(self):
        """
        A swappable model inheriting from a hierarchy:
        concrete -> abstract -> concrete.
""" new_apps = Apps(['migrations']) class SearchableLocation(models.Model): keywords = models.CharField(max_length=256) class Meta: app_label = 'migrations' apps = new_apps class Station(SearchableLocation): name = models.CharField(max_length=128) class Meta: abstract = True class BusStation(Station): bus_routes = models.CharField(max_length=128) inbound = models.BooleanField(default=False) class Meta(Station.Meta): app_label = 'migrations' apps = new_apps swappable = 'TEST_SWAPPABLE_MODEL' station_state = ModelState.from_model(BusStation) self.assertEqual(station_state.app_label, 'migrations') self.assertEqual(station_state.name, 'BusStation') self.assertEqual( list(station_state.fields), ['searchablelocation_ptr', 'name', 'bus_routes', 'inbound'] ) self.assertEqual(station_state.fields['name'].max_length, 128) self.assertIs(station_state.fields['bus_routes'].null, False) self.assertEqual( station_state.options, {'abstract': False, 'swappable': 'TEST_SWAPPABLE_MODEL', 'indexes': [], 'constraints': []} ) self.assertEqual(station_state.bases, ('migrations.searchablelocation',)) self.assertEqual(station_state.managers, []) @override_settings(TEST_SWAPPABLE_MODEL='migrations.SomeFakeModel') def test_custom_manager_swappable(self): """ Tests making a ProjectState from unused models with custom managers """ new_apps = Apps(['migrations']) class Food(models.Model): food_mgr = FoodManager('a', 'b') food_qs = FoodQuerySet.as_manager() food_no_mgr = NoMigrationFoodManager('x', 'y') class Meta: app_label = "migrations" apps = new_apps swappable = 'TEST_SWAPPABLE_MODEL' food_state = ModelState.from_model(Food) # The default manager is used in migrations self.assertEqual([name for name, mgr in food_state.managers], ['food_mgr']) self.assertEqual(food_state.managers[0][1].args, ('a', 'b', 1, 2)) @isolate_apps('migrations', 'django.contrib.contenttypes') def test_order_with_respect_to_private_field(self): class PrivateFieldModel(models.Model): content_type = models.ForeignKey('contenttypes.ContentType', models.CASCADE) object_id = models.PositiveIntegerField() private = GenericForeignKey() class Meta: order_with_respect_to = 'private' state = ModelState.from_model(PrivateFieldModel) self.assertNotIn('order_with_respect_to', state.options) @isolate_apps('migrations') def test_abstract_model_children_inherit_indexes(self): class Abstract(models.Model): name = models.CharField(max_length=50) class Meta: app_label = 'migrations' abstract = True indexes = [models.Index(fields=['name'])] class Child1(Abstract): pass class Child2(Abstract): pass child1_state = ModelState.from_model(Child1) child2_state = ModelState.from_model(Child2) index_names = [index.name for index in child1_state.options['indexes']] self.assertEqual(index_names, ['migrations__name_b0afd7_idx']) index_names = [index.name for index in child2_state.options['indexes']] self.assertEqual(index_names, ['migrations__name_016466_idx']) # Modifying the state doesn't modify the index on the model. 
child1_state.options['indexes'][0].name = 'bar' self.assertEqual(Child1._meta.indexes[0].name, 'migrations__name_b0afd7_idx') @isolate_apps('migrations') def test_explicit_index_name(self): class TestModel(models.Model): name = models.CharField(max_length=50) class Meta: app_label = 'migrations' indexes = [models.Index(fields=['name'], name='foo_idx')] model_state = ModelState.from_model(TestModel) index_names = [index.name for index in model_state.options['indexes']] self.assertEqual(index_names, ['foo_idx']) @isolate_apps('migrations') def test_from_model_constraints(self): class ModelWithConstraints(models.Model): size = models.IntegerField() class Meta: constraints = [models.CheckConstraint(check=models.Q(size__gt=1), name='size_gt_1')] state = ModelState.from_model(ModelWithConstraints) model_constraints = ModelWithConstraints._meta.constraints state_constraints = state.options['constraints'] self.assertEqual(model_constraints, state_constraints) self.assertIsNot(model_constraints, state_constraints) self.assertIsNot(model_constraints[0], state_constraints[0]) class RelatedModelsTests(SimpleTestCase): def setUp(self): self.apps = Apps(['migrations.related_models_app']) def create_model(self, name, foreign_keys=[], bases=(), abstract=False, proxy=False): test_name = 'related_models_app' assert not (abstract and proxy) meta_contents = { 'abstract': abstract, 'app_label': test_name, 'apps': self.apps, 'proxy': proxy, } meta = type("Meta", (), meta_contents) if not bases: bases = (models.Model,) body = { 'Meta': meta, '__module__': "__fake__", } fname_base = fname = '%s_%%d' % name.lower() for i, fk in enumerate(foreign_keys, 1): fname = fname_base % i body[fname] = fk return type(name, bases, body) def assertRelated(self, model, needle): self.assertEqual( get_related_models_recursive(model), {(n._meta.app_label, n._meta.model_name) for n in needle}, ) def test_unrelated(self): A = self.create_model("A") B = self.create_model("B") self.assertRelated(A, []) self.assertRelated(B, []) def test_direct_fk(self): A = self.create_model("A", foreign_keys=[models.ForeignKey('B', models.CASCADE)]) B = self.create_model("B") self.assertRelated(A, [B]) self.assertRelated(B, [A]) def test_direct_hidden_fk(self): A = self.create_model("A", foreign_keys=[models.ForeignKey('B', models.CASCADE, related_name='+')]) B = self.create_model("B") self.assertRelated(A, [B]) self.assertRelated(B, [A]) def test_fk_through_proxy(self): A = self.create_model("A") B = self.create_model("B", bases=(A,), proxy=True) C = self.create_model("C", bases=(B,), proxy=True) D = self.create_model("D", foreign_keys=[models.ForeignKey('C', models.CASCADE)]) self.assertRelated(A, [B, C, D]) self.assertRelated(B, [A, C, D]) self.assertRelated(C, [A, B, D]) self.assertRelated(D, [A, B, C]) def test_nested_fk(self): A = self.create_model("A", foreign_keys=[models.ForeignKey('B', models.CASCADE)]) B = self.create_model("B", foreign_keys=[models.ForeignKey('C', models.CASCADE)]) C = self.create_model("C") self.assertRelated(A, [B, C]) self.assertRelated(B, [A, C]) self.assertRelated(C, [A, B]) def test_two_sided(self): A = self.create_model("A", foreign_keys=[models.ForeignKey('B', models.CASCADE)]) B = self.create_model("B", foreign_keys=[models.ForeignKey('A', models.CASCADE)]) self.assertRelated(A, [B]) self.assertRelated(B, [A]) def test_circle(self): A = self.create_model("A", foreign_keys=[models.ForeignKey('B', models.CASCADE)]) B = self.create_model("B", foreign_keys=[models.ForeignKey('C', models.CASCADE)]) C = 
self.create_model("C", foreign_keys=[models.ForeignKey('A', models.CASCADE)]) self.assertRelated(A, [B, C]) self.assertRelated(B, [A, C]) self.assertRelated(C, [A, B]) def test_base(self): A = self.create_model("A") B = self.create_model("B", bases=(A,)) self.assertRelated(A, [B]) self.assertRelated(B, [A]) def test_nested_base(self): A = self.create_model("A") B = self.create_model("B", bases=(A,)) C = self.create_model("C", bases=(B,)) self.assertRelated(A, [B, C]) self.assertRelated(B, [A, C]) self.assertRelated(C, [A, B]) def test_multiple_bases(self): A = self.create_model("A") B = self.create_model("B") C = self.create_model("C", bases=(A, B,)) self.assertRelated(A, [B, C]) self.assertRelated(B, [A, C]) self.assertRelated(C, [A, B]) def test_multiple_nested_bases(self): A = self.create_model("A") B = self.create_model("B") C = self.create_model("C", bases=(A, B,)) D = self.create_model("D") E = self.create_model("E", bases=(D,)) F = self.create_model("F", bases=(C, E,)) Y = self.create_model("Y") Z = self.create_model("Z", bases=(Y,)) self.assertRelated(A, [B, C, D, E, F]) self.assertRelated(B, [A, C, D, E, F]) self.assertRelated(C, [A, B, D, E, F]) self.assertRelated(D, [A, B, C, E, F]) self.assertRelated(E, [A, B, C, D, F]) self.assertRelated(F, [A, B, C, D, E]) self.assertRelated(Y, [Z]) self.assertRelated(Z, [Y]) def test_base_to_base_fk(self): A = self.create_model("A", foreign_keys=[models.ForeignKey('Y', models.CASCADE)]) B = self.create_model("B", bases=(A,)) Y = self.create_model("Y") Z = self.create_model("Z", bases=(Y,)) self.assertRelated(A, [B, Y, Z]) self.assertRelated(B, [A, Y, Z]) self.assertRelated(Y, [A, B, Z]) self.assertRelated(Z, [A, B, Y]) def test_base_to_subclass_fk(self): A = self.create_model("A", foreign_keys=[models.ForeignKey('Z', models.CASCADE)]) B = self.create_model("B", bases=(A,)) Y = self.create_model("Y") Z = self.create_model("Z", bases=(Y,)) self.assertRelated(A, [B, Y, Z]) self.assertRelated(B, [A, Y, Z]) self.assertRelated(Y, [A, B, Z]) self.assertRelated(Z, [A, B, Y]) def test_direct_m2m(self): A = self.create_model("A", foreign_keys=[models.ManyToManyField('B')]) B = self.create_model("B") self.assertRelated(A, [A.a_1.rel.through, B]) self.assertRelated(B, [A, A.a_1.rel.through]) def test_direct_m2m_self(self): A = self.create_model("A", foreign_keys=[models.ManyToManyField('A')]) self.assertRelated(A, [A.a_1.rel.through]) def test_intermediate_m2m_self(self): A = self.create_model("A", foreign_keys=[models.ManyToManyField('A', through='T')]) T = self.create_model("T", foreign_keys=[ models.ForeignKey('A', models.CASCADE), models.ForeignKey('A', models.CASCADE), ]) self.assertRelated(A, [T]) self.assertRelated(T, [A]) def test_intermediate_m2m(self): A = self.create_model("A", foreign_keys=[models.ManyToManyField('B', through='T')]) B = self.create_model("B") T = self.create_model("T", foreign_keys=[ models.ForeignKey('A', models.CASCADE), models.ForeignKey('B', models.CASCADE), ]) self.assertRelated(A, [B, T]) self.assertRelated(B, [A, T]) self.assertRelated(T, [A, B]) def test_intermediate_m2m_extern_fk(self): A = self.create_model("A", foreign_keys=[models.ManyToManyField('B', through='T')]) B = self.create_model("B") Z = self.create_model("Z") T = self.create_model("T", foreign_keys=[ models.ForeignKey('A', models.CASCADE), models.ForeignKey('B', models.CASCADE), models.ForeignKey('Z', models.CASCADE), ]) self.assertRelated(A, [B, T, Z]) self.assertRelated(B, [A, T, Z]) self.assertRelated(T, [A, B, Z]) self.assertRelated(Z, [A, B, T]) 
    def test_intermediate_m2m_base(self):
        A = self.create_model("A", foreign_keys=[models.ManyToManyField('B', through='T')])
        B = self.create_model("B")
        S = self.create_model("S")
        T = self.create_model("T", foreign_keys=[
            models.ForeignKey('A', models.CASCADE),
            models.ForeignKey('B', models.CASCADE),
        ], bases=(S,))
        self.assertRelated(A, [B, S, T])
        self.assertRelated(B, [A, S, T])
        self.assertRelated(S, [A, B, T])
        self.assertRelated(T, [A, B, S])

    def test_generic_fk(self):
        A = self.create_model("A", foreign_keys=[
            models.ForeignKey('B', models.CASCADE),
            GenericForeignKey(),
        ])
        B = self.create_model("B", foreign_keys=[
            models.ForeignKey('C', models.CASCADE),
        ])
        self.assertRelated(A, [B])
        self.assertRelated(B, [A])

    def test_abstract_base(self):
        A = self.create_model("A", abstract=True)
        B = self.create_model("B", bases=(A,))
        self.assertRelated(A, [B])
        self.assertRelated(B, [])

    def test_nested_abstract_base(self):
        A = self.create_model("A", abstract=True)
        B = self.create_model("B", bases=(A,), abstract=True)
        C = self.create_model("C", bases=(B,))
        self.assertRelated(A, [B, C])
        self.assertRelated(B, [C])
        self.assertRelated(C, [])

    def test_proxy_base(self):
        A = self.create_model("A")
        B = self.create_model("B", bases=(A,), proxy=True)
        self.assertRelated(A, [B])
        self.assertRelated(B, [])

    def test_nested_proxy_base(self):
        A = self.create_model("A")
        B = self.create_model("B", bases=(A,), proxy=True)
        C = self.create_model("C", bases=(B,), proxy=True)
        self.assertRelated(A, [B, C])
        self.assertRelated(B, [C])
        self.assertRelated(C, [])

    def test_multiple_mixed_bases(self):
        A = self.create_model("A", abstract=True)
        M = self.create_model("M")
        P = self.create_model("P")
        Q = self.create_model("Q", bases=(P,), proxy=True)
        Z = self.create_model("Z", bases=(A, M, Q))
        # Z gets parent-link O2O fields m_ptr to M and p_ptr to P (P is the
        # concrete parent behind the proxy Q), which relates every concrete
        # model here to the others.
        self.assertRelated(A, [M, P, Q, Z])
        self.assertRelated(M, [P, Q, Z])
        self.assertRelated(P, [M, Q, Z])
        self.assertRelated(Q, [M, P, Z])
        self.assertRelated(Z, [M, P, Q])
import os import stat import sys import tempfile import unittest from django.utils import archive class TestArchive(unittest.TestCase): def setUp(self): self.testdir = os.path.join(os.path.dirname(__file__), 'archives') self.old_cwd = os.getcwd() os.chdir(self.testdir) def tearDown(self): os.chdir(self.old_cwd) def test_extract_function(self): for entry in os.scandir(self.testdir): with self.subTest(entry.name), tempfile.TemporaryDirectory() as tmpdir: archive.extract(entry.path, tmpdir) self.assertTrue(os.path.isfile(os.path.join(tmpdir, '1'))) self.assertTrue(os.path.isfile(os.path.join(tmpdir, '2'))) self.assertTrue(os.path.isfile(os.path.join(tmpdir, 'foo', '1'))) self.assertTrue(os.path.isfile(os.path.join(tmpdir, 'foo', '2'))) self.assertTrue(os.path.isfile(os.path.join(tmpdir, 'foo', 'bar', '1'))) self.assertTrue(os.path.isfile(os.path.join(tmpdir, 'foo', 'bar', '2'))) @unittest.skipIf(sys.platform == 'win32', 'Python on Windows has a limited os.chmod().') def test_extract_file_permissions(self): """archive.extract() preserves file permissions.""" mask = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO umask = os.umask(0) os.umask(umask) # Restore the original umask. for entry in os.scandir(self.testdir): if entry.name.startswith('leadpath_'): continue with self.subTest(entry.name), tempfile.TemporaryDirectory() as tmpdir: archive.extract(entry.path, tmpdir) # An executable file in the archive has executable permissions. filepath = os.path.join(tmpdir, 'executable') self.assertEqual(os.stat(filepath).st_mode & mask, 0o775) # A file is readable even if permission data is missing. filepath = os.path.join(tmpdir, 'no_permissions') self.assertEqual(os.stat(filepath).st_mode & mask, 0o666 & ~umask)
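# ---------------------------------------------------------------------------
# Minimal usage sketch for django.utils.archive (paths here are hypothetical):
#
#     from django.utils import archive
#     archive.extract('/tmp/template.tar.gz', '/tmp/dest')
#
# extract() selects the right unpacker from the file extension and unpacks
# into the target directory, preserving recorded file permissions where the
# platform allows -- the behavior the permissions test above verifies.
# ---------------------------------------------------------------------------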
from unittest import mock

from django.core.exceptions import ValidationError
from django.db import IntegrityError, connection, models
from django.db.models.constraints import BaseConstraint
from django.db.transaction import atomic
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature

from .models import (
    ChildModel, Product, UniqueConstraintConditionProduct,
    UniqueConstraintDeferrable, UniqueConstraintInclude,
    UniqueConstraintProduct,
)


def get_constraints(table):
    with connection.cursor() as cursor:
        return connection.introspection.get_constraints(cursor, table)


class BaseConstraintTests(SimpleTestCase):
    def test_constraint_sql(self):
        c = BaseConstraint('name')
        msg = 'This method must be implemented by a subclass.'
        with self.assertRaisesMessage(NotImplementedError, msg):
            c.constraint_sql(None, None)

    def test_create_sql(self):
        c = BaseConstraint('name')
        msg = 'This method must be implemented by a subclass.'
        with self.assertRaisesMessage(NotImplementedError, msg):
            c.create_sql(None, None)

    def test_remove_sql(self):
        c = BaseConstraint('name')
        msg = 'This method must be implemented by a subclass.'
        with self.assertRaisesMessage(NotImplementedError, msg):
            c.remove_sql(None, None)


class CheckConstraintTests(TestCase):
    def test_eq(self):
        check1 = models.Q(price__gt=models.F('discounted_price'))
        check2 = models.Q(price__lt=models.F('discounted_price'))
        self.assertEqual(
            models.CheckConstraint(check=check1, name='price'),
            models.CheckConstraint(check=check1, name='price'),
        )
        self.assertEqual(models.CheckConstraint(check=check1, name='price'), mock.ANY)
        self.assertNotEqual(
            models.CheckConstraint(check=check1, name='price'),
            models.CheckConstraint(check=check1, name='price2'),
        )
        self.assertNotEqual(
            models.CheckConstraint(check=check1, name='price'),
            models.CheckConstraint(check=check2, name='price'),
        )
        self.assertNotEqual(models.CheckConstraint(check=check1, name='price'), 1)

    def test_repr(self):
        check = models.Q(price__gt=models.F('discounted_price'))
        name = 'price_gt_discounted_price'
        constraint = models.CheckConstraint(check=check, name=name)
        self.assertEqual(
            repr(constraint),
            "<CheckConstraint: check='{}' name='{}'>".format(check, name),
        )

    def test_invalid_check_types(self):
        msg = 'CheckConstraint.check must be a Q instance or boolean expression.'
        with self.assertRaisesMessage(TypeError, msg):
            models.CheckConstraint(check=models.F('discounted_price'), name='check')

    def test_deconstruction(self):
        check = models.Q(price__gt=models.F('discounted_price'))
        name = 'price_gt_discounted_price'
        constraint = models.CheckConstraint(check=check, name=name)
        path, args, kwargs = constraint.deconstruct()
        self.assertEqual(path, 'django.db.models.CheckConstraint')
        self.assertEqual(args, ())
        self.assertEqual(kwargs, {'check': check, 'name': name})

    @skipUnlessDBFeature('supports_table_check_constraints')
    def test_database_constraint(self):
        Product.objects.create(price=10, discounted_price=5)
        with self.assertRaises(IntegrityError):
            Product.objects.create(price=10, discounted_price=20)

    @skipUnlessDBFeature('supports_table_check_constraints')
    def test_database_constraint_expression(self):
        Product.objects.create(price=999, discounted_price=5)
        with self.assertRaises(IntegrityError):
            Product.objects.create(price=1000, discounted_price=5)

    @skipUnlessDBFeature('supports_table_check_constraints')
    def test_database_constraint_expressionwrapper(self):
        Product.objects.create(price=499, discounted_price=5)
        with self.assertRaises(IntegrityError):
            Product.objects.create(price=500, discounted_price=5)

    @skipUnlessDBFeature('supports_table_check_constraints', 'can_introspect_check_constraints')
    def test_name(self):
        constraints = get_constraints(Product._meta.db_table)
        for expected_name in (
            'price_gt_discounted_price',
            'constraints_price_lt_1000_raw',
            'constraints_price_neq_500_wrap',
            'constraints_product_price_gt_0',
        ):
            with self.subTest(expected_name):
                self.assertIn(expected_name, constraints)

    @skipUnlessDBFeature('supports_table_check_constraints', 'can_introspect_check_constraints')
    def test_abstract_name(self):
        constraints = get_constraints(ChildModel._meta.db_table)
        self.assertIn('constraints_childmodel_adult', constraints)


class UniqueConstraintTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        cls.p1, cls.p2 = UniqueConstraintProduct.objects.bulk_create([
            UniqueConstraintProduct(name='p1', color='red'),
            UniqueConstraintProduct(name='p2'),
        ])

    def test_eq(self):
        self.assertEqual(
            models.UniqueConstraint(fields=['foo', 'bar'], name='unique'),
            models.UniqueConstraint(fields=['foo', 'bar'], name='unique'),
        )
        self.assertEqual(
            models.UniqueConstraint(fields=['foo', 'bar'], name='unique'),
            mock.ANY,
        )
        self.assertNotEqual(
            models.UniqueConstraint(fields=['foo', 'bar'], name='unique'),
            models.UniqueConstraint(fields=['foo', 'bar'], name='unique2'),
        )
        self.assertNotEqual(
            models.UniqueConstraint(fields=['foo', 'bar'], name='unique'),
            models.UniqueConstraint(fields=['foo', 'baz'], name='unique'),
        )
        self.assertNotEqual(models.UniqueConstraint(fields=['foo', 'bar'], name='unique'), 1)

    def test_eq_with_condition(self):
        self.assertEqual(
            models.UniqueConstraint(
                fields=['foo', 'bar'], name='unique',
                condition=models.Q(foo=models.F('bar')),
            ),
            models.UniqueConstraint(
                fields=['foo', 'bar'], name='unique',
                condition=models.Q(foo=models.F('bar')),
            ),
        )
        self.assertNotEqual(
            models.UniqueConstraint(
                fields=['foo', 'bar'], name='unique',
                condition=models.Q(foo=models.F('bar')),
            ),
            models.UniqueConstraint(
                fields=['foo', 'bar'], name='unique',
                condition=models.Q(foo=models.F('baz')),
            ),
        )

    def test_eq_with_deferrable(self):
        constraint_1 = models.UniqueConstraint(
            fields=['foo', 'bar'],
            name='unique',
            deferrable=models.Deferrable.DEFERRED,
        )
        constraint_2 = models.UniqueConstraint(
            fields=['foo', 'bar'],
            name='unique',
            deferrable=models.Deferrable.IMMEDIATE,
        )
        self.assertEqual(constraint_1, constraint_1)
        self.assertNotEqual(constraint_1, constraint_2)

    def test_eq_with_include(self):
        constraint_1 = models.UniqueConstraint(
            fields=['foo', 'bar'],
            name='include',
            include=['baz_1'],
        )
        constraint_2 = models.UniqueConstraint(
            fields=['foo', 'bar'],
            name='include',
            include=['baz_2'],
        )
        self.assertEqual(constraint_1, constraint_1)
        self.assertNotEqual(constraint_1, constraint_2)

    def test_eq_with_opclasses(self):
        constraint_1 = models.UniqueConstraint(
            fields=['foo', 'bar'],
            name='opclasses',
            opclasses=['text_pattern_ops', 'varchar_pattern_ops'],
        )
        constraint_2 = models.UniqueConstraint(
            fields=['foo', 'bar'],
            name='opclasses',
            opclasses=['varchar_pattern_ops', 'text_pattern_ops'],
        )
        self.assertEqual(constraint_1, constraint_1)
        self.assertNotEqual(constraint_1, constraint_2)

    def test_repr(self):
        fields = ['foo', 'bar']
        name = 'unique_fields'
        constraint = models.UniqueConstraint(fields=fields, name=name)
        self.assertEqual(
            repr(constraint),
            "<UniqueConstraint: fields=('foo', 'bar') name='unique_fields'>",
        )

    def test_repr_with_condition(self):
        constraint = models.UniqueConstraint(
            fields=['foo', 'bar'],
            name='unique_fields',
            condition=models.Q(foo=models.F('bar')),
        )
        self.assertEqual(
            repr(constraint),
            "<UniqueConstraint: fields=('foo', 'bar') name='unique_fields' "
            "condition=(AND: ('foo', F(bar)))>",
        )

    def test_repr_with_deferrable(self):
        constraint = models.UniqueConstraint(
            fields=['foo', 'bar'],
            name='unique_fields',
            deferrable=models.Deferrable.IMMEDIATE,
        )
        self.assertEqual(
            repr(constraint),
            "<UniqueConstraint: fields=('foo', 'bar') name='unique_fields' "
            "deferrable=Deferrable.IMMEDIATE>",
        )

    def test_repr_with_include(self):
        constraint = models.UniqueConstraint(
            fields=['foo', 'bar'],
            name='include_fields',
            include=['baz_1', 'baz_2'],
        )
        self.assertEqual(
            repr(constraint),
            "<UniqueConstraint: fields=('foo', 'bar') name='include_fields' "
            "include=('baz_1', 'baz_2')>",
        )

    def test_repr_with_opclasses(self):
        constraint = models.UniqueConstraint(
            fields=['foo', 'bar'],
            name='opclasses_fields',
            opclasses=['text_pattern_ops', 'varchar_pattern_ops'],
        )
        self.assertEqual(
            repr(constraint),
            "<UniqueConstraint: fields=('foo', 'bar') name='opclasses_fields' "
            "opclasses=['text_pattern_ops', 'varchar_pattern_ops']>",
        )

    def test_deconstruction(self):
        fields = ['foo', 'bar']
        name = 'unique_fields'
        constraint = models.UniqueConstraint(fields=fields, name=name)
        path, args, kwargs = constraint.deconstruct()
        self.assertEqual(path, 'django.db.models.UniqueConstraint')
        self.assertEqual(args, ())
        self.assertEqual(kwargs, {'fields': tuple(fields), 'name': name})

    def test_deconstruction_with_condition(self):
        fields = ['foo', 'bar']
        name = 'unique_fields'
        condition = models.Q(foo=models.F('bar'))
        constraint = models.UniqueConstraint(fields=fields, name=name, condition=condition)
        path, args, kwargs = constraint.deconstruct()
        self.assertEqual(path, 'django.db.models.UniqueConstraint')
        self.assertEqual(args, ())
        self.assertEqual(kwargs, {'fields': tuple(fields), 'name': name, 'condition': condition})

    def test_deconstruction_with_deferrable(self):
        fields = ['foo']
        name = 'unique_fields'
        constraint = models.UniqueConstraint(
            fields=fields,
            name=name,
            deferrable=models.Deferrable.DEFERRED,
        )
        path, args, kwargs = constraint.deconstruct()
        self.assertEqual(path, 'django.db.models.UniqueConstraint')
        self.assertEqual(args, ())
        self.assertEqual(kwargs, {
            'fields': tuple(fields),
            'name': name,
            'deferrable': models.Deferrable.DEFERRED,
        })

    def test_deconstruction_with_include(self):
        fields = ['foo', 'bar']
        name = 'unique_fields'
        include = ['baz_1', 'baz_2']
        constraint = models.UniqueConstraint(fields=fields, name=name, include=include)
        path, args, kwargs = constraint.deconstruct()
        self.assertEqual(path, 'django.db.models.UniqueConstraint')
        self.assertEqual(args, ())
        self.assertEqual(kwargs, {
            'fields': tuple(fields),
            'name': name,
            'include': tuple(include),
        })

    def test_deconstruction_with_opclasses(self):
        fields = ['foo', 'bar']
        name = 'unique_fields'
        opclasses = ['varchar_pattern_ops', 'text_pattern_ops']
        constraint = models.UniqueConstraint(fields=fields, name=name, opclasses=opclasses)
        path, args, kwargs = constraint.deconstruct()
        self.assertEqual(path, 'django.db.models.UniqueConstraint')
        self.assertEqual(args, ())
        self.assertEqual(kwargs, {
            'fields': tuple(fields),
            'name': name,
            'opclasses': opclasses,
        })

    def test_database_constraint(self):
        with self.assertRaises(IntegrityError):
            UniqueConstraintProduct.objects.create(name=self.p1.name, color=self.p1.color)

    @skipUnlessDBFeature('supports_partial_indexes')
    def test_database_constraint_with_condition(self):
        UniqueConstraintConditionProduct.objects.create(name='p1')
        UniqueConstraintConditionProduct.objects.create(name='p2')
        with self.assertRaises(IntegrityError):
            UniqueConstraintConditionProduct.objects.create(name='p1')

    def test_model_validation(self):
        msg = 'Unique constraint product with this Name and Color already exists.'
        with self.assertRaisesMessage(ValidationError, msg):
            UniqueConstraintProduct(name=self.p1.name, color=self.p1.color).validate_unique()

    @skipUnlessDBFeature('supports_partial_indexes')
    def test_model_validation_with_condition(self):
        """Partial unique constraints are ignored by Model.validate_unique()."""
        obj1 = UniqueConstraintConditionProduct.objects.create(name='p1', color='red')
        obj2 = UniqueConstraintConditionProduct.objects.create(name='p2')
        UniqueConstraintConditionProduct(name=obj1.name, color='blue').validate_unique()
        UniqueConstraintConditionProduct(name=obj2.name).validate_unique()

    def test_name(self):
        constraints = get_constraints(UniqueConstraintProduct._meta.db_table)
        expected_name = 'name_color_uniq'
        self.assertIn(expected_name, constraints)

    def test_condition_must_be_q(self):
        with self.assertRaisesMessage(ValueError, 'UniqueConstraint.condition must be a Q instance.'):
            models.UniqueConstraint(name='uniq', fields=['name'], condition='invalid')

    @skipUnlessDBFeature('supports_deferrable_unique_constraints')
    def test_initially_deferred_database_constraint(self):
        obj_1 = UniqueConstraintDeferrable.objects.create(name='p1', shelf='front')
        obj_2 = UniqueConstraintDeferrable.objects.create(name='p2', shelf='back')

        def swap():
            obj_1.name, obj_2.name = obj_2.name, obj_1.name
            obj_1.save()
            obj_2.save()

        swap()
        # Behavior can be changed with SET CONSTRAINTS.
        with self.assertRaises(IntegrityError):
            with atomic(), connection.cursor() as cursor:
                constraint_name = connection.ops.quote_name('name_init_deferred_uniq')
                cursor.execute('SET CONSTRAINTS %s IMMEDIATE' % constraint_name)
                swap()

    @skipUnlessDBFeature('supports_deferrable_unique_constraints')
    def test_initially_immediate_database_constraint(self):
        obj_1 = UniqueConstraintDeferrable.objects.create(name='p1', shelf='front')
        obj_2 = UniqueConstraintDeferrable.objects.create(name='p2', shelf='back')
        obj_1.shelf, obj_2.shelf = obj_2.shelf, obj_1.shelf
        with self.assertRaises(IntegrityError), atomic():
            obj_1.save()
        # Behavior can be changed with SET CONSTRAINTS.
        with connection.cursor() as cursor:
            constraint_name = connection.ops.quote_name('sheld_init_immediate_uniq')
            cursor.execute('SET CONSTRAINTS %s DEFERRED' % constraint_name)
            obj_1.save()
            obj_2.save()

    def test_deferrable_with_condition(self):
        message = 'UniqueConstraint with conditions cannot be deferred.'
        with self.assertRaisesMessage(ValueError, message):
            models.UniqueConstraint(
                fields=['name'],
                name='name_without_color_unique',
                condition=models.Q(color__isnull=True),
                deferrable=models.Deferrable.DEFERRED,
            )

    def test_deferrable_with_include(self):
        message = 'UniqueConstraint with include fields cannot be deferred.'
        with self.assertRaisesMessage(ValueError, message):
            models.UniqueConstraint(
                fields=['name'],
                name='name_inc_color_color_unique',
                include=['color'],
                deferrable=models.Deferrable.DEFERRED,
            )

    def test_deferrable_with_opclasses(self):
        message = 'UniqueConstraint with opclasses cannot be deferred.'
        with self.assertRaisesMessage(ValueError, message):
            models.UniqueConstraint(
                fields=['name'],
                name='name_text_pattern_ops_unique',
                opclasses=['text_pattern_ops'],
                deferrable=models.Deferrable.DEFERRED,
            )

    def test_invalid_defer_argument(self):
        message = 'UniqueConstraint.deferrable must be a Deferrable instance.'
        with self.assertRaisesMessage(ValueError, message):
            models.UniqueConstraint(
                fields=['name'],
                name='name_invalid',
                deferrable='invalid',
            )

    @skipUnlessDBFeature(
        'supports_table_check_constraints',
        'supports_covering_indexes',
    )
    def test_include_database_constraint(self):
        UniqueConstraintInclude.objects.create(name='p1', color='red')
        with self.assertRaises(IntegrityError):
            UniqueConstraintInclude.objects.create(name='p1', color='blue')

    def test_invalid_include_argument(self):
        msg = 'UniqueConstraint.include must be a list or tuple.'
        with self.assertRaisesMessage(ValueError, msg):
            models.UniqueConstraint(
                name='uniq_include',
                fields=['field'],
                include='other',
            )

    def test_invalid_opclasses_argument(self):
        msg = 'UniqueConstraint.opclasses must be a list or tuple.'
        with self.assertRaisesMessage(ValueError, msg):
            models.UniqueConstraint(
                name='uniq_opclasses',
                fields=['field'],
                opclasses='jsonb_path_ops',
            )

    def test_opclasses_and_fields_same_length(self):
        msg = (
            'UniqueConstraint.fields and UniqueConstraint.opclasses must have '
            'the same number of elements.'
        )
        with self.assertRaisesMessage(ValueError, msg):
            models.UniqueConstraint(
                name='uniq_opclasses',
                fields=['field'],
                opclasses=['foo', 'bar'],
            )
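# --- Illustrative model declaration (an editorial sketch, not the tested models) ---
# A minimal example of how constraints like those exercised above are declared:
# both CheckConstraint and UniqueConstraint are attached via Meta.constraints.
# The model, app label, and constraint names here are hypothetical.
from django.db import models


class SketchProduct(models.Model):
    name = models.CharField(max_length=32)
    color = models.CharField(max_length=32, null=True)
    price = models.IntegerField(null=True)
    discounted_price = models.IntegerField(null=True)

    class Meta:
        app_label = 'constraints'  # assumed app label for this sketch
        constraints = [
            models.CheckConstraint(
                check=models.Q(price__gt=models.F('discounted_price')),
                name='sketch_price_gt_discounted_price',
            ),
            models.UniqueConstraint(
                fields=['name', 'color'],
                name='sketch_name_color_uniq',
            ),
        ]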
5cf8b27e27fa2f9147637677d890a60ca036102daf60e62ace842db59697c2b1
import itertools
import json
import os
import re
from urllib.parse import unquote

from django.apps import apps
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.template import Context, Engine
from django.urls import translate_url
from django.utils.formats import get_format
from django.utils.http import url_has_allowed_host_and_scheme
from django.utils.translation import (
    LANGUAGE_SESSION_KEY, check_for_language, get_language,
)
from django.utils.translation.trans_real import DjangoTranslation
from django.views.generic import View

LANGUAGE_QUERY_PARAMETER = 'language'


def set_language(request):
    """
    Redirect to a given URL while setting the chosen language in the session
    (if enabled) and in a cookie. The URL and the language code need to be
    specified in the request parameters.

    Since this view changes how the user will see the rest of the site, it
    must only be accessed as a POST request. If called as a GET request, it
    will redirect to the page in the request (the 'next' parameter) without
    changing any state.
    """
    next_url = request.POST.get('next', request.GET.get('next'))
    if (
        (next_url or request.accepts('text/html')) and
        not url_has_allowed_host_and_scheme(
            url=next_url,
            allowed_hosts={request.get_host()},
            require_https=request.is_secure(),
        )
    ):
        next_url = request.META.get('HTTP_REFERER')
        # HTTP_REFERER may be encoded.
        next_url = next_url and unquote(next_url)
        if not url_has_allowed_host_and_scheme(
            url=next_url,
            allowed_hosts={request.get_host()},
            require_https=request.is_secure(),
        ):
            next_url = '/'
    response = HttpResponseRedirect(next_url) if next_url else HttpResponse(status=204)
    if request.method == 'POST':
        lang_code = request.POST.get(LANGUAGE_QUERY_PARAMETER)
        if lang_code and check_for_language(lang_code):
            if next_url:
                next_trans = translate_url(next_url, lang_code)
                if next_trans != next_url:
                    response = HttpResponseRedirect(next_trans)
            if hasattr(request, 'session'):
                # Storing the language in the session is deprecated.
                # (RemovedInDjango40Warning)
                request.session[LANGUAGE_SESSION_KEY] = lang_code
            response.set_cookie(
                settings.LANGUAGE_COOKIE_NAME, lang_code,
                max_age=settings.LANGUAGE_COOKIE_AGE,
                path=settings.LANGUAGE_COOKIE_PATH,
                domain=settings.LANGUAGE_COOKIE_DOMAIN,
                secure=settings.LANGUAGE_COOKIE_SECURE,
                httponly=settings.LANGUAGE_COOKIE_HTTPONLY,
                samesite=settings.LANGUAGE_COOKIE_SAMESITE,
            )
    return response


def get_formats():
    """Return all format strings required for i18n to work."""
    FORMAT_SETTINGS = (
        'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
        'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
        'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
        'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
        'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS',
    )
    return {attr: get_format(attr) for attr in FORMAT_SETTINGS}


js_catalog_template = r"""
{% autoescape off %}
(function(globals) {

  var django = globals.django || (globals.django = {});

  {% if plural %}
  django.pluralidx = function(n) {
    var v={{ plural }};
    if (typeof(v) == 'boolean') {
      return v ? 1 : 0;
    } else {
      return v;
    }
  };
  {% else %}
  django.pluralidx = function(count) { return (count == 1) ? 0 : 1; };
  {% endif %}

  /* gettext library */

  django.catalog = django.catalog || {};

  {% if catalog_str %}
  var newcatalog = {{ catalog_str }};
  for (var key in newcatalog) {
    django.catalog[key] = newcatalog[key];
  }
  {% endif %}

  if (!django.jsi18n_initialized) {
    django.gettext = function(msgid) {
      var value = django.catalog[msgid];
      if (typeof(value) == 'undefined') {
        return msgid;
      } else {
        return (typeof(value) == 'string') ? value : value[0];
      }
    };

    django.ngettext = function(singular, plural, count) {
      var value = django.catalog[singular];
      if (typeof(value) == 'undefined') {
        return (count == 1) ? singular : plural;
      } else {
        return value.constructor === Array ? value[django.pluralidx(count)] : value;
      }
    };

    django.gettext_noop = function(msgid) { return msgid; };

    django.pgettext = function(context, msgid) {
      var value = django.gettext(context + '\x04' + msgid);
      if (value.includes('\x04')) {
        value = msgid;
      }
      return value;
    };

    django.npgettext = function(context, singular, plural, count) {
      var value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count);
      if (value.includes('\x04')) {
        value = django.ngettext(singular, plural, count);
      }
      return value;
    };

    django.interpolate = function(fmt, obj, named) {
      if (named) {
        return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
      } else {
        return fmt.replace(/%s/g, function(match){return String(obj.shift())});
      }
    };

    /* formatting library */

    django.formats = {{ formats_str }};

    django.get_format = function(format_type) {
      var value = django.formats[format_type];
      if (typeof(value) == 'undefined') {
        return format_type;
      } else {
        return value;
      }
    };

    /* add to global namespace */
    globals.pluralidx = django.pluralidx;
    globals.gettext = django.gettext;
    globals.ngettext = django.ngettext;
    globals.gettext_noop = django.gettext_noop;
    globals.pgettext = django.pgettext;
    globals.npgettext = django.npgettext;
    globals.interpolate = django.interpolate;
    globals.get_format = django.get_format;

    django.jsi18n_initialized = true;
  }
}(this));
{% endautoescape %}
"""


class JavaScriptCatalog(View):
    """
    Return the selected language catalog as a JavaScript library.

    Receive the list of packages to check for translations in the `packages`
    kwarg either from the extra dictionary passed to the path() function or as
    a plus-sign delimited string from the request. Default is 'django.conf'.

    You can override the gettext domain for this view, but usually you don't
    want to do that as JavaScript messages go to the djangojs domain. This
    might be needed if you deliver your JavaScript source from Django
    templates.
    """
    domain = 'djangojs'
    packages = None

    def get(self, request, *args, **kwargs):
        locale = get_language()
        domain = kwargs.get('domain', self.domain)
        # If packages are not provided, default to all installed packages, as
        # DjangoTranslation without localedirs harvests them all.
        packages = kwargs.get('packages', '')
        packages = packages.split('+') if packages else self.packages
        paths = self.get_paths(packages) if packages else None
        self.translation = DjangoTranslation(locale, domain=domain, localedirs=paths)
        context = self.get_context_data(**kwargs)
        return self.render_to_response(context)

    def get_paths(self, packages):
        allowable_packages = {app_config.name: app_config for app_config in apps.get_app_configs()}
        app_configs = [allowable_packages[p] for p in packages if p in allowable_packages]
        if len(app_configs) < len(packages):
            excluded = [p for p in packages if p not in allowable_packages]
            raise ValueError(
                'Invalid package(s) provided to JavaScriptCatalog: %s' % ','.join(excluded)
            )
        # paths of requested packages
        return [os.path.join(app.path, 'locale') for app in app_configs]

    @property
    def _num_plurals(self):
        """
        Return the number of plurals for this catalog language, or 2 if no
        plural string is available.
        """
        match = re.search(r'nplurals=\s*(\d+)', self._plural_string or '')
        if match:
            return int(match[1])
        return 2

    @property
    def _plural_string(self):
        """
        Return the plural string (including nplurals) for this catalog
        language, or None if no plural string is available.
        """
        if '' in self.translation._catalog:
            for line in self.translation._catalog[''].split('\n'):
                if line.startswith('Plural-Forms:'):
                    return line.split(':', 1)[1].strip()
        return None

    def get_plural(self):
        plural = self._plural_string
        if plural is not None:
            # This should be a compiled function of a typical plural-form:
            # Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 :
            #               n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
            plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=', 1)[1]
        return plural

    def get_catalog(self):
        pdict = {}
        num_plurals = self._num_plurals
        catalog = {}
        trans_cat = self.translation._catalog
        trans_fallback_cat = self.translation._fallback._catalog if self.translation._fallback else {}
        seen_keys = set()
        for key, value in itertools.chain(trans_cat.items(), trans_fallback_cat.items()):
            if key == '' or key in seen_keys:
                continue
            if isinstance(key, str):
                catalog[key] = value
            elif isinstance(key, tuple):
                msgid, cnt = key
                pdict.setdefault(msgid, {})[cnt] = value
            else:
                raise TypeError(key)
            seen_keys.add(key)
        for k, v in pdict.items():
            catalog[k] = [v.get(i, '') for i in range(num_plurals)]
        return catalog

    def get_context_data(self, **kwargs):
        return {
            'catalog': self.get_catalog(),
            'formats': get_formats(),
            'plural': self.get_plural(),
        }

    def render_to_response(self, context, **response_kwargs):
        def indent(s):
            return s.replace('\n', '\n  ')

        template = Engine().from_string(js_catalog_template)
        context['catalog_str'] = indent(
            json.dumps(context['catalog'], sort_keys=True, indent=2)
        ) if context['catalog'] else None
        context['formats_str'] = indent(json.dumps(context['formats'], sort_keys=True, indent=2))

        return HttpResponse(template.render(Context(context)), 'text/javascript; charset="utf-8"')


class JSONCatalog(JavaScriptCatalog):
    """
    Return the selected language catalog as a JSON object.

    Receive the same parameters as JavaScriptCatalog and return a response
    with a JSON object of the following format:

        {
            "catalog": {
                # Translations catalog
            },
            "formats": {
                # Language formats for date, time, etc.
            },
            "plural": '...'  # Expression for plural forms, or null.
        }
    """
    def render_to_response(self, context, **response_kwargs):
        return JsonResponse(context)
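# --- Illustrative URL configuration (an editorial sketch, not part of this module) ---
# A minimal example of how JavaScriptCatalog and JSONCatalog are typically
# wired up in a project's URLconf; the URL names and the 'packages' value
# ('myapp') are hypothetical.
from django.urls import path
from django.views.i18n import JSONCatalog, JavaScriptCatalog

urlpatterns = [
    path('jsi18n/', JavaScriptCatalog.as_view(), name='javascript-catalog'),
    path(
        'jsi18n/myapp/',
        JavaScriptCatalog.as_view(packages=['myapp']),  # hypothetical package
        name='javascript-catalog-myapp',
    ),
    path('jsoni18n/', JSONCatalog.as_view(), name='json-catalog'),
]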
042113bec17110eab0944699c8a4da487664a24b8aa9a5021a5583ac4a729cd7
import copy
import datetime
import inspect
from decimal import Decimal

from django.core.exceptions import EmptyResultSet, FieldError
from django.db import NotSupportedError, connection
from django.db.models import fields
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import Q
from django.utils.deconstruct import deconstructible
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable


class SQLiteNumericMixin:
    """
    Some expressions with output_field=DecimalField() must be cast to
    numeric to be properly filtered.
    """
    def as_sqlite(self, compiler, connection, **extra_context):
        sql, params = self.as_sql(compiler, connection, **extra_context)
        try:
            if self.output_field.get_internal_type() == 'DecimalField':
                sql = 'CAST(%s AS NUMERIC)' % sql
        except FieldError:
            pass
        return sql, params


class Combinable:
    """
    Provide the ability to combine one or two objects with
    some connector. For example F('foo') + F('bar').
    """

    # Arithmetic connectors
    ADD = '+'
    SUB = '-'
    MUL = '*'
    DIV = '/'
    POW = '^'
    # The following is a quoted % operator - it is quoted because it can be
    # used in strings that also have parameter substitution.
    MOD = '%%'

    # Bitwise operators - note that these are generated by .bitand()
    # and .bitor(), the '&' and '|' are reserved for boolean operator
    # usage.
    BITAND = '&'
    BITOR = '|'
    BITLEFTSHIFT = '<<'
    BITRIGHTSHIFT = '>>'
    BITXOR = '#'

    def _combine(self, other, connector, reversed):
        if not hasattr(other, 'resolve_expression'):
            # everything must be resolvable to an expression
            output_field = (
                fields.DurationField()
                if isinstance(other, datetime.timedelta) else
                None
            )
            other = Value(other, output_field=output_field)

        if reversed:
            return CombinedExpression(other, connector, self)
        return CombinedExpression(self, connector, other)

    #############
    # OPERATORS #
    #############

    def __neg__(self):
        return self._combine(-1, self.MUL, False)

    def __add__(self, other):
        return self._combine(other, self.ADD, False)

    def __sub__(self, other):
        return self._combine(other, self.SUB, False)

    def __mul__(self, other):
        return self._combine(other, self.MUL, False)

    def __truediv__(self, other):
        return self._combine(other, self.DIV, False)

    def __mod__(self, other):
        return self._combine(other, self.MOD, False)

    def __pow__(self, other):
        return self._combine(other, self.POW, False)

    def __and__(self, other):
        if getattr(self, 'conditional', False) and getattr(other, 'conditional', False):
            return Q(self) & Q(other)
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )

    def bitand(self, other):
        return self._combine(other, self.BITAND, False)

    def bitleftshift(self, other):
        return self._combine(other, self.BITLEFTSHIFT, False)

    def bitrightshift(self, other):
        return self._combine(other, self.BITRIGHTSHIFT, False)

    def bitxor(self, other):
        return self._combine(other, self.BITXOR, False)

    def __or__(self, other):
        if getattr(self, 'conditional', False) and getattr(other, 'conditional', False):
            return Q(self) | Q(other)
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )

    def bitor(self, other):
        return self._combine(other, self.BITOR, False)

    def __radd__(self, other):
        return self._combine(other, self.ADD, True)

    def __rsub__(self, other):
        return self._combine(other, self.SUB, True)

    def __rmul__(self, other):
        return self._combine(other, self.MUL, True)

    def __rtruediv__(self, other):
        return self._combine(other, self.DIV, True)

    def __rmod__(self, other):
        return self._combine(other, self.MOD, True)

    def __rpow__(self, other):
        return self._combine(other, self.POW, True)

    def __rand__(self, other):
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )

    def __ror__(self, other):
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )


@deconstructible
class BaseExpression:
    """Base class for all query expressions."""

    # aggregate specific fields
    is_summary = False
    _output_field_resolved_to_none = False
    # Can the expression be used in a WHERE clause?
    filterable = True
    # Can the expression be used as a source expression in Window?
    window_compatible = False

    def __init__(self, output_field=None):
        if output_field is not None:
            self.output_field = output_field

    def __getstate__(self):
        state = self.__dict__.copy()
        state.pop('convert_value', None)
        return state

    def get_db_converters(self, connection):
        return (
            []
            if self.convert_value is self._convert_value_noop else
            [self.convert_value]
        ) + self.output_field.get_db_converters(connection)

    def get_source_expressions(self):
        return []

    def set_source_expressions(self, exprs):
        assert not exprs

    def _parse_expressions(self, *expressions):
        return [
            arg if hasattr(arg, 'resolve_expression') else (
                F(arg) if isinstance(arg, str) else Value(arg)
            ) for arg in expressions
        ]

    def as_sql(self, compiler, connection):
        """
        Responsible for returning a (sql, [params]) tuple to be included
        in the current query.

        Different backends can provide their own implementation, by
        providing an `as_{vendor}` method and patching the Expression:

        ```
        def override_as_sql(self, compiler, connection):
            # custom logic
            return super().as_sql(compiler, connection)
        setattr(Expression, 'as_' + connection.vendor, override_as_sql)
        ```

        Arguments:
         * compiler: the query compiler responsible for generating the query.
           Must have a compile method, returning a (sql, [params]) tuple.
           Calling compiler(value) will return a quoted `value`.

         * connection: the database connection used for the current query.

        Return: (sql, params)
          Where `sql` is a string containing ordered sql parameters to be
          replaced with the elements of the list `params`.
        """
        raise NotImplementedError("Subclasses must implement as_sql()")

    @cached_property
    def contains_aggregate(self):
        return any(expr and expr.contains_aggregate for expr in self.get_source_expressions())

    @cached_property
    def contains_over_clause(self):
        return any(expr and expr.contains_over_clause for expr in self.get_source_expressions())

    @cached_property
    def contains_column_references(self):
        return any(expr and expr.contains_column_references for expr in self.get_source_expressions())

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        """
        Provide the chance to do any preprocessing or validation before being
        added to the query.

        Arguments:
         * query: the backend query implementation
         * allow_joins: boolean allowing or denying use of joins
           in this query
         * reuse: a set of reusable joins for multijoins
         * summarize: a terminal aggregate clause
         * for_save: whether this expression is about to be used in a save
           or update

        Return: an Expression to be added to the query.
        """
        c = self.copy()
        c.is_summary = summarize
        c.set_source_expressions([
            expr.resolve_expression(query, allow_joins, reuse, summarize)
            if expr else None
            for expr in c.get_source_expressions()
        ])
        return c

    @property
    def conditional(self):
        return isinstance(self.output_field, fields.BooleanField)

    @property
    def field(self):
        return self.output_field

    @cached_property
    def output_field(self):
        """Return the output type of this expression."""
        output_field = self._resolve_output_field()
        if output_field is None:
            self._output_field_resolved_to_none = True
            raise FieldError('Cannot resolve expression type, unknown output_field')
        return output_field

    @cached_property
    def _output_field_or_none(self):
        """
        Return the output field of this expression, or None if
        _resolve_output_field() didn't return an output type.
        """
        try:
            return self.output_field
        except FieldError:
            if not self._output_field_resolved_to_none:
                raise

    def _resolve_output_field(self):
        """
        Attempt to infer the output type of the expression. If the output
        fields of all source fields match, then simply infer the same type
        here. This isn't always correct, but it makes sense most of the time.

        Consider the difference between `2 + 2` and `2 / 3`. Inferring
        the type here is a convenience for the common case. The user should
        supply their own output_field with more complex computations.

        If a source's output field resolves to None, exclude it from this
        check. If all sources are None, then an error is raised higher up the
        stack in the output_field property.
        """
        sources_iter = (source for source in self.get_source_fields() if source is not None)
        for output_field in sources_iter:
            for source in sources_iter:
                if not isinstance(output_field, source.__class__):
                    raise FieldError(
                        'Expression contains mixed types: %s, %s. You must '
                        'set output_field.' % (
                            output_field.__class__.__name__,
                            source.__class__.__name__,
                        )
                    )
            return output_field

    @staticmethod
    def _convert_value_noop(value, expression, connection):
        return value

    @cached_property
    def convert_value(self):
        """
        Expressions provide their own converters because users have the option
        of manually specifying the output_field which may be a different type
        from the one the database returns.
        """
        field = self.output_field
        internal_type = field.get_internal_type()
        if internal_type == 'FloatField':
            return lambda value, expression, connection: None if value is None else float(value)
        elif internal_type.endswith('IntegerField'):
            return lambda value, expression, connection: None if value is None else int(value)
        elif internal_type == 'DecimalField':
            return lambda value, expression, connection: None if value is None else Decimal(value)
        return self._convert_value_noop

    def get_lookup(self, lookup):
        return self.output_field.get_lookup(lookup)

    def get_transform(self, name):
        return self.output_field.get_transform(name)

    def relabeled_clone(self, change_map):
        clone = self.copy()
        clone.set_source_expressions([
            e.relabeled_clone(change_map) if e is not None else None
            for e in self.get_source_expressions()
        ])
        return clone

    def copy(self):
        return copy.copy(self)

    def get_group_by_cols(self, alias=None):
        if not self.contains_aggregate:
            return [self]
        cols = []
        for source in self.get_source_expressions():
            cols.extend(source.get_group_by_cols())
        return cols

    def get_source_fields(self):
        """Return the underlying field types used by this aggregate."""
        return [e._output_field_or_none for e in self.get_source_expressions()]

    def asc(self, **kwargs):
        return OrderBy(self, **kwargs)

    def desc(self, **kwargs):
        return OrderBy(self, descending=True, **kwargs)

    def reverse_ordering(self):
        return self

    def flatten(self):
        """
        Recursively yield this expression and all subexpressions, in
        depth-first order.
        """
        yield self
        for expr in self.get_source_expressions():
            if expr:
                yield from expr.flatten()

    def select_format(self, compiler, sql, params):
        """
        Custom format for select clauses. For example, EXISTS expressions need
        to be wrapped in CASE WHEN on Oracle.
        """
        if hasattr(self.output_field, 'select_format'):
            return self.output_field.select_format(compiler, sql, params)
        return sql, params

    @cached_property
    def identity(self):
        constructor_signature = inspect.signature(self.__init__)
        args, kwargs = self._constructor_args
        signature = constructor_signature.bind_partial(*args, **kwargs)
        signature.apply_defaults()
        arguments = signature.arguments.items()
        identity = [self.__class__]
        for arg, value in arguments:
            if isinstance(value, fields.Field):
                if value.name and value.model:
                    value = (value.model._meta.label, value.name)
                else:
                    value = type(value)
            else:
                value = make_hashable(value)
            identity.append((arg, value))
        return tuple(identity)

    def __eq__(self, other):
        if not isinstance(other, BaseExpression):
            return NotImplemented
        return other.identity == self.identity

    def __hash__(self):
        return hash(self.identity)


class Expression(BaseExpression, Combinable):
    """An expression that can be combined with other expressions."""
    pass


class CombinedExpression(SQLiteNumericMixin, Expression):

    def __init__(self, lhs, connector, rhs, output_field=None):
        super().__init__(output_field=output_field)
        self.connector = connector
        self.lhs = lhs
        self.rhs = rhs

    def __repr__(self):
        return "<{}: {}>".format(self.__class__.__name__, self)

    def __str__(self):
        return "{} {} {}".format(self.lhs, self.connector, self.rhs)

    def get_source_expressions(self):
        return [self.lhs, self.rhs]

    def set_source_expressions(self, exprs):
        self.lhs, self.rhs = exprs

    def as_sql(self, compiler, connection):
        try:
            lhs_type = self.lhs.output_field.get_internal_type()
        except FieldError:
            lhs_type = None
        try:
            rhs_type = self.rhs.output_field.get_internal_type()
        except FieldError:
            rhs_type = None
        if (
            not connection.features.has_native_duration_field and
            'DurationField' in {lhs_type, rhs_type} and
            lhs_type != rhs_type
        ):
            return DurationExpression(self.lhs, self.connector, self.rhs).as_sql(compiler, connection)
        datetime_fields = {'DateField', 'DateTimeField', 'TimeField'}
        if self.connector == self.SUB and lhs_type in datetime_fields and lhs_type == rhs_type:
            return TemporalSubtraction(self.lhs, self.rhs).as_sql(compiler, connection)
        expressions = []
        expression_params = []
        sql, params = compiler.compile(self.lhs)
        expressions.append(sql)
        expression_params.extend(params)
        sql, params = compiler.compile(self.rhs)
        expressions.append(sql)
        expression_params.extend(params)
        # order of precedence
        expression_wrapper = '(%s)'
        sql = connection.ops.combine_expression(self.connector, expressions)
        return expression_wrapper % sql, expression_params

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        c = self.copy()
        c.is_summary = summarize
        c.lhs = c.lhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        c.rhs = c.rhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return c


class DurationExpression(CombinedExpression):
    def compile(self, side, compiler, connection):
        try:
            output = side.output_field
        except FieldError:
            pass
        else:
            if output.get_internal_type() == 'DurationField':
                sql, params = compiler.compile(side)
                return connection.ops.format_for_duration_arithmetic(sql), params
        return compiler.compile(side)

    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        expressions = []
        expression_params = []
        sql, params = self.compile(self.lhs, compiler, connection)
        expressions.append(sql)
        expression_params.extend(params)
        sql, params = self.compile(self.rhs, compiler, connection)
        expressions.append(sql)
        expression_params.extend(params)
        # order of precedence
        expression_wrapper = '(%s)'
        sql = connection.ops.combine_duration_expression(self.connector, expressions)
        return expression_wrapper % sql, expression_params


class TemporalSubtraction(CombinedExpression):
    output_field = fields.DurationField()

    def __init__(self, lhs, rhs):
        super().__init__(lhs, self.SUB, rhs)

    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        lhs = compiler.compile(self.lhs)
        rhs = compiler.compile(self.rhs)
        return connection.ops.subtract_temporals(self.lhs.output_field.get_internal_type(), lhs, rhs)


@deconstructible
class F(Combinable):
    """An object capable of resolving references to existing query objects."""

    def __init__(self, name):
        """
        Arguments:
         * name: the name of the field this expression references
        """
        self.name = name

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, self.name)

    def resolve_expression(self, query=None, allow_joins=True, reuse=None,
                           summarize=False, for_save=False):
        return query.resolve_ref(self.name, allow_joins, reuse, summarize)

    def asc(self, **kwargs):
        return OrderBy(self, **kwargs)

    def desc(self, **kwargs):
        return OrderBy(self, descending=True, **kwargs)

    def __eq__(self, other):
        return self.__class__ == other.__class__ and self.name == other.name

    def __hash__(self):
        return hash(self.name)


class ResolvedOuterRef(F):
    """
    An object that contains a reference to an outer query.

    In this case, the reference to the outer query has been resolved because
    the inner query has been used as a subquery.
    """
    contains_aggregate = False

    def as_sql(self, *args, **kwargs):
        raise ValueError(
            'This queryset contains a reference to an outer query and may '
            'only be used in a subquery.'
        )

    def resolve_expression(self, *args, **kwargs):
        col = super().resolve_expression(*args, **kwargs)
        # FIXME: Rename possibly_multivalued to multivalued and fix detection
        # for non-multivalued JOINs (e.g. foreign key fields). This should take
        # into account only many-to-many and one-to-many relationships.
        col.possibly_multivalued = LOOKUP_SEP in self.name
        return col

    def relabeled_clone(self, relabels):
        return self

    def get_group_by_cols(self, alias=None):
        return []


class OuterRef(F):
    contains_aggregate = False

    def resolve_expression(self, *args, **kwargs):
        if isinstance(self.name, self.__class__):
            return self.name
        return ResolvedOuterRef(self.name)

    def relabeled_clone(self, relabels):
        return self


class Func(SQLiteNumericMixin, Expression):
    """An SQL function call."""
    function = None
    template = '%(function)s(%(expressions)s)'
    arg_joiner = ', '
    arity = None  # The number of arguments the function accepts.

    def __init__(self, *expressions, output_field=None, **extra):
        if self.arity is not None and len(expressions) != self.arity:
            raise TypeError(
                "'%s' takes exactly %s %s (%s given)" % (
                    self.__class__.__name__,
                    self.arity,
                    "argument" if self.arity == 1 else "arguments",
                    len(expressions),
                )
            )
        super().__init__(output_field=output_field)
        self.source_expressions = self._parse_expressions(*expressions)
        self.extra = extra

    def __repr__(self):
        args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
        extra = {**self.extra, **self._get_repr_options()}
        if extra:
            extra = ', '.join(str(key) + '=' + str(val) for key, val in sorted(extra.items()))
            return "{}({}, {})".format(self.__class__.__name__, args, extra)
        return "{}({})".format(self.__class__.__name__, args)

    def _get_repr_options(self):
        """Return a dict of extra __init__() options to include in the repr."""
        return {}

    def get_source_expressions(self):
        return self.source_expressions

    def set_source_expressions(self, exprs):
        self.source_expressions = exprs

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        c = self.copy()
        c.is_summary = summarize
        for pos, arg in enumerate(c.source_expressions):
            c.source_expressions[pos] = arg.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return c

    def as_sql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context):
        connection.ops.check_expression_support(self)
        sql_parts = []
        params = []
        for arg in self.source_expressions:
            arg_sql, arg_params = compiler.compile(arg)
            sql_parts.append(arg_sql)
            params.extend(arg_params)
        data = {**self.extra, **extra_context}
        # Use the first supplied value in this order: the parameter to this
        # method, a value supplied in __init__()'s **extra (the value in
        # `data`), or the value defined on the class.
        if function is not None:
            data['function'] = function
        else:
            data.setdefault('function', self.function)
        template = template or data.get('template', self.template)
        arg_joiner = arg_joiner or data.get('arg_joiner', self.arg_joiner)
        data['expressions'] = data['field'] = arg_joiner.join(sql_parts)
        return template % data, params

    def copy(self):
        copy = super().copy()
        copy.source_expressions = self.source_expressions[:]
        copy.extra = self.extra.copy()
        return copy


class Value(Expression):
    """Represent a wrapped value as a node within an expression."""
    def __init__(self, value, output_field=None):
        """
        Arguments:
         * value: the value this expression represents. The value will be
           added into the sql parameter list and properly quoted.

         * output_field: an instance of the model field type that this
           expression will return, such as IntegerField() or CharField().
        """
        super().__init__(output_field=output_field)
        self.value = value

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, self.value)

    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        val = self.value
        output_field = self._output_field_or_none
        if output_field is not None:
            if self.for_save:
                val = output_field.get_db_prep_save(val, connection=connection)
            else:
                val = output_field.get_db_prep_value(val, connection=connection)
            if hasattr(output_field, 'get_placeholder'):
                return output_field.get_placeholder(val, compiler, connection), [val]
        if val is None:
            # cx_Oracle does not always convert None to the appropriate
            # NULL type (like in case expressions using numbers), so we
            # use a literal SQL NULL
            return 'NULL', []
        return '%s', [val]

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
        c.for_save = for_save
        return c

    def get_group_by_cols(self, alias=None):
        return []


class RawSQL(Expression):
    def __init__(self, sql, params, output_field=None):
        if output_field is None:
            output_field = fields.Field()
        self.sql, self.params = sql, params
        super().__init__(output_field=output_field)

    def __repr__(self):
        return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params)

    def as_sql(self, compiler, connection):
        return '(%s)' % self.sql, self.params

    def get_group_by_cols(self, alias=None):
        return [self]

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        # Resolve parent fields used in raw SQL.
        for parent in query.model._meta.get_parent_list():
            for parent_field in parent._meta.local_fields:
                _, column_name = parent_field.get_attname_column()
                if column_name.lower() in self.sql.lower():
                    query.resolve_ref(parent_field.name, allow_joins, reuse, summarize)
                    break
        return super().resolve_expression(query, allow_joins, reuse, summarize, for_save)


class Star(Expression):
    def __repr__(self):
        return "'*'"

    def as_sql(self, compiler, connection):
        return '*', []


class Random(Expression):
    output_field = fields.FloatField()

    def __repr__(self):
        return "Random()"

    def as_sql(self, compiler, connection):
        return connection.ops.random_function_sql(), []


class Col(Expression):

    contains_column_references = True
    possibly_multivalued = False

    def __init__(self, alias, target, output_field=None):
        if output_field is None:
            output_field = target
        super().__init__(output_field=output_field)
        self.alias, self.target = alias, target

    def __repr__(self):
        alias, target = self.alias, self.target
        identifiers = (alias, str(target)) if alias else (str(target),)
        return '{}({})'.format(self.__class__.__name__, ', '.join(identifiers))

    def as_sql(self, compiler, connection):
        alias, column = self.alias, self.target.column
        identifiers = (alias, column) if alias else (column,)
        sql = '.'.join(map(compiler.quote_name_unless_alias, identifiers))
        return sql, []

    def relabeled_clone(self, relabels):
        if self.alias is None:
            return self
        return self.__class__(relabels.get(self.alias, self.alias), self.target, self.output_field)

    def get_group_by_cols(self, alias=None):
        return [self]

    def get_db_converters(self, connection):
        if self.target == self.output_field:
            return self.output_field.get_db_converters(connection)
        return (self.output_field.get_db_converters(connection) +
                self.target.get_db_converters(connection))


class Ref(Expression):
    """
    Reference to column alias of the query. For example, Ref('sum_cost') in
    qs.annotate(sum_cost=Sum('cost')) query.
    """
    def __init__(self, refs, source):
        super().__init__()
        self.refs, self.source = refs, source

    def __repr__(self):
        return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source)

    def get_source_expressions(self):
        return [self.source]

    def set_source_expressions(self, exprs):
        self.source, = exprs

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        # The sub-expression `source` has already been resolved, as this is
        # just a reference to the name of `source`.
        return self

    def relabeled_clone(self, relabels):
        return self

    def as_sql(self, compiler, connection):
        return connection.ops.quote_name(self.refs), []

    def get_group_by_cols(self, alias=None):
        return [self]


class ExpressionList(Func):
    """
    An expression containing multiple expressions. Can be used to provide a
    list of expressions as an argument to another expression, like an
    ordering clause.
    """
    template = '%(expressions)s'

    def __init__(self, *expressions, **extra):
        if not expressions:
            raise ValueError('%s requires at least one expression.' % self.__class__.__name__)
        super().__init__(*expressions, **extra)

    def __str__(self):
        return self.arg_joiner.join(str(arg) for arg in self.source_expressions)


class ExpressionWrapper(Expression):
    """
    An expression that can wrap another expression so that it can provide
    extra context to the inner expression, such as the output_field.
    """

    def __init__(self, expression, output_field):
        super().__init__(output_field=output_field)
        if getattr(expression, '_output_field_or_none', True) is None:
            expression = expression.copy()
            expression.output_field = output_field
        self.expression = expression

    def set_source_expressions(self, exprs):
        self.expression = exprs[0]

    def get_source_expressions(self):
        return [self.expression]

    def get_group_by_cols(self, alias=None):
        return self.expression.get_group_by_cols(alias=alias)

    def as_sql(self, compiler, connection):
        return self.expression.as_sql(compiler, connection)

    def __repr__(self):
        return "{}({})".format(self.__class__.__name__, self.expression)


class When(Expression):
    template = 'WHEN %(condition)s THEN %(result)s'
    # This isn't a complete conditional expression, must be used in Case().
    conditional = False

    def __init__(self, condition=None, then=None, **lookups):
        if lookups:
            if condition is None:
                condition, lookups = Q(**lookups), None
            elif getattr(condition, 'conditional', False):
                condition, lookups = Q(condition, **lookups), None
        if condition is None or not getattr(condition, 'conditional', False) or lookups:
            raise TypeError(
                'When() supports a Q object, a boolean expression, or lookups '
                'as a condition.'
            )
        if isinstance(condition, Q) and not condition:
            raise ValueError("An empty Q() can't be used as a When() condition.")
        super().__init__(output_field=None)
        self.condition = condition
        self.result = self._parse_expressions(then)[0]

    def __str__(self):
        return "WHEN %r THEN %r" % (self.condition, self.result)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self)

    def get_source_expressions(self):
        return [self.condition, self.result]

    def set_source_expressions(self, exprs):
        self.condition, self.result = exprs

    def get_source_fields(self):
        # We're only interested in the fields of the result expressions.
        return [self.result._output_field_or_none]

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        c = self.copy()
        c.is_summary = summarize
        if hasattr(c.condition, 'resolve_expression'):
            c.condition = c.condition.resolve_expression(query, allow_joins, reuse, summarize, False)
        c.result = c.result.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return c

    def as_sql(self, compiler, connection, template=None, **extra_context):
        connection.ops.check_expression_support(self)
        template_params = extra_context
        sql_params = []
        condition_sql, condition_params = compiler.compile(self.condition)
        template_params['condition'] = condition_sql
        sql_params.extend(condition_params)
        result_sql, result_params = compiler.compile(self.result)
        template_params['result'] = result_sql
        sql_params.extend(result_params)
        template = template or self.template
        return template % template_params, sql_params

    def get_group_by_cols(self, alias=None):
        # This is not a complete expression and cannot be used in GROUP BY.
        cols = []
        for source in self.get_source_expressions():
            cols.extend(source.get_group_by_cols())
        return cols


class Case(Expression):
    """
    An SQL searched CASE expression:

        CASE
            WHEN n > 0
                THEN 'positive'
            WHEN n < 0
                THEN 'negative'
            ELSE 'zero'
        END
    """
    template = 'CASE %(cases)s ELSE %(default)s END'
    case_joiner = ' '

    def __init__(self, *cases, default=None, output_field=None, **extra):
        if not all(isinstance(case, When) for case in cases):
            raise TypeError("Positional arguments must all be When objects.")
        super().__init__(output_field)
        self.cases = list(cases)
        self.default = self._parse_expressions(default)[0]
        self.extra = extra

    def __str__(self):
        return "CASE %s, ELSE %r" % (', '.join(str(c) for c in self.cases), self.default)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self)

    def get_source_expressions(self):
        return self.cases + [self.default]

    def set_source_expressions(self, exprs):
        *self.cases, self.default = exprs

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        c = self.copy()
        c.is_summary = summarize
        for pos, case in enumerate(c.cases):
            c.cases[pos] = case.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        c.default = c.default.resolve_expression(query, allow_joins, reuse, summarize, for_save)
        return c

    def copy(self):
        c = super().copy()
        c.cases = c.cases[:]
        return c

    def as_sql(self, compiler, connection, template=None, case_joiner=None, **extra_context):
        connection.ops.check_expression_support(self)
        if not self.cases:
            return compiler.compile(self.default)
        template_params = {**self.extra, **extra_context}
        case_parts = []
        sql_params = []
        for case in self.cases:
            try:
                case_sql, case_params = compiler.compile(case)
            except EmptyResultSet:
                continue
            case_parts.append(case_sql)
            sql_params.extend(case_params)
        default_sql, default_params = compiler.compile(self.default)
        if not case_parts:
            return default_sql, default_params
        case_joiner = case_joiner or self.case_joiner
        template_params['cases'] = case_joiner.join(case_parts)
        template_params['default'] = default_sql
        sql_params.extend(default_params)
        template = template or template_params.get('template', self.template)
        sql = template % template_params
        if self._output_field_or_none is not None:
            sql = connection.ops.unification_cast_sql(self.output_field) % sql
        return sql, sql_params


class Subquery(Expression):
    """
    An explicit subquery. It may contain OuterRef() references to the outer
    query which will be resolved when it is applied to that query.
    """
    template = '(%(subquery)s)'
    contains_aggregate = False

    def __init__(self, queryset, output_field=None, **extra):
        self.query = queryset.query
        self.extra = extra
        # Prevent the QuerySet from being evaluated.
        self.queryset = queryset._chain(_result_cache=[], prefetch_done=True)
        super().__init__(output_field)

    def __getstate__(self):
        state = super().__getstate__()
        args, kwargs = state['_constructor_args']
        if args:
            args = (self.queryset, *args[1:])
        else:
            kwargs['queryset'] = self.queryset
        state['_constructor_args'] = args, kwargs
        return state

    def get_source_expressions(self):
        return [self.query]

    def set_source_expressions(self, exprs):
        self.query = exprs[0]

    def _resolve_output_field(self):
        return self.query.output_field

    def copy(self):
        clone = super().copy()
        clone.query = clone.query.clone()
        return clone

    @property
    def external_aliases(self):
        return self.query.external_aliases

    def as_sql(self, compiler, connection, template=None, **extra_context):
        connection.ops.check_expression_support(self)
        template_params = {**self.extra, **extra_context}
        subquery_sql, sql_params = self.query.as_sql(compiler, connection)
        template_params['subquery'] = subquery_sql[1:-1]

        template = template or template_params.get('template', self.template)
        sql = template % template_params
        return sql, sql_params

    def get_group_by_cols(self, alias=None):
        if alias:
            return [Ref(alias, self)]
        external_cols = self.query.get_external_cols()
        if any(col.possibly_multivalued for col in external_cols):
            return [self]
        return external_cols


class Exists(Subquery):
    template = 'EXISTS(%(subquery)s)'
    output_field = fields.BooleanField()

    def __init__(self, queryset, negated=False, **kwargs):
        # As a performance optimization, remove ordering since EXISTS doesn't
        # care about it, just whether or not a row matches.
        queryset = queryset.order_by()
        self.negated = negated
        super().__init__(queryset, **kwargs)

    def __invert__(self):
        clone = self.copy()
        clone.negated = not self.negated
        return clone

    def as_sql(self, compiler, connection, template=None, **extra_context):
        sql, params = super().as_sql(compiler, connection, template, **extra_context)
        if self.negated:
            sql = 'NOT {}'.format(sql)
        return sql, params

    def select_format(self, compiler, sql, params):
        # Wrap EXISTS() with a CASE WHEN expression if a database backend
        # (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP
        # BY list.
        if not compiler.connection.features.supports_boolean_expr_in_select_clause:
            sql = 'CASE WHEN {} THEN 1 ELSE 0 END'.format(sql)
        return sql, params


class OrderBy(BaseExpression):
    template = '%(expression)s %(ordering)s'
    conditional = False

    def __init__(self, expression, descending=False, nulls_first=False, nulls_last=False):
        if nulls_first and nulls_last:
            raise ValueError('nulls_first and nulls_last are mutually exclusive')
        self.nulls_first = nulls_first
        self.nulls_last = nulls_last
        self.descending = descending
        if not hasattr(expression, 'resolve_expression'):
            raise ValueError('expression must be an expression type')
        self.expression = expression

    def __repr__(self):
        return "{}({}, descending={})".format(
            self.__class__.__name__, self.expression, self.descending)

    def set_source_expressions(self, exprs):
        self.expression = exprs[0]

    def get_source_expressions(self):
        return [self.expression]

    def as_sql(self, compiler, connection, template=None, **extra_context):
        template = template or self.template
        if connection.features.supports_order_by_nulls_modifier:
            if self.nulls_last:
                template = '%s NULLS LAST' % template
            elif self.nulls_first:
                template = '%s NULLS FIRST' % template
        else:
            if self.nulls_last and not (
                self.descending and connection.features.order_by_nulls_first
            ):
                template = '%%(expression)s IS NULL, %s' % template
            elif self.nulls_first and not (
                not self.descending and connection.features.order_by_nulls_first
            ):
                template = '%%(expression)s IS NOT NULL, %s' % template
        connection.ops.check_expression_support(self)
        expression_sql, params = compiler.compile(self.expression)
        placeholders = {
            'expression': expression_sql,
            'ordering': 'DESC' if self.descending else 'ASC',
            **extra_context,
        }
        template = template or self.template
        params *= template.count('%(expression)s')
        return (template % placeholders).rstrip(), params

    def as_oracle(self, compiler, connection):
        # Oracle doesn't allow ORDER BY EXISTS() unless it's wrapped in
        # a CASE WHEN.
        if isinstance(self.expression, Exists):
            copy = self.copy()
            copy.expression = Case(
                When(self.expression, then=True),
                default=False,
                output_field=fields.BooleanField(),
            )
            return copy.as_sql(compiler, connection)
        return self.as_sql(compiler, connection)

    def get_group_by_cols(self, alias=None):
        cols = []
        for source in self.get_source_expressions():
            cols.extend(source.get_group_by_cols())
        return cols

    def reverse_ordering(self):
        self.descending = not self.descending
        if self.nulls_first or self.nulls_last:
            self.nulls_first = not self.nulls_first
            self.nulls_last = not self.nulls_last
        return self

    def asc(self):
        self.descending = False

    def desc(self):
        self.descending = True


class Window(Expression):
    template = '%(expression)s OVER (%(window)s)'
    # Although the main expression may either be an aggregate or an
    # expression with an aggregate function, the GROUP BY that will
    # be introduced in the query as a result is not desired.
    contains_aggregate = False
    contains_over_clause = True
    filterable = False

    def __init__(self, expression, partition_by=None, order_by=None, frame=None, output_field=None):
        self.partition_by = partition_by
        self.order_by = order_by
        self.frame = frame

        if not getattr(expression, 'window_compatible', False):
            raise ValueError(
                "Expression '%s' isn't compatible with OVER clauses." %
                expression.__class__.__name__
            )

        if self.partition_by is not None:
            if not isinstance(self.partition_by, (tuple, list)):
                self.partition_by = (self.partition_by,)
            self.partition_by = ExpressionList(*self.partition_by)

        if self.order_by is not None:
            if isinstance(self.order_by, (list, tuple)):
                self.order_by = ExpressionList(*self.order_by)
            elif not isinstance(self.order_by, BaseExpression):
                raise ValueError(
                    'order_by must be either an Expression or a sequence of '
                    'expressions.'
                )
        super().__init__(output_field=output_field)
        self.source_expression = self._parse_expressions(expression)[0]

    def _resolve_output_field(self):
        return self.source_expression.output_field

    def get_source_expressions(self):
        return [self.source_expression, self.partition_by, self.order_by, self.frame]

    def set_source_expressions(self, exprs):
        self.source_expression, self.partition_by, self.order_by, self.frame = exprs

    def as_sql(self, compiler, connection, template=None):
        connection.ops.check_expression_support(self)
        if not connection.features.supports_over_clause:
            raise NotSupportedError('This backend does not support window expressions.')
        expr_sql, params = compiler.compile(self.source_expression)
        window_sql, window_params = [], []

        if self.partition_by is not None:
            sql_expr, sql_params = self.partition_by.as_sql(
                compiler=compiler, connection=connection,
                template='PARTITION BY %(expressions)s',
            )
            window_sql.extend(sql_expr)
            window_params.extend(sql_params)

        if self.order_by is not None:
            window_sql.append(' ORDER BY ')
            order_sql, order_params = compiler.compile(self.order_by)
            window_sql.extend(order_sql)
            window_params.extend(order_params)

        if self.frame:
            frame_sql, frame_params = compiler.compile(self.frame)
            window_sql.append(' ' + frame_sql)
            window_params.extend(frame_params)

        params.extend(window_params)
        template = template or self.template

        return template % {
            'expression': expr_sql,
            'window': ''.join(window_sql).strip()
        }, params

    def __str__(self):
        return '{} OVER ({}{}{})'.format(
            str(self.source_expression),
            'PARTITION BY ' + str(self.partition_by) if self.partition_by else '',
            'ORDER BY ' + str(self.order_by) if self.order_by else '',
            str(self.frame or ''),
        )

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self)

    def get_group_by_cols(self, alias=None):
        return []


class WindowFrame(Expression):
    """
    Model the frame clause in window expressions. There are two types of frame
    clauses which are subclasses, however, all processing and validation (by
    no means intended to be complete) is done here. Thus, providing an end for
    a frame is optional (the default is UNBOUNDED FOLLOWING, which is the last
    row in the frame).
    """
    template = '%(frame_type)s BETWEEN %(start)s AND %(end)s'

    def __init__(self, start=None, end=None):
        self.start = Value(start)
        self.end = Value(end)

    def set_source_expressions(self, exprs):
        self.start, self.end = exprs

    def get_source_expressions(self):
        return [self.start, self.end]

    def as_sql(self, compiler, connection):
        connection.ops.check_expression_support(self)
        start, end = self.window_frame_start_end(connection, self.start.value, self.end.value)
        return self.template % {
            'frame_type': self.frame_type,
            'start': start,
            'end': end,
        }, []

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self)

    def get_group_by_cols(self, alias=None):
        return []

    def __str__(self):
        if self.start.value is not None and self.start.value < 0:
            start = '%d %s' % (abs(self.start.value), connection.ops.PRECEDING)
        elif self.start.value is not None and self.start.value == 0:
            start = connection.ops.CURRENT_ROW
        else:
            start = connection.ops.UNBOUNDED_PRECEDING

        if self.end.value is not None and self.end.value > 0:
            end = '%d %s' % (self.end.value, connection.ops.FOLLOWING)
        elif self.end.value is not None and self.end.value == 0:
            end = connection.ops.CURRENT_ROW
        else:
            end = connection.ops.UNBOUNDED_FOLLOWING
        return self.template % {
            'frame_type': self.frame_type,
            'start': start,
            'end': end,
        }

    def window_frame_start_end(self, connection, start, end):
        raise NotImplementedError('Subclasses must implement window_frame_start_end().')


class RowRange(WindowFrame):
    frame_type = 'ROWS'

    def window_frame_start_end(self, connection, start, end):
        return connection.ops.window_frame_rows_start_end(start, end)


class ValueRange(WindowFrame):
    frame_type = 'RANGE'

    def window_frame_start_end(self, connection, start, end):
        return connection.ops.window_frame_range_start_end(start, end)
import datetime import posixpath from django import forms from django.core import checks from django.core.files.base import File from django.core.files.images import ImageFile from django.core.files.storage import Storage, default_storage from django.db.models import signals from django.db.models.fields import Field from django.db.models.query_utils import DeferredAttribute from django.utils.translation import gettext_lazy as _ class FieldFile(File): def __init__(self, instance, field, name): super().__init__(None, name) self.instance = instance self.field = field self.storage = field.storage self._committed = True def __eq__(self, other): # Older code may be expecting FileField values to be simple strings. # By overriding the == operator, it can remain backwards compatibility. if hasattr(other, 'name'): return self.name == other.name return self.name == other def __hash__(self): return hash(self.name) # The standard File contains most of the necessary properties, but # FieldFiles can be instantiated without a name, so that needs to # be checked for here. def _require_file(self): if not self: raise ValueError("The '%s' attribute has no file associated with it." % self.field.name) def _get_file(self): self._require_file() if getattr(self, '_file', None) is None: self._file = self.storage.open(self.name, 'rb') return self._file def _set_file(self, file): self._file = file def _del_file(self): del self._file file = property(_get_file, _set_file, _del_file) @property def path(self): self._require_file() return self.storage.path(self.name) @property def url(self): self._require_file() return self.storage.url(self.name) @property def size(self): self._require_file() if not self._committed: return self.file.size return self.storage.size(self.name) def open(self, mode='rb'): self._require_file() if getattr(self, '_file', None) is None: self.file = self.storage.open(self.name, mode) else: self.file.open(mode) return self # open() doesn't alter the file's contents, but it does reset the pointer open.alters_data = True # In addition to the standard File API, FieldFiles have extra methods # to further manipulate the underlying file, as well as update the # associated model instance. def save(self, name, content, save=True): name = self.field.generate_filename(self.instance, name) self.name = self.storage.save(name, content, max_length=self.field.max_length) setattr(self.instance, self.field.name, self.name) self._committed = True # Save the object because it has changed, unless save is False if save: self.instance.save() save.alters_data = True def delete(self, save=True): if not self: return # Only close the file if it's already open, which we know by the # presence of self._file if hasattr(self, '_file'): self.close() del self.file self.storage.delete(self.name) self.name = None setattr(self.instance, self.field.name, self.name) self._committed = False if save: self.instance.save() delete.alters_data = True @property def closed(self): file = getattr(self, '_file', None) return file is None or file.closed def close(self): file = getattr(self, '_file', None) if file is not None: file.close() def __getstate__(self): # FieldFile needs access to its associated model field, an instance and # the file's name. Everything else will be restored later, by # FileDescriptor below. 
return { 'name': self.name, 'closed': False, '_committed': True, '_file': None, 'instance': self.instance, 'field': self.field, } def __setstate__(self, state): self.__dict__.update(state) self.storage = self.field.storage class FileDescriptor(DeferredAttribute): """ The descriptor for the file attribute on the model instance. Return a FieldFile when accessed so you can write code like:: >>> from myapp.models import MyModel >>> instance = MyModel.objects.get(pk=1) >>> instance.file.size Assign a file object on assignment so you can do:: >>> with open('/path/to/hello.world') as f: ... instance.file = File(f) """ def __get__(self, instance, cls=None): if instance is None: return self # This is slightly complicated, so worth an explanation. # instance.file`needs to ultimately return some instance of `File`, # probably a subclass. Additionally, this returned object needs to have # the FieldFile API so that users can easily do things like # instance.file.path and have that delegated to the file storage engine. # Easy enough if we're strict about assignment in __set__, but if you # peek below you can see that we're not. So depending on the current # value of the field we have to dynamically construct some sort of # "thing" to return. # The instance dict contains whatever was originally assigned # in __set__. file = super().__get__(instance, cls) # If this value is a string (instance.file = "path/to/file") or None # then we simply wrap it with the appropriate attribute class according # to the file field. [This is FieldFile for FileFields and # ImageFieldFile for ImageFields; it's also conceivable that user # subclasses might also want to subclass the attribute class]. This # object understands how to convert a path to a file, and also how to # handle None. if isinstance(file, str) or file is None: attr = self.field.attr_class(instance, self.field, file) instance.__dict__[self.field.attname] = attr # Other types of files may be assigned as well, but they need to have # the FieldFile interface added to them. Thus, we wrap any other type of # File inside a FieldFile (well, the field's attr_class, which is # usually FieldFile). elif isinstance(file, File) and not isinstance(file, FieldFile): file_copy = self.field.attr_class(instance, self.field, file.name) file_copy.file = file file_copy._committed = False instance.__dict__[self.field.attname] = file_copy # Finally, because of the (some would say boneheaded) way pickle works, # the underlying FieldFile might not actually itself have an associated # file. So we need to reset the details of the FieldFile in those cases. elif isinstance(file, FieldFile) and not hasattr(file, 'field'): file.instance = instance file.field = self.field file.storage = self.field.storage # Make sure that the instance is correct. elif isinstance(file, FieldFile) and instance is not file.instance: file.instance = instance # That was fun, wasn't it? return instance.__dict__[self.field.attname] def __set__(self, instance, value): instance.__dict__[self.field.attname] = value class FileField(Field): # The class to wrap instance attributes in. Accessing the file object off # the instance will always return an instance of attr_class. attr_class = FieldFile # The descriptor to use for accessing the attribute off of the class. 
descriptor_class = FileDescriptor description = _("File") def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs): self._primary_key_set_explicitly = 'primary_key' in kwargs self.storage = storage or default_storage if callable(self.storage): self.storage = self.storage() if not isinstance(self.storage, Storage): raise TypeError( "%s.storage must be a subclass/instance of %s.%s" % (self.__class__.__qualname__, Storage.__module__, Storage.__qualname__) ) self.upload_to = upload_to kwargs.setdefault('max_length', 100) super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_primary_key(), *self._check_upload_to(), ] def _check_primary_key(self): if self._primary_key_set_explicitly: return [ checks.Error( "'primary_key' is not a valid argument for a %s." % self.__class__.__name__, obj=self, id='fields.E201', ) ] else: return [] def _check_upload_to(self): if isinstance(self.upload_to, str) and self.upload_to.startswith('/'): return [ checks.Error( "%s's 'upload_to' argument must be a relative path, not an " "absolute path." % self.__class__.__name__, obj=self, id='fields.E202', hint='Remove the leading slash.', ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get("max_length") == 100: del kwargs["max_length"] kwargs['upload_to'] = self.upload_to if self.storage is not default_storage: kwargs['storage'] = self.storage return name, path, args, kwargs def get_internal_type(self): return "FileField" def get_prep_value(self, value): value = super().get_prep_value(value) # Need to convert File objects provided via a form to string for database insertion if value is None: return None return str(value) def pre_save(self, model_instance, add): file = super().pre_save(model_instance, add) if file and not file._committed: # Commit the file to storage prior to saving the model file.save(file.name, file.file, save=False) return file def generate_filename(self, instance, filename): """ Apply (if callable) or prepend (if a string) upload_to to the filename, then delegate further processing of the name to the storage backend. Until the storage layer, all file paths are expected to be Unix style (with forward slashes). """ if callable(self.upload_to): filename = self.upload_to(instance, filename) else: dirname = datetime.datetime.now().strftime(str(self.upload_to)) filename = posixpath.join(dirname, filename) return self.storage.generate_filename(filename) def save_form_data(self, instance, data): # Important: None means "no change", other false value means "clear" # This subtle distinction (rather than a more explicit marker) is # needed because we need to consume values that are also sane for a # regular (non Model-) Form to find in its cleaned_data dictionary. if data is not None: # This value will be converted to str and stored in the # database, so leaving False as-is is not acceptable. setattr(instance, self.name, data or '') def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.FileField, 'max_length': self.max_length, **kwargs, }) class ImageFileDescriptor(FileDescriptor): """ Just like the FileDescriptor, but for ImageFields. The only difference is assigning the width/height to the width_field/height_field, if appropriate. 
""" def __set__(self, instance, value): previous_file = instance.__dict__.get(self.field.attname) super().__set__(instance, value) # To prevent recalculating image dimensions when we are instantiating # an object from the database (bug #11084), only update dimensions if # the field had a value before this assignment. Since the default # value for FileField subclasses is an instance of field.attr_class, # previous_file will only be None when we are called from # Model.__init__(). The ImageField.update_dimension_fields method # hooked up to the post_init signal handles the Model.__init__() cases. # Assignment happening outside of Model.__init__() will trigger the # update right here. if previous_file is not None: self.field.update_dimension_fields(instance, force=True) class ImageFieldFile(ImageFile, FieldFile): def delete(self, save=True): # Clear the image dimensions cache if hasattr(self, '_dimensions_cache'): del self._dimensions_cache super().delete(save) class ImageField(FileField): attr_class = ImageFieldFile descriptor_class = ImageFileDescriptor description = _("Image") def __init__(self, verbose_name=None, name=None, width_field=None, height_field=None, **kwargs): self.width_field, self.height_field = width_field, height_field super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_image_library_installed(), ] def _check_image_library_installed(self): try: from PIL import Image # NOQA except ImportError: return [ checks.Error( 'Cannot use ImageField because Pillow is not installed.', hint=('Get Pillow at https://pypi.org/project/Pillow/ ' 'or run command "python -m pip install Pillow".'), obj=self, id='fields.E210', ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.width_field: kwargs['width_field'] = self.width_field if self.height_field: kwargs['height_field'] = self.height_field return name, path, args, kwargs def contribute_to_class(self, cls, name, **kwargs): super().contribute_to_class(cls, name, **kwargs) # Attach update_dimension_fields so that dimension fields declared # after their corresponding image field don't stay cleared by # Model.__init__, see bug #11196. # Only run post-initialization dimension update on non-abstract models if not cls._meta.abstract: signals.post_init.connect(self.update_dimension_fields, sender=cls) def update_dimension_fields(self, instance, force=False, *args, **kwargs): """ Update field's width and height fields, if defined. This method is hooked up to model's post_init signal to update dimensions after instantiating a model instance. However, dimensions won't be updated if the dimensions fields are already populated. This avoids unnecessary recalculation when loading an object from the database. Dimensions can be forced to update with force=True, which is how ImageFileDescriptor.__set__ calls this method. """ # Nothing to update if the field doesn't have dimension fields or if # the field is deferred. has_dimension_fields = self.width_field or self.height_field if not has_dimension_fields or self.attname not in instance.__dict__: return # getattr will call the ImageFileDescriptor's __get__ method, which # coerces the assigned value into an instance of self.attr_class # (ImageFieldFile in this case). file = getattr(instance, self.attname) # Nothing to update if we have no file and not being forced to update. 
if not file and not force: return dimension_fields_filled = not( (self.width_field and not getattr(instance, self.width_field)) or (self.height_field and not getattr(instance, self.height_field)) ) # When both dimension fields have values, we are most likely loading # data from the database or updating an image field that already had # an image stored. In the first case, we don't want to update the # dimension fields because we are already getting their values from the # database. In the second case, we do want to update the dimensions # fields and will skip this return because force will be True since we # were called from ImageFileDescriptor.__set__. if dimension_fields_filled and not force: return # file should be an instance of ImageFieldFile or should be None. if file: width = file.width height = file.height else: # No file, so clear dimensions fields. width = None height = None # Update the width and height fields. if self.width_field: setattr(instance, self.width_field, width) if self.height_field: setattr(instance, self.height_field, height) def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.ImageField, **kwargs, })
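# ---------------------------------------------------------------------------
# Usage sketch (illustration only, not part of the module above): a
# hypothetical ``Profile`` model showing how a string or callable upload_to
# and the width_field/height_field hooks documented in
# ImageField.update_dimension_fields() fit together.
#
#   from django.db import models
#
#   def avatar_path(instance, filename):
#       # A callable upload_to receives the instance and original filename.
#       return 'avatars/user_%s/%s' % (instance.pk, filename)
#
#   class Profile(models.Model):
#       # A string upload_to is passed through strftime(), so date patterns
#       # such as %Y/%m expand when generate_filename() runs.
#       resume = models.FileField(upload_to='resumes/%Y/%m/')
#       # The descriptor keeps these two columns in sync with the image's
#       # dimensions via the post_init signal and ImageFileDescriptor.__set__.
#       photo = models.ImageField(
#           upload_to=avatar_path,
#           width_field='photo_width',
#           height_field='photo_height',
#           blank=True,
#       )
#       photo_width = models.PositiveIntegerField(null=True, editable=False)
#       photo_height = models.PositiveIntegerField(null=True, editable=False)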
from collections import namedtuple import cx_Oracle from django.db import models from django.db.backends.base.introspection import ( BaseDatabaseIntrospection, FieldInfo as BaseFieldInfo, TableInfo, ) from django.utils.functional import cached_property FieldInfo = namedtuple('FieldInfo', BaseFieldInfo._fields + ('is_autofield', 'is_json')) class DatabaseIntrospection(BaseDatabaseIntrospection): cache_bust_counter = 1 # Maps type objects to Django Field types. @cached_property def data_types_reverse(self): if self.connection.cx_oracle_version < (8,): return { cx_Oracle.BLOB: 'BinaryField', cx_Oracle.CLOB: 'TextField', cx_Oracle.DATETIME: 'DateField', cx_Oracle.FIXED_CHAR: 'CharField', cx_Oracle.FIXED_NCHAR: 'CharField', cx_Oracle.INTERVAL: 'DurationField', cx_Oracle.NATIVE_FLOAT: 'FloatField', cx_Oracle.NCHAR: 'CharField', cx_Oracle.NCLOB: 'TextField', cx_Oracle.NUMBER: 'DecimalField', cx_Oracle.STRING: 'CharField', cx_Oracle.TIMESTAMP: 'DateTimeField', } else: return { cx_Oracle.DB_TYPE_DATE: 'DateField', cx_Oracle.DB_TYPE_BINARY_DOUBLE: 'FloatField', cx_Oracle.DB_TYPE_BLOB: 'BinaryField', cx_Oracle.DB_TYPE_CHAR: 'CharField', cx_Oracle.DB_TYPE_CLOB: 'TextField', cx_Oracle.DB_TYPE_INTERVAL_DS: 'DurationField', cx_Oracle.DB_TYPE_NCHAR: 'CharField', cx_Oracle.DB_TYPE_NCLOB: 'TextField', cx_Oracle.DB_TYPE_NVARCHAR: 'CharField', cx_Oracle.DB_TYPE_NUMBER: 'DecimalField', cx_Oracle.DB_TYPE_TIMESTAMP: 'DateTimeField', cx_Oracle.DB_TYPE_VARCHAR: 'CharField', } def get_field_type(self, data_type, description): if data_type == cx_Oracle.NUMBER: precision, scale = description[4:6] if scale == 0: if precision > 11: return 'BigAutoField' if description.is_autofield else 'BigIntegerField' elif 1 < precision < 6 and description.is_autofield: return 'SmallAutoField' elif precision == 1: return 'BooleanField' elif description.is_autofield: return 'AutoField' else: return 'IntegerField' elif scale == -127: return 'FloatField' elif data_type == cx_Oracle.NCLOB and description.is_json: return 'JSONField' return super().get_field_type(data_type, description) def get_table_list(self, cursor): """Return a list of table and view names in the current database.""" cursor.execute(""" SELECT table_name, 't' FROM user_tables WHERE NOT EXISTS ( SELECT 1 FROM user_mviews WHERE user_mviews.mview_name = user_tables.table_name ) UNION ALL SELECT view_name, 'v' FROM user_views UNION ALL SELECT mview_name, 'v' FROM user_mviews """) return [TableInfo(self.identifier_converter(row[0]), row[1]) for row in cursor.fetchall()] def get_table_description(self, cursor, table_name): """ Return a description of the table with the DB-API cursor.description interface. 
""" # user_tab_columns gives data default for columns cursor.execute(""" SELECT column_name, data_default, CASE WHEN char_used IS NULL THEN data_length ELSE char_length END as internal_size, CASE WHEN identity_column = 'YES' THEN 1 ELSE 0 END as is_autofield, CASE WHEN EXISTS ( SELECT 1 FROM user_json_columns WHERE user_json_columns.table_name = user_tab_cols.table_name AND user_json_columns.column_name = user_tab_cols.column_name ) THEN 1 ELSE 0 END as is_json FROM user_tab_cols WHERE table_name = UPPER(%s)""", [table_name]) field_map = { column: (internal_size, default if default != 'NULL' else None, is_autofield, is_json) for column, default, internal_size, is_autofield, is_json in cursor.fetchall() } self.cache_bust_counter += 1 cursor.execute("SELECT * FROM {} WHERE ROWNUM < 2 AND {} > 0".format( self.connection.ops.quote_name(table_name), self.cache_bust_counter)) description = [] for desc in cursor.description: name = desc[0] internal_size, default, is_autofield, is_json = field_map[name] name = name % {} # cx_Oracle, for some reason, doubles percent signs. description.append(FieldInfo( self.identifier_converter(name), *desc[1:3], internal_size, desc[4] or 0, desc[5] or 0, *desc[6:], default, is_autofield, is_json, )) return description def identifier_converter(self, name): """Identifier comparison is case insensitive under Oracle.""" return name.lower() def get_sequences(self, cursor, table_name, table_fields=()): cursor.execute(""" SELECT user_tab_identity_cols.sequence_name, user_tab_identity_cols.column_name FROM user_tab_identity_cols, user_constraints, user_cons_columns cols WHERE user_constraints.constraint_name = cols.constraint_name AND user_constraints.table_name = user_tab_identity_cols.table_name AND cols.column_name = user_tab_identity_cols.column_name AND user_constraints.constraint_type = 'P' AND user_tab_identity_cols.table_name = UPPER(%s) """, [table_name]) # Oracle allows only one identity column per table. row = cursor.fetchone() if row: return [{ 'name': self.identifier_converter(row[0]), 'table': self.identifier_converter(table_name), 'column': self.identifier_converter(row[1]), }] # To keep backward compatibility for AutoFields that aren't Oracle # identity columns. for f in table_fields: if isinstance(f, models.AutoField): return [{'table': table_name, 'column': f.column}] return [] def get_relations(self, cursor, table_name): """ Return a dictionary of {field_name: (field_name_other_table, other_table)} representing all relationships to the given table. 
""" table_name = table_name.upper() cursor.execute(""" SELECT ca.column_name, cb.table_name, cb.column_name FROM user_constraints, USER_CONS_COLUMNS ca, USER_CONS_COLUMNS cb WHERE user_constraints.table_name = %s AND user_constraints.constraint_name = ca.constraint_name AND user_constraints.r_constraint_name = cb.constraint_name AND ca.position = cb.position""", [table_name]) return { self.identifier_converter(field_name): ( self.identifier_converter(rel_field_name), self.identifier_converter(rel_table_name), ) for field_name, rel_table_name, rel_field_name in cursor.fetchall() } def get_key_columns(self, cursor, table_name): cursor.execute(""" SELECT ccol.column_name, rcol.table_name AS referenced_table, rcol.column_name AS referenced_column FROM user_constraints c JOIN user_cons_columns ccol ON ccol.constraint_name = c.constraint_name JOIN user_cons_columns rcol ON rcol.constraint_name = c.r_constraint_name WHERE c.table_name = %s AND c.constraint_type = 'R'""", [table_name.upper()]) return [ tuple(self.identifier_converter(cell) for cell in row) for row in cursor.fetchall() ] def get_primary_key_column(self, cursor, table_name): cursor.execute(""" SELECT cols.column_name FROM user_constraints, user_cons_columns cols WHERE user_constraints.constraint_name = cols.constraint_name AND user_constraints.constraint_type = 'P' AND user_constraints.table_name = UPPER(%s) AND cols.position = 1 """, [table_name]) row = cursor.fetchone() return self.identifier_converter(row[0]) if row else None def get_constraints(self, cursor, table_name): """ Retrieve any constraints or keys (unique, pk, fk, check, index) across one or more columns. """ constraints = {} # Loop over the constraints, getting PKs, uniques, and checks cursor.execute(""" SELECT user_constraints.constraint_name, LISTAGG(LOWER(cols.column_name), ',') WITHIN GROUP (ORDER BY cols.position), CASE user_constraints.constraint_type WHEN 'P' THEN 1 ELSE 0 END AS is_primary_key, CASE WHEN user_constraints.constraint_type IN ('P', 'U') THEN 1 ELSE 0 END AS is_unique, CASE user_constraints.constraint_type WHEN 'C' THEN 1 ELSE 0 END AS is_check_constraint FROM user_constraints LEFT OUTER JOIN user_cons_columns cols ON user_constraints.constraint_name = cols.constraint_name WHERE user_constraints.constraint_type = ANY('P', 'U', 'C') AND user_constraints.table_name = UPPER(%s) GROUP BY user_constraints.constraint_name, user_constraints.constraint_type """, [table_name]) for constraint, columns, pk, unique, check in cursor.fetchall(): constraint = self.identifier_converter(constraint) constraints[constraint] = { 'columns': columns.split(','), 'primary_key': pk, 'unique': unique, 'foreign_key': None, 'check': check, 'index': unique, # All uniques come with an index } # Foreign key constraints cursor.execute(""" SELECT cons.constraint_name, LISTAGG(LOWER(cols.column_name), ',') WITHIN GROUP (ORDER BY cols.position), LOWER(rcols.table_name), LOWER(rcols.column_name) FROM user_constraints cons INNER JOIN user_cons_columns rcols ON rcols.constraint_name = cons.r_constraint_name AND rcols.position = 1 LEFT OUTER JOIN user_cons_columns cols ON cons.constraint_name = cols.constraint_name WHERE cons.constraint_type = 'R' AND cons.table_name = UPPER(%s) GROUP BY cons.constraint_name, rcols.table_name, rcols.column_name """, [table_name]) for constraint, columns, other_table, other_column in cursor.fetchall(): constraint = self.identifier_converter(constraint) constraints[constraint] = { 'primary_key': False, 'unique': False, 'foreign_key': (other_table, 
other_column), 'check': False, 'index': False, 'columns': columns.split(','), } # Now get indexes cursor.execute(""" SELECT ind.index_name, LOWER(ind.index_type), LISTAGG(LOWER(cols.column_name), ',') WITHIN GROUP (ORDER BY cols.column_position), LISTAGG(cols.descend, ',') WITHIN GROUP (ORDER BY cols.column_position) FROM user_ind_columns cols, user_indexes ind WHERE cols.table_name = UPPER(%s) AND NOT EXISTS ( SELECT 1 FROM user_constraints cons WHERE ind.index_name = cons.index_name ) AND cols.index_name = ind.index_name GROUP BY ind.index_name, ind.index_type """, [table_name]) for constraint, type_, columns, orders in cursor.fetchall(): constraint = self.identifier_converter(constraint) constraints[constraint] = { 'primary_key': False, 'unique': False, 'foreign_key': None, 'check': False, 'index': True, 'type': 'idx' if type_ == 'normal' else type_, 'columns': columns.split(','), 'orders': orders.split(','), } return constraints
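# ---------------------------------------------------------------------------
# Usage sketch (illustration only, not part of the module above): the
# introspection API is reached through a connection and an open cursor; the
# table name 'AUTHORS' is a hypothetical example.
#
#   from django.db import connection
#
#   with connection.cursor() as cursor:
#       tables = connection.introspection.get_table_list(cursor)
#       columns = connection.introspection.get_table_description(cursor, 'AUTHORS')
#       constraints = connection.introspection.get_constraints(cursor, 'AUTHORS')
#   # Each column is a FieldInfo tuple extended with is_autofield/is_json,
#   # which get_field_type() uses to pick e.g. AutoField or JSONField.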
""" Oracle database backend for Django. Requires cx_Oracle: https://oracle.github.io/python-cx_Oracle/ """ import datetime import decimal import os import platform from contextlib import contextmanager from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.db import IntegrityError from django.db.backends.base.base import BaseDatabaseWrapper from django.utils.asyncio import async_unsafe from django.utils.encoding import force_bytes, force_str from django.utils.functional import cached_property def _setup_environment(environ): # Cygwin requires some special voodoo to set the environment variables # properly so that Oracle will see them. if platform.system().upper().startswith('CYGWIN'): try: import ctypes except ImportError as e: raise ImproperlyConfigured("Error loading ctypes: %s; " "the Oracle backend requires ctypes to " "operate correctly under Cygwin." % e) kernel32 = ctypes.CDLL('kernel32') for name, value in environ: kernel32.SetEnvironmentVariableA(name, value) else: os.environ.update(environ) _setup_environment([ # Oracle takes client-side character set encoding from the environment. ('NLS_LANG', '.AL32UTF8'), # This prevents Unicode from getting mangled by getting encoded into the # potentially non-Unicode database character set. ('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'), ]) try: import cx_Oracle as Database except ImportError as e: raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e) # Some of these import cx_Oracle, so import them after checking if it's installed. from .client import DatabaseClient # NOQA isort:skip from .creation import DatabaseCreation # NOQA isort:skip from .features import DatabaseFeatures # NOQA isort:skip from .introspection import DatabaseIntrospection # NOQA isort:skip from .operations import DatabaseOperations # NOQA isort:skip from .schema import DatabaseSchemaEditor # NOQA isort:skip from .utils import Oracle_datetime # NOQA isort:skip from .validation import DatabaseValidation # NOQA isort:skip @contextmanager def wrap_oracle_errors(): try: yield except Database.DatabaseError as e: # cx_Oracle raises a cx_Oracle.DatabaseError exception with the # following attributes and values: # code = 2091 # message = 'ORA-02091: transaction rolled back # 'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS # _C00102056) violated - parent key not found' # or: # 'ORA-00001: unique constraint (DJANGOTEST.DEFERRABLE_ # PINK_CONSTRAINT) violated # Convert that case to Django's IntegrityError exception. x = e.args[0] if ( hasattr(x, 'code') and hasattr(x, 'message') and x.code == 2091 and ('ORA-02291' in x.message or 'ORA-00001' in x.message) ): raise IntegrityError(*tuple(e.args)) raise class _UninitializedOperatorsDescriptor: def __get__(self, instance, cls=None): # If connection.operators is looked up before a connection has been # created, transparently initialize connection.operators to avert an # AttributeError. if instance is None: raise AttributeError("operators not available as class attribute") # Creating a cursor will initialize the operators. instance.cursor().close() return instance.__dict__['operators'] class DatabaseWrapper(BaseDatabaseWrapper): vendor = 'oracle' display_name = 'Oracle' # This dictionary maps Field objects to their associated Oracle column # types, as strings. Column-type strings can contain format strings; they'll # be interpolated against the values of Field.__dict__ before being output. # If a column type is set to None, it won't be included in the output. 
# # Any format strings starting with "qn_" are quoted before being used in the # output (the "qn_" prefix is stripped before the lookup is performed. data_types = { 'AutoField': 'NUMBER(11) GENERATED BY DEFAULT ON NULL AS IDENTITY', 'BigAutoField': 'NUMBER(19) GENERATED BY DEFAULT ON NULL AS IDENTITY', 'BinaryField': 'BLOB', 'BooleanField': 'NUMBER(1)', 'CharField': 'NVARCHAR2(%(max_length)s)', 'DateField': 'DATE', 'DateTimeField': 'TIMESTAMP', 'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)', 'DurationField': 'INTERVAL DAY(9) TO SECOND(6)', 'FileField': 'NVARCHAR2(%(max_length)s)', 'FilePathField': 'NVARCHAR2(%(max_length)s)', 'FloatField': 'DOUBLE PRECISION', 'IntegerField': 'NUMBER(11)', 'JSONField': 'NCLOB', 'BigIntegerField': 'NUMBER(19)', 'IPAddressField': 'VARCHAR2(15)', 'GenericIPAddressField': 'VARCHAR2(39)', 'NullBooleanField': 'NUMBER(1)', 'OneToOneField': 'NUMBER(11)', 'PositiveBigIntegerField': 'NUMBER(19)', 'PositiveIntegerField': 'NUMBER(11)', 'PositiveSmallIntegerField': 'NUMBER(11)', 'SlugField': 'NVARCHAR2(%(max_length)s)', 'SmallAutoField': 'NUMBER(5) GENERATED BY DEFAULT ON NULL AS IDENTITY', 'SmallIntegerField': 'NUMBER(11)', 'TextField': 'NCLOB', 'TimeField': 'TIMESTAMP', 'URLField': 'VARCHAR2(%(max_length)s)', 'UUIDField': 'VARCHAR2(32)', } data_type_check_constraints = { 'BooleanField': '%(qn_column)s IN (0,1)', 'JSONField': '%(qn_column)s IS JSON', 'NullBooleanField': '%(qn_column)s IN (0,1)', 'PositiveBigIntegerField': '%(qn_column)s >= 0', 'PositiveIntegerField': '%(qn_column)s >= 0', 'PositiveSmallIntegerField': '%(qn_column)s >= 0', } # Oracle doesn't support a database index on these columns. _limited_data_types = ('clob', 'nclob', 'blob') operators = _UninitializedOperatorsDescriptor() _standard_operators = { 'exact': '= %s', 'iexact': '= UPPER(%s)', 'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", 'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", 'gt': '> %s', 'gte': '>= %s', 'lt': '< %s', 'lte': '<= %s', 'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", 'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", 'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", 'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)", } _likec_operators = { **_standard_operators, 'contains': "LIKEC %s ESCAPE '\\'", 'icontains': "LIKEC UPPER(%s) ESCAPE '\\'", 'startswith': "LIKEC %s ESCAPE '\\'", 'endswith': "LIKEC %s ESCAPE '\\'", 'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'", 'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'", } # The patterns below are used to generate SQL pattern lookup clauses when # the right-hand side of the lookup isn't a raw string (it might be an expression # or the result of a bilateral transformation). # In those cases, special characters for LIKE operators (e.g. \, %, _) # should be escaped on the database side. # # Note: we use str.format() here for readability as '%' is used as a wildcard for # the LIKE operator. 
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')" _pattern_ops = { 'contains': "'%%' || {} || '%%'", 'icontains': "'%%' || UPPER({}) || '%%'", 'startswith': "{} || '%%'", 'istartswith': "UPPER({}) || '%%'", 'endswith': "'%%' || {}", 'iendswith': "'%%' || UPPER({})", } _standard_pattern_ops = {k: "LIKE TRANSLATE( " + v + " USING NCHAR_CS)" " ESCAPE TRANSLATE('\\' USING NCHAR_CS)" for k, v in _pattern_ops.items()} _likec_pattern_ops = {k: "LIKEC " + v + " ESCAPE '\\'" for k, v in _pattern_ops.items()} Database = Database SchemaEditorClass = DatabaseSchemaEditor # Classes instantiated in __init__(). client_class = DatabaseClient creation_class = DatabaseCreation features_class = DatabaseFeatures introspection_class = DatabaseIntrospection ops_class = DatabaseOperations validation_class = DatabaseValidation def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) use_returning_into = self.settings_dict["OPTIONS"].get('use_returning_into', True) self.features.can_return_columns_from_insert = use_returning_into def _dsn(self): settings_dict = self.settings_dict if not settings_dict['HOST'].strip(): settings_dict['HOST'] = 'localhost' if settings_dict['PORT']: return Database.makedsn(settings_dict['HOST'], int(settings_dict['PORT']), settings_dict['NAME']) return settings_dict['NAME'] def _connect_string(self): return '%s/"%s"@%s' % (self.settings_dict['USER'], self.settings_dict['PASSWORD'], self._dsn()) def get_connection_params(self): conn_params = self.settings_dict['OPTIONS'].copy() if 'use_returning_into' in conn_params: del conn_params['use_returning_into'] return conn_params @async_unsafe def get_new_connection(self, conn_params): return Database.connect( user=self.settings_dict['USER'], password=self.settings_dict['PASSWORD'], dsn=self._dsn(), **conn_params, ) def init_connection_state(self): cursor = self.create_cursor() # Set the territory first. The territory overrides NLS_DATE_FORMAT # and NLS_TIMESTAMP_FORMAT to the territory default. When all of # these are set in single statement it isn't clear what is supposed # to happen. cursor.execute("ALTER SESSION SET NLS_TERRITORY = 'AMERICA'") # Set Oracle date to ANSI date format. This only needs to execute # once when we create a new connection. We also set the Territory # to 'AMERICA' which forces Sunday to evaluate to a '1' in # TO_CHAR(). cursor.execute( "ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'" " NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'" + (" TIME_ZONE = 'UTC'" if settings.USE_TZ else '') ) cursor.close() if 'operators' not in self.__dict__: # Ticket #14149: Check whether our LIKE implementation will # work for this connection or we need to fall back on LIKEC. # This check is performed only once per DatabaseWrapper # instance per thread, since subsequent connections will use # the same settings. cursor = self.create_cursor() try: cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s" % self._standard_operators['contains'], ['X']) except Database.DatabaseError: self.operators = self._likec_operators self.pattern_ops = self._likec_pattern_ops else: self.operators = self._standard_operators self.pattern_ops = self._standard_pattern_ops cursor.close() self.connection.stmtcachesize = 20 # Ensure all changes are preserved even when AUTOCOMMIT is False. 
if not self.get_autocommit(): self.commit() @async_unsafe def create_cursor(self, name=None): return FormatStylePlaceholderCursor(self.connection) def _commit(self): if self.connection is not None: with wrap_oracle_errors(): return self.connection.commit() # Oracle doesn't support releasing savepoints. But we fake them when query # logging is enabled to keep query counts consistent with other backends. def _savepoint_commit(self, sid): if self.queries_logged: self.queries_log.append({ 'sql': '-- RELEASE SAVEPOINT %s (faked)' % self.ops.quote_name(sid), 'time': '0.000', }) def _set_autocommit(self, autocommit): with self.wrap_database_errors: self.connection.autocommit = autocommit def check_constraints(self, table_names=None): """ Check constraints by setting them to immediate. Return them to deferred afterward. """ with self.cursor() as cursor: cursor.execute('SET CONSTRAINTS ALL IMMEDIATE') cursor.execute('SET CONSTRAINTS ALL DEFERRED') def is_usable(self): try: self.connection.ping() except Database.Error: return False else: return True @cached_property def cx_oracle_version(self): return tuple(int(x) for x in Database.version.split('.')) @cached_property def oracle_version(self): with self.temporary_connection(): return tuple(int(x) for x in self.connection.version.split('.')) class OracleParam: """ Wrapper object for formatting parameters for Oracle. If the string representation of the value is large enough (greater than 4000 characters) the input size needs to be set as CLOB. Alternatively, if the parameter has an `input_size` attribute, then the value of the `input_size` attribute will be used instead. Otherwise, no input size will be set for the parameter when executing the query. """ def __init__(self, param, cursor, strings_only=False): # With raw SQL queries, datetimes can reach this function # without being converted by DateTimeField.get_db_prep_value. if settings.USE_TZ and (isinstance(param, datetime.datetime) and not isinstance(param, Oracle_datetime)): param = Oracle_datetime.from_datetime(param) string_size = 0 # Oracle doesn't recognize True and False correctly. if param is True: param = 1 elif param is False: param = 0 if hasattr(param, 'bind_parameter'): self.force_bytes = param.bind_parameter(cursor) elif isinstance(param, (Database.Binary, datetime.timedelta)): self.force_bytes = param else: # To transmit to the database, we need Unicode if supported # To get size right, we must consider bytes. self.force_bytes = force_str(param, cursor.charset, strings_only) if isinstance(self.force_bytes, str): # We could optimize by only converting up to 4000 bytes here string_size = len(force_bytes(param, cursor.charset, strings_only)) if hasattr(param, 'input_size'): # If parameter has `input_size` attribute, use that. self.input_size = param.input_size elif string_size > 4000: # Mark any string param greater than 4000 characters as a CLOB. self.input_size = Database.CLOB elif isinstance(param, datetime.datetime): self.input_size = Database.TIMESTAMP else: self.input_size = None class VariableWrapper: """ An adapter class for cursor variables that prevents the wrapped object from being converted into a string when used to instantiate an OracleParam. This can be used generally for any other object that should be passed into Cursor.execute as-is. 
""" def __init__(self, var): self.var = var def bind_parameter(self, cursor): return self.var def __getattr__(self, key): return getattr(self.var, key) def __setattr__(self, key, value): if key == 'var': self.__dict__[key] = value else: setattr(self.var, key, value) class FormatStylePlaceholderCursor: """ Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var" style. This fixes it -- but note that if you want to use a literal "%s" in a query, you'll need to use "%%s". """ charset = 'utf-8' def __init__(self, connection): self.cursor = connection.cursor() self.cursor.outputtypehandler = self._output_type_handler @staticmethod def _output_number_converter(value): return decimal.Decimal(value) if '.' in value else int(value) @staticmethod def _get_decimal_converter(precision, scale): if scale == 0: return int context = decimal.Context(prec=precision) quantize_value = decimal.Decimal(1).scaleb(-scale) return lambda v: decimal.Decimal(v).quantize(quantize_value, context=context) @staticmethod def _output_type_handler(cursor, name, defaultType, length, precision, scale): """ Called for each db column fetched from cursors. Return numbers as the appropriate Python type. """ if defaultType == Database.NUMBER: if scale == -127: if precision == 0: # NUMBER column: decimal-precision floating point. # This will normally be an integer from a sequence, # but it could be a decimal value. outconverter = FormatStylePlaceholderCursor._output_number_converter else: # FLOAT column: binary-precision floating point. # This comes from FloatField columns. outconverter = float elif precision > 0: # NUMBER(p,s) column: decimal-precision fixed point. # This comes from IntegerField and DecimalField columns. outconverter = FormatStylePlaceholderCursor._get_decimal_converter(precision, scale) else: # No type information. This normally comes from a # mathematical expression in the SELECT list. Guess int # or Decimal based on whether it has a decimal point. outconverter = FormatStylePlaceholderCursor._output_number_converter return cursor.var( Database.STRING, size=255, arraysize=cursor.arraysize, outconverter=outconverter, ) def _format_params(self, params): try: return {k: OracleParam(v, self, True) for k, v in params.items()} except AttributeError: return tuple(OracleParam(p, self, True) for p in params) def _guess_input_sizes(self, params_list): # Try dict handling; if that fails, treat as sequence if hasattr(params_list[0], 'keys'): sizes = {} for params in params_list: for k, value in params.items(): if value.input_size: sizes[k] = value.input_size if sizes: self.setinputsizes(**sizes) else: # It's not a list of dicts; it's a list of sequences sizes = [None] * len(params_list[0]) for params in params_list: for i, value in enumerate(params): if value.input_size: sizes[i] = value.input_size if sizes: self.setinputsizes(*sizes) def _param_generator(self, params): # Try dict handling; if that fails, treat as sequence if hasattr(params, 'items'): return {k: v.force_bytes for k, v in params.items()} else: return [p.force_bytes for p in params] def _fix_for_params(self, query, params, unify_by_values=False): # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it # it does want a trailing ';' but not a trailing '/'. However, these # characters must be included in the original query in case the query # is being passed to SQL*Plus. 
if query.endswith(';') or query.endswith('/'): query = query[:-1] if params is None: params = [] elif hasattr(params, 'keys'): # Handle params as dict args = {k: ":%s" % k for k in params} query = query % args elif unify_by_values and params: # Handle params as a dict with unified query parameters by their # values. It can be used only in single query execute() because # executemany() shares the formatted query with each of the params # list. e.g. for input params = [0.75, 2, 0.75, 'sth', 0.75] # params_dict = {0.75: ':arg0', 2: ':arg1', 'sth': ':arg2'} # args = [':arg0', ':arg1', ':arg0', ':arg2', ':arg0'] # params = {':arg0': 0.75, ':arg1': 2, ':arg2': 'sth'} params_dict = { param: ':arg%d' % i for i, param in enumerate(dict.fromkeys(params)) } args = [params_dict[param] for param in params] params = {value: key for key, value in params_dict.items()} query = query % tuple(args) else: # Handle params as sequence args = [(':arg%d' % i) for i in range(len(params))] query = query % tuple(args) return query, self._format_params(params) def execute(self, query, params=None): query, params = self._fix_for_params(query, params, unify_by_values=True) self._guess_input_sizes([params]) with wrap_oracle_errors(): return self.cursor.execute(query, self._param_generator(params)) def executemany(self, query, params=None): if not params: # No params given, nothing to do return None # uniform treatment for sequences and iterables params_iter = iter(params) query, firstparams = self._fix_for_params(query, next(params_iter)) # we build a list of formatted params; as we're going to traverse it # more than once, we can't make it lazy by using a generator formatted = [firstparams] + [self._format_params(p) for p in params_iter] self._guess_input_sizes(formatted) with wrap_oracle_errors(): return self.cursor.executemany(query, [self._param_generator(p) for p in formatted]) def close(self): try: self.cursor.close() except Database.InterfaceError: # already closed pass def var(self, *args): return VariableWrapper(self.cursor.var(*args)) def arrayvar(self, *args): return VariableWrapper(self.cursor.arrayvar(*args)) def __getattr__(self, attr): return getattr(self.cursor, attr) def __iter__(self): return iter(self.cursor)
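# ---------------------------------------------------------------------------
# Placeholder-conversion sketch (illustration only, not part of the module
# above): Django issues "format" ('%s') placeholders, which
# FormatStylePlaceholderCursor._fix_for_params() rewrites to ':argN' binds
# before handing the query to cx_Oracle.
#
#   from django.db import connection
#
#   with connection.cursor() as cursor:
#       # Becomes: SELECT dummy FROM dual WHERE dummy = :arg0
#       cursor.execute('SELECT dummy FROM dual WHERE dummy = %s', ['X'])
#       row = cursor.fetchone()
#       # Because the query string is %-interpolated with the generated bind
#       # names, a literal '%' must be doubled even when no params are given.
#       cursor.execute("SELECT 1 FROM dual WHERE 'a%%' LIKE 'a%%'")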
import datetime import decimal from importlib import import_module import sqlparse from django.conf import settings from django.db import NotSupportedError, transaction from django.db.backends import utils from django.utils import timezone from django.utils.encoding import force_str class BaseDatabaseOperations: """ Encapsulate backend-specific differences, such as the way a backend performs ordering or calculates the ID of a recently-inserted row. """ compiler_module = "django.db.models.sql.compiler" # Integer field safe ranges by `internal_type` as documented # in docs/ref/models/fields.txt. integer_field_ranges = { 'SmallIntegerField': (-32768, 32767), 'IntegerField': (-2147483648, 2147483647), 'BigIntegerField': (-9223372036854775808, 9223372036854775807), 'PositiveBigIntegerField': (0, 9223372036854775807), 'PositiveSmallIntegerField': (0, 32767), 'PositiveIntegerField': (0, 2147483647), 'SmallAutoField': (-32768, 32767), 'AutoField': (-2147483648, 2147483647), 'BigAutoField': (-9223372036854775808, 9223372036854775807), } set_operators = { 'union': 'UNION', 'intersection': 'INTERSECT', 'difference': 'EXCEPT', } # Mapping of Field.get_internal_type() (typically the model field's class # name) to the data type to use for the Cast() function, if different from # DatabaseWrapper.data_types. cast_data_types = {} # CharField data type if the max_length argument isn't provided. cast_char_field_without_max_length = None # Start and end points for window expressions. PRECEDING = 'PRECEDING' FOLLOWING = 'FOLLOWING' UNBOUNDED_PRECEDING = 'UNBOUNDED ' + PRECEDING UNBOUNDED_FOLLOWING = 'UNBOUNDED ' + FOLLOWING CURRENT_ROW = 'CURRENT ROW' # Prefix for EXPLAIN queries, or None EXPLAIN isn't supported. explain_prefix = None def __init__(self, connection): self.connection = connection self._cache = None def autoinc_sql(self, table, column): """ Return any SQL needed to support auto-incrementing primary keys, or None if no SQL is necessary. This SQL is executed when a table is created. """ return None def bulk_batch_size(self, fields, objs): """ Return the maximum allowed batch size for the backend. The fields are the fields going to be inserted in the batch, the objs contains all the objects to be inserted. """ return len(objs) def cache_key_culling_sql(self): """ Return an SQL query that retrieves the first cache key greater than the n smallest. This is used by the 'db' cache backend to determine where to start culling. """ return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s" def unification_cast_sql(self, output_field): """ Given a field instance, return the SQL that casts the result of a union to that type. The resulting string should contain a '%s' placeholder for the expression being cast. """ return '%s' def date_extract_sql(self, lookup_type, field_name): """ Given a lookup_type of 'year', 'month', or 'day', return the SQL that extracts a value from the given date field field_name. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_extract_sql() method') def date_trunc_sql(self, lookup_type, field_name): """ Given a lookup_type of 'year', 'month', or 'day', return the SQL that truncates the given date field field_name to a date object with only the given specificity. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_trunc_sql() method.') def datetime_cast_date_sql(self, field_name, tzname): """ Return the SQL to cast a datetime value to date value. 
""" raise NotImplementedError( 'subclasses of BaseDatabaseOperations may require a ' 'datetime_cast_date_sql() method.' ) def datetime_cast_time_sql(self, field_name, tzname): """ Return the SQL to cast a datetime value to time value. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_time_sql() method') def datetime_extract_sql(self, lookup_type, field_name, tzname): """ Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute', or 'second', return the SQL that extracts a value from the given datetime field field_name. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_extract_sql() method') def datetime_trunc_sql(self, lookup_type, field_name, tzname): """ Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute', or 'second', return the SQL that truncates the given datetime field field_name to a datetime object with only the given specificity. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_trunc_sql() method') def time_trunc_sql(self, lookup_type, field_name): """ Given a lookup_type of 'hour', 'minute' or 'second', return the SQL that truncates the given time field field_name to a time object with only the given specificity. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a time_trunc_sql() method') def time_extract_sql(self, lookup_type, field_name): """ Given a lookup_type of 'hour', 'minute', or 'second', return the SQL that extracts a value from the given time field field_name. """ return self.date_extract_sql(lookup_type, field_name) def json_cast_text_sql(self, field_name): """Return the SQL to cast a JSON value to text value.""" raise NotImplementedError( 'subclasses of BaseDatabaseOperations may require a ' 'json_cast_text_sql() method' ) def deferrable_sql(self): """ Return the SQL to make a constraint "initially deferred" during a CREATE TABLE statement. """ return '' def distinct_sql(self, fields, params): """ Return an SQL DISTINCT clause which removes duplicate rows from the result set. If any fields are given, only check the given fields for duplicates. """ if fields: raise NotSupportedError('DISTINCT ON fields is not supported by this database backend') else: return ['DISTINCT'], [] def fetch_returned_insert_columns(self, cursor, returning_params): """ Given a cursor object that has just performed an INSERT...RETURNING statement into a table, return the newly created data. """ return cursor.fetchone() def field_cast_sql(self, db_type, internal_type): """ Given a column type (e.g. 'BLOB', 'VARCHAR') and an internal type (e.g. 'GenericIPAddressField'), return the SQL to cast it before using it in a WHERE statement. The resulting string should contain a '%s' placeholder for the column being searched against. """ return '%s' def force_no_ordering(self): """ Return a list used in the "ORDER BY" clause to force no ordering at all. Return an empty list to include nothing in the ordering. """ return [] def for_update_sql(self, nowait=False, skip_locked=False, of=(), no_key=False): """ Return the FOR UPDATE SQL clause to lock rows for an update operation. 
""" return 'FOR%s UPDATE%s%s%s' % ( ' NO KEY' if no_key else '', ' OF %s' % ', '.join(of) if of else '', ' NOWAIT' if nowait else '', ' SKIP LOCKED' if skip_locked else '', ) def _get_limit_offset_params(self, low_mark, high_mark): offset = low_mark or 0 if high_mark is not None: return (high_mark - offset), offset elif offset: return self.connection.ops.no_limit_value(), offset return None, offset def limit_offset_sql(self, low_mark, high_mark): """Return LIMIT/OFFSET SQL clause.""" limit, offset = self._get_limit_offset_params(low_mark, high_mark) return ' '.join(sql for sql in ( ('LIMIT %d' % limit) if limit else None, ('OFFSET %d' % offset) if offset else None, ) if sql) def last_executed_query(self, cursor, sql, params): """ Return a string of the query last executed by the given cursor, with placeholders replaced with actual values. `sql` is the raw query containing placeholders and `params` is the sequence of parameters. These are used by default, but this method exists for database backends to provide a better implementation according to their own quoting schemes. """ # Convert params to contain string values. def to_string(s): return force_str(s, strings_only=True, errors='replace') if isinstance(params, (list, tuple)): u_params = tuple(to_string(val) for val in params) elif params is None: u_params = () else: u_params = {to_string(k): to_string(v) for k, v in params.items()} return "QUERY = %r - PARAMS = %r" % (sql, u_params) def last_insert_id(self, cursor, table_name, pk_name): """ Given a cursor object that has just performed an INSERT statement into a table that has an auto-incrementing ID, return the newly created ID. `pk_name` is the name of the primary-key column. """ return cursor.lastrowid def lookup_cast(self, lookup_type, internal_type=None): """ Return the string to use in a query when performing lookups ("contains", "like", etc.). It should contain a '%s' placeholder for the column being searched against. """ return "%s" def max_in_list_size(self): """ Return the maximum number of items that can be passed in a single 'IN' list condition, or None if the backend does not impose a limit. """ return None def max_name_length(self): """ Return the maximum length of table and column names, or None if there is no limit. """ return None def no_limit_value(self): """ Return the value to use for the LIMIT when we are wanting "LIMIT infinity". Return None if the limit clause can be omitted in this case. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a no_limit_value() method') def pk_default_value(self): """ Return the value to use during an INSERT statement to specify that the field should use its default value. """ return 'DEFAULT' def prepare_sql_script(self, sql): """ Take an SQL script that may contain multiple lines and return a list of statements to feed to successive cursor.execute() calls. Since few databases are able to process raw SQL scripts in a single cursor.execute() call and PEP 249 doesn't talk about this use case, the default implementation is conservative. """ return [ sqlparse.format(statement, strip_comments=True) for statement in sqlparse.split(sql) if statement ] def process_clob(self, value): """ Return the value of a CLOB column, for backends that return a locator object that requires additional processing. """ return value def return_insert_columns(self, fields): """ For backends that support returning columns as part of an insert query, return the SQL and params to append to the INSERT query. 
The returned fragment should contain a format string to hold the appropriate column. """ pass def compiler(self, compiler_name): """ Return the SQLCompiler class corresponding to the given name, in the namespace corresponding to the `compiler_module` attribute on this backend. """ if self._cache is None: self._cache = import_module(self.compiler_module) return getattr(self._cache, compiler_name) def quote_name(self, name): """ Return a quoted version of the given table, index, or column name. Do not quote the given name if it's already been quoted. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a quote_name() method') def random_function_sql(self): """Return an SQL expression that returns a random value.""" return 'RANDOM()' def regex_lookup(self, lookup_type): """ Return the string to use in a query when performing regular expression lookups (using "regex" or "iregex"). It should contain a '%s' placeholder for the column being searched against. If the feature is not supported (or part of it is not supported), raise NotImplementedError. """ raise NotImplementedError('subclasses of BaseDatabaseOperations may require a regex_lookup() method') def savepoint_create_sql(self, sid): """ Return the SQL for starting a new savepoint. Only required if the "uses_savepoints" feature is True. The "sid" parameter is a string for the savepoint id. """ return "SAVEPOINT %s" % self.quote_name(sid) def savepoint_commit_sql(self, sid): """ Return the SQL for committing the given savepoint. """ return "RELEASE SAVEPOINT %s" % self.quote_name(sid) def savepoint_rollback_sql(self, sid): """ Return the SQL for rolling back the given savepoint. """ return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid) def set_time_zone_sql(self): """ Return the SQL that will set the connection's time zone. Return '' if the backend doesn't support time zones. """ return '' def sql_flush(self, style, tables, *, reset_sequences=False, allow_cascade=False): """ Return a list of SQL statements required to remove all data from the given database tables (without actually removing the tables themselves). The `style` argument is a Style object as returned by either color_style() or no_style() in django.core.management.color. If `reset_sequences` is True, the list includes SQL statements required to reset the sequences. The `allow_cascade` argument determines whether truncation may cascade to tables with foreign keys pointing the tables being truncated. PostgreSQL requires a cascade even if these tables are empty. """ raise NotImplementedError('subclasses of BaseDatabaseOperations must provide an sql_flush() method') def execute_sql_flush(self, sql_list): """Execute a list of SQL statements to flush the database.""" with transaction.atomic( using=self.connection.alias, savepoint=self.connection.features.can_rollback_ddl, ): with self.connection.cursor() as cursor: for sql in sql_list: cursor.execute(sql) def sequence_reset_by_name_sql(self, style, sequences): """ Return a list of the SQL statements required to reset sequences passed in `sequences`. The `style` argument is a Style object as returned by either color_style() or no_style() in django.core.management.color. """ return [] def sequence_reset_sql(self, style, model_list): """ Return a list of the SQL statements required to reset sequences for the given models. The `style` argument is a Style object as returned by either color_style() or no_style() in django.core.management.color. """ return [] # No sequence reset required by default. 
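    # A hedged usage sketch, not part of the class itself: the savepoint
    # hooks above only compose strings around quote_name(), so for a
    # backend whose quote_name() double-quotes identifiers one would
    # expect:
    #
    #     >>> ops.savepoint_create_sql('s1')
    #     'SAVEPOINT "s1"'
    #     >>> ops.savepoint_rollback_sql('s1')
    #     'ROLLBACK TO SAVEPOINT "s1"'
    #
    # execute_sql_flush() then runs the statements produced by sql_flush()
    # inside a single atomic block, taking a savepoint only when the
    # backend can roll back DDL.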
def start_transaction_sql(self): """Return the SQL statement required to start a transaction.""" return "BEGIN;" def end_transaction_sql(self, success=True): """Return the SQL statement required to end a transaction.""" if not success: return "ROLLBACK;" return "COMMIT;" def tablespace_sql(self, tablespace, inline=False): """ Return the SQL that will be used in a query to define the tablespace. Return '' if the backend doesn't support tablespaces. If `inline` is True, append the SQL to a row; otherwise append it to the entire CREATE TABLE or CREATE INDEX statement. """ return '' def prep_for_like_query(self, x): """Prepare a value for use in a LIKE query.""" return str(x).replace("\\", "\\\\").replace("%", r"\%").replace("_", r"\_") # Same as prep_for_like_query(), but called for "iexact" matches, which # need not necessarily be implemented using "LIKE" in the backend. prep_for_iexact_query = prep_for_like_query def validate_autopk_value(self, value): """ Certain backends do not accept some values for "serial" fields (for example zero in MySQL). Raise a ValueError if the value is invalid, otherwise return the validated value. """ return value def adapt_unknown_value(self, value): """ Transform a value to something compatible with the backend driver. This method only depends on the type of the value. It's designed for cases where the target type isn't known, such as .raw() SQL queries. As a consequence it may not work perfectly in all circumstances. """ if isinstance(value, datetime.datetime): # must be before date return self.adapt_datetimefield_value(value) elif isinstance(value, datetime.date): return self.adapt_datefield_value(value) elif isinstance(value, datetime.time): return self.adapt_timefield_value(value) elif isinstance(value, decimal.Decimal): return self.adapt_decimalfield_value(value) else: return value def adapt_datefield_value(self, value): """ Transform a date value to an object compatible with what is expected by the backend driver for date columns. """ if value is None: return None return str(value) def adapt_datetimefield_value(self, value): """ Transform a datetime value to an object compatible with what is expected by the backend driver for datetime columns. """ if value is None: return None return str(value) def adapt_timefield_value(self, value): """ Transform a time value to an object compatible with what is expected by the backend driver for time columns. """ if value is None: return None if timezone.is_aware(value): raise ValueError("Django does not support timezone-aware times.") return str(value) def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None): """ Transform a decimal.Decimal value to an object compatible with what is expected by the backend driver for decimal (numeric) columns. """ return utils.format_number(value, max_digits, decimal_places) def adapt_ipaddressfield_value(self, value): """ Transform a string representation of an IP address into the expected type for the backend driver. """ return value or None def year_lookup_bounds_for_date_field(self, value): """ Return a two-elements list with the lower and upper bound to be used with a BETWEEN operator to query a DateField value using a year lookup. `value` is an int, containing the looked-up year. 
""" first = datetime.date(value, 1, 1) second = datetime.date(value, 12, 31) first = self.adapt_datefield_value(first) second = self.adapt_datefield_value(second) return [first, second] def year_lookup_bounds_for_datetime_field(self, value): """ Return a two-elements list with the lower and upper bound to be used with a BETWEEN operator to query a DateTimeField value using a year lookup. `value` is an int, containing the looked-up year. """ first = datetime.datetime(value, 1, 1) second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999) if settings.USE_TZ: tz = timezone.get_current_timezone() first = timezone.make_aware(first, tz) second = timezone.make_aware(second, tz) first = self.adapt_datetimefield_value(first) second = self.adapt_datetimefield_value(second) return [first, second] def get_db_converters(self, expression): """ Return a list of functions needed to convert field data. Some field types on some backends do not provide data in the correct format, this is the hook for converter functions. """ return [] def convert_durationfield_value(self, value, expression, connection): if value is not None: return datetime.timedelta(0, 0, value) def check_expression_support(self, expression): """ Check that the backend supports the provided expression. This is used on specific backends to rule out known expressions that have problematic or nonexistent implementations. If the expression has a known problem, the backend should raise NotSupportedError. """ pass def conditional_expression_supported_in_where_clause(self, expression): """ Return True, if the conditional expression is supported in the WHERE clause. """ return True def combine_expression(self, connector, sub_expressions): """ Combine a list of subexpressions into a single expression, using the provided connecting operator. This is required because operators can vary between backends (e.g., Oracle with %% and &) and between subexpression types (e.g., date expressions). """ conn = ' %s ' % connector return conn.join(sub_expressions) def combine_duration_expression(self, connector, sub_expressions): return self.combine_expression(connector, sub_expressions) def binary_placeholder_sql(self, value): """ Some backends require special syntax to insert binary content (MySQL for example uses '_binary %s'). """ return '%s' def modify_insert_params(self, placeholder, params): """ Allow modification of insert parameters. Needed for Oracle Spatial backend due to #10888. """ return params def integer_field_range(self, internal_type): """ Given an integer field internal type (e.g. 'PositiveIntegerField'), return a tuple of the (min_value, max_value) form representing the range of the column type bound to the field. """ return self.integer_field_ranges[internal_type] def subtract_temporals(self, internal_type, lhs, rhs): if self.connection.features.supports_temporal_subtraction: lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs return '(%s - %s)' % (lhs_sql, rhs_sql), (*lhs_params, *rhs_params) raise NotSupportedError("This backend does not support %s subtraction." % internal_type) def window_frame_start(self, start): if isinstance(start, int): if start < 0: return '%d %s' % (abs(start), self.PRECEDING) elif start == 0: return self.CURRENT_ROW elif start is None: return self.UNBOUNDED_PRECEDING raise ValueError("start argument must be a negative integer, zero, or None, but got '%s'." 
% start) def window_frame_end(self, end): if isinstance(end, int): if end == 0: return self.CURRENT_ROW elif end > 0: return '%d %s' % (end, self.FOLLOWING) elif end is None: return self.UNBOUNDED_FOLLOWING raise ValueError("end argument must be a positive integer, zero, or None, but got '%s'." % end) def window_frame_rows_start_end(self, start=None, end=None): """ Return SQL for start and end points in an OVER clause window frame. """ if not self.connection.features.supports_over_clause: raise NotSupportedError('This backend does not support window expressions.') return self.window_frame_start(start), self.window_frame_end(end) def window_frame_range_start_end(self, start=None, end=None): start_, end_ = self.window_frame_rows_start_end(start, end) if ( self.connection.features.only_supports_unbounded_with_preceding_and_following and ((start and start < 0) or (end and end > 0)) ): raise NotSupportedError( '%s only supports UNBOUNDED together with PRECEDING and ' 'FOLLOWING.' % self.connection.display_name ) return start_, end_ def explain_query_prefix(self, format=None, **options): if not self.connection.features.supports_explaining_query_execution: raise NotSupportedError('This backend does not support explaining query execution.') if format: supported_formats = self.connection.features.supported_explain_formats normalized_format = format.upper() if normalized_format not in supported_formats: msg = '%s is not a recognized format.' % normalized_format if supported_formats: msg += ' Allowed formats: %s' % ', '.join(sorted(supported_formats)) raise ValueError(msg) if options: raise ValueError('Unknown options: %s' % ', '.join(sorted(options.keys()))) return self.explain_prefix def insert_statement(self, ignore_conflicts=False): return 'INSERT INTO' def ignore_conflicts_suffix_sql(self, ignore_conflicts=None): return ''
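# A minimal sketch of how a backend fills in the required hooks above;
# "toybackend" and ToyDatabaseOperations are hypothetical names used only
# for illustration, not a real Django backend.
class ToyDatabaseOperations(BaseDatabaseOperations):
    compiler_module = 'toybackend.compiler'  # hypothetical compiler module

    def quote_name(self, name):
        # Double-quote identifiers, but never quote an already-quoted name.
        if name.startswith('"') and name.endswith('"'):
            return name
        return '"%s"' % name

    def no_limit_value(self):
        # Assume the LIMIT clause can simply be omitted to mean "no limit".
        return None

    def date_extract_sql(self, lookup_type, field_name):
        # Assume standard SQL EXTRACT(... FROM ...) syntax.
        return 'EXTRACT(%s FROM %s)' % (lookup_type.upper(), field_name)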
import uuid from django.conf import settings from django.db.backends.base.operations import BaseDatabaseOperations from django.utils import timezone from django.utils.encoding import force_str class DatabaseOperations(BaseDatabaseOperations): compiler_module = "django.db.backends.mysql.compiler" # MySQL stores positive fields as UNSIGNED ints. integer_field_ranges = { **BaseDatabaseOperations.integer_field_ranges, 'PositiveSmallIntegerField': (0, 65535), 'PositiveIntegerField': (0, 4294967295), 'PositiveBigIntegerField': (0, 18446744073709551615), } cast_data_types = { 'AutoField': 'signed integer', 'BigAutoField': 'signed integer', 'SmallAutoField': 'signed integer', 'CharField': 'char(%(max_length)s)', 'DecimalField': 'decimal(%(max_digits)s, %(decimal_places)s)', 'TextField': 'char', 'IntegerField': 'signed integer', 'BigIntegerField': 'signed integer', 'SmallIntegerField': 'signed integer', 'PositiveBigIntegerField': 'unsigned integer', 'PositiveIntegerField': 'unsigned integer', 'PositiveSmallIntegerField': 'unsigned integer', } cast_char_field_without_max_length = 'char' explain_prefix = 'EXPLAIN' def date_extract_sql(self, lookup_type, field_name): # https://dev.mysql.com/doc/mysql/en/date-and-time-functions.html if lookup_type == 'week_day': # DAYOFWEEK() returns an integer, 1-7, Sunday=1. return "DAYOFWEEK(%s)" % field_name elif lookup_type == 'iso_week_day': # WEEKDAY() returns an integer, 0-6, Monday=0. return "WEEKDAY(%s) + 1" % field_name elif lookup_type == 'week': # Override the value of default_week_format for consistency with # other database backends. # Mode 3: Monday, 1-53, with 4 or more days this year. return "WEEK(%s, 3)" % field_name elif lookup_type == 'iso_year': # Get the year part from the YEARWEEK function, which returns a # number as year * 100 + week. return "TRUNCATE(YEARWEEK(%s, 3), -2) / 100" % field_name else: # EXTRACT returns 1-53 based on ISO-8601 for the week number. return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name) def date_trunc_sql(self, lookup_type, field_name): fields = { 'year': '%%Y-01-01', 'month': '%%Y-%%m-01', } # Use double percents to escape. 
if lookup_type in fields: format_str = fields[lookup_type] return "CAST(DATE_FORMAT(%s, '%s') AS DATE)" % (field_name, format_str) elif lookup_type == 'quarter': return "MAKEDATE(YEAR(%s), 1) + INTERVAL QUARTER(%s) QUARTER - INTERVAL 1 QUARTER" % ( field_name, field_name ) elif lookup_type == 'week': return "DATE_SUB(%s, INTERVAL WEEKDAY(%s) DAY)" % ( field_name, field_name ) else: return "DATE(%s)" % (field_name) def _prepare_tzname_delta(self, tzname): if '+' in tzname: return tzname[tzname.find('+'):] elif '-' in tzname: return tzname[tzname.find('-'):] return tzname def _convert_field_to_tz(self, field_name, tzname): if settings.USE_TZ and self.connection.timezone_name != tzname: field_name = "CONVERT_TZ(%s, '%s', '%s')" % ( field_name, self.connection.timezone_name, self._prepare_tzname_delta(tzname), ) return field_name def datetime_cast_date_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return "DATE(%s)" % field_name def datetime_cast_time_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return "TIME(%s)" % field_name def datetime_extract_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return self.date_extract_sql(lookup_type, field_name) def datetime_trunc_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) fields = ['year', 'month', 'day', 'hour', 'minute', 'second'] format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape. format_def = ('0000-', '01', '-01', ' 00:', '00', ':00') if lookup_type == 'quarter': return ( "CAST(DATE_FORMAT(MAKEDATE(YEAR({field_name}), 1) + " "INTERVAL QUARTER({field_name}) QUARTER - " + "INTERVAL 1 QUARTER, '%%Y-%%m-01 00:00:00') AS DATETIME)" ).format(field_name=field_name) if lookup_type == 'week': return ( "CAST(DATE_FORMAT(DATE_SUB({field_name}, " "INTERVAL WEEKDAY({field_name}) DAY), " "'%%Y-%%m-%%d 00:00:00') AS DATETIME)" ).format(field_name=field_name) try: i = fields.index(lookup_type) + 1 except ValueError: sql = field_name else: format_str = ''.join(format[:i] + format_def[i:]) sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str) return sql def time_trunc_sql(self, lookup_type, field_name): fields = { 'hour': '%%H:00:00', 'minute': '%%H:%%i:00', 'second': '%%H:%%i:%%s', } # Use double percents to escape. if lookup_type in fields: format_str = fields[lookup_type] return "CAST(DATE_FORMAT(%s, '%s') AS TIME)" % (field_name, format_str) else: return "TIME(%s)" % (field_name) def fetch_returned_insert_rows(self, cursor): """ Given a cursor object that has just performed an INSERT...RETURNING statement into a table, return the tuple of returned data. """ return cursor.fetchall() def format_for_duration_arithmetic(self, sql): return 'INTERVAL %s MICROSECOND' % sql def force_no_ordering(self): """ "ORDER BY NULL" prevents MySQL from implicitly ordering by grouped columns. If no ordering would otherwise be applied, we don't want any implicit sorting going on. """ return [(None, ("NULL", [], False))] def last_executed_query(self, cursor, sql, params): # With MySQLdb, cursor objects have an (undocumented) "_executed" # attribute where the exact query sent to the database is saved. # See MySQLdb/cursors.py in the source distribution. # MySQLdb returns string, PyMySQL bytes. 
return force_str(getattr(cursor, '_executed', None), errors='replace') def no_limit_value(self): # 2**64 - 1, as recommended by the MySQL documentation return 18446744073709551615 def quote_name(self, name): if name.startswith("`") and name.endswith("`"): return name # Quoting once is enough. return "`%s`" % name def random_function_sql(self): return 'RAND()' def return_insert_columns(self, fields): # MySQL and MariaDB < 10.5.0 don't support an INSERT...RETURNING # statement. if not fields: return '', () columns = [ '%s.%s' % ( self.quote_name(field.model._meta.db_table), self.quote_name(field.column), ) for field in fields ] return 'RETURNING %s' % ', '.join(columns), () def sql_flush(self, style, tables, *, reset_sequences=False, allow_cascade=False): if not tables: return [] sql = ['SET FOREIGN_KEY_CHECKS = 0;'] if reset_sequences: # It's faster to TRUNCATE tables that require a sequence reset # since ALTER TABLE AUTO_INCREMENT is slower than TRUNCATE. sql.extend( '%s %s;' % ( style.SQL_KEYWORD('TRUNCATE'), style.SQL_FIELD(self.quote_name(table_name)), ) for table_name in tables ) else: # Otherwise issue a simple DELETE since it's faster than TRUNCATE # and preserves sequences. sql.extend( '%s %s %s;' % ( style.SQL_KEYWORD('DELETE'), style.SQL_KEYWORD('FROM'), style.SQL_FIELD(self.quote_name(table_name)), ) for table_name in tables ) sql.append('SET FOREIGN_KEY_CHECKS = 1;') return sql def sequence_reset_by_name_sql(self, style, sequences): return [ '%s %s %s %s = 1;' % ( style.SQL_KEYWORD('ALTER'), style.SQL_KEYWORD('TABLE'), style.SQL_FIELD(self.quote_name(sequence_info['table'])), style.SQL_FIELD('AUTO_INCREMENT'), ) for sequence_info in sequences ] def validate_autopk_value(self, value): # MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653. if value == 0: raise ValueError('The database backend does not accept 0 as a ' 'value for AutoField.') return value def adapt_datetimefield_value(self, value): if value is None: return None # Expression values are adapted by the database. if hasattr(value, 'resolve_expression'): return value # MySQL doesn't support tz-aware datetimes if timezone.is_aware(value): if settings.USE_TZ: value = timezone.make_naive(value, self.connection.timezone) else: raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.") return str(value) def adapt_timefield_value(self, value): if value is None: return None # Expression values are adapted by the database. if hasattr(value, 'resolve_expression'): return value # MySQL doesn't support tz-aware times if timezone.is_aware(value): raise ValueError("MySQL backend does not support timezone-aware times.") return str(value) def max_name_length(self): return 64 def bulk_insert_sql(self, fields, placeholder_rows): placeholder_rows_sql = (", ".join(row) for row in placeholder_rows) values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql) return "VALUES " + values_sql def combine_expression(self, connector, sub_expressions): if connector == '^': return 'POW(%s)' % ','.join(sub_expressions) # Convert the result to a signed integer since MySQL's binary operators # return an unsigned integer. 
elif connector in ('&', '|', '<<', '#'): connector = '^' if connector == '#' else connector return 'CONVERT(%s, SIGNED)' % connector.join(sub_expressions) elif connector == '>>': lhs, rhs = sub_expressions return 'FLOOR(%(lhs)s / POW(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs} return super().combine_expression(connector, sub_expressions) def get_db_converters(self, expression): converters = super().get_db_converters(expression) internal_type = expression.output_field.get_internal_type() if internal_type in ['BooleanField', 'NullBooleanField']: converters.append(self.convert_booleanfield_value) elif internal_type == 'DateTimeField': if settings.USE_TZ: converters.append(self.convert_datetimefield_value) elif internal_type == 'UUIDField': converters.append(self.convert_uuidfield_value) return converters def convert_booleanfield_value(self, value, expression, connection): if value in (0, 1): value = bool(value) return value def convert_datetimefield_value(self, value, expression, connection): if value is not None: value = timezone.make_aware(value, self.connection.timezone) return value def convert_uuidfield_value(self, value, expression, connection): if value is not None: value = uuid.UUID(value) return value def binary_placeholder_sql(self, value): return '_binary %s' if value is not None and not hasattr(value, 'as_sql') else '%s' def subtract_temporals(self, internal_type, lhs, rhs): lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs if internal_type == 'TimeField': if self.connection.mysql_is_mariadb: # MariaDB includes the microsecond component in TIME_TO_SEC as # a decimal. MySQL returns an integer without microseconds. return 'CAST((TIME_TO_SEC(%(lhs)s) - TIME_TO_SEC(%(rhs)s)) * 1000000 AS SIGNED)' % { 'lhs': lhs_sql, 'rhs': rhs_sql }, (*lhs_params, *rhs_params) return ( "((TIME_TO_SEC(%(lhs)s) * 1000000 + MICROSECOND(%(lhs)s)) -" " (TIME_TO_SEC(%(rhs)s) * 1000000 + MICROSECOND(%(rhs)s)))" ) % {'lhs': lhs_sql, 'rhs': rhs_sql}, tuple(lhs_params) * 2 + tuple(rhs_params) * 2 params = (*rhs_params, *lhs_params) return "TIMESTAMPDIFF(MICROSECOND, %s, %s)" % (rhs_sql, lhs_sql), params def explain_query_prefix(self, format=None, **options): # Alias MySQL's TRADITIONAL to TEXT for consistency with other backends. if format and format.upper() == 'TEXT': format = 'TRADITIONAL' elif not format and 'TREE' in self.connection.features.supported_explain_formats: # Use TREE by default (if supported) as it's more informative. format = 'TREE' analyze = options.pop('analyze', False) prefix = super().explain_query_prefix(format, **options) if analyze and self.connection.features.supports_explain_analyze: # MariaDB uses ANALYZE instead of EXPLAIN ANALYZE. prefix = 'ANALYZE' if self.connection.mysql_is_mariadb else prefix + ' ANALYZE' if format and not (analyze and not self.connection.mysql_is_mariadb): # Only MariaDB supports the analyze option with formats. prefix += ' FORMAT=%s' % format return prefix def regex_lookup(self, lookup_type): # REGEXP BINARY doesn't work correctly in MySQL 8+ and REGEXP_LIKE # doesn't exist in MySQL 5.x or in MariaDB. 
if self.connection.mysql_version < (8, 0, 0) or self.connection.mysql_is_mariadb: if lookup_type == 'regex': return '%s REGEXP BINARY %s' return '%s REGEXP %s' match_option = 'c' if lookup_type == 'regex' else 'i' return "REGEXP_LIKE(%%s, %%s, '%s')" % match_option def insert_statement(self, ignore_conflicts=False): return 'INSERT IGNORE INTO' if ignore_conflicts else super().insert_statement(ignore_conflicts) def lookup_cast(self, lookup_type, internal_type=None): lookup = '%s' if internal_type == 'JSONField': if self.connection.mysql_is_mariadb or lookup_type in ( 'iexact', 'contains', 'icontains', 'startswith', 'istartswith', 'endswith', 'iendswith', 'regex', 'iregex', ): lookup = 'JSON_UNQUOTE(%s)' return lookup
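# A standalone illustration (assumption: no live connection, so the format
# assembly from datetime_trunc_sql() above is reproduced inline). Truncating
# to 'minute' keeps the formats up to minutes and pads the rest with the
# "zero" defaults; _demo_minute_trunc_format is a hypothetical helper.
def _demo_minute_trunc_format():
    fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
    formats = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s')
    format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
    i = fields.index('minute') + 1
    format_str = ''.join(formats[:i] + format_def[i:])
    assert format_str == '%%Y-%%m-%%d %%H:%%i:00'
    # The doubled percents collapse in the later %-interpolation, giving:
    # CAST(DATE_FORMAT(`col`, '%Y-%m-%d %H:%i:00') AS DATETIME)
    return "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % ('`col`', format_str)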
import datetime import decimal import uuid from functools import lru_cache from itertools import chain from django.conf import settings from django.core.exceptions import FieldError from django.db import DatabaseError, NotSupportedError, models from django.db.backends.base.operations import BaseDatabaseOperations from django.db.models.expressions import Col from django.utils import timezone from django.utils.dateparse import parse_date, parse_datetime, parse_time from django.utils.functional import cached_property class DatabaseOperations(BaseDatabaseOperations): cast_char_field_without_max_length = 'text' cast_data_types = { 'DateField': 'TEXT', 'DateTimeField': 'TEXT', } explain_prefix = 'EXPLAIN QUERY PLAN' def bulk_batch_size(self, fields, objs): """ SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of 999 variables per query. If there's only a single field to insert, the limit is 500 (SQLITE_MAX_COMPOUND_SELECT). """ if len(fields) == 1: return 500 elif len(fields) > 1: return self.connection.features.max_query_params // len(fields) else: return len(objs) def check_expression_support(self, expression): bad_fields = (models.DateField, models.DateTimeField, models.TimeField) bad_aggregates = (models.Sum, models.Avg, models.Variance, models.StdDev) if isinstance(expression, bad_aggregates): for expr in expression.get_source_expressions(): try: output_field = expr.output_field except (AttributeError, FieldError): # Not every subexpression has an output_field which is fine # to ignore. pass else: if isinstance(output_field, bad_fields): raise NotSupportedError( 'You cannot use Sum, Avg, StdDev, and Variance ' 'aggregations on date/time fields in sqlite3 ' 'since date/time is saved as text.' ) if ( isinstance(expression, models.Aggregate) and expression.distinct and len(expression.source_expressions) > 1 ): raise NotSupportedError( "SQLite doesn't support DISTINCT on aggregate functions " "accepting multiple arguments." ) def date_extract_sql(self, lookup_type, field_name): """ Support EXTRACT with a user-defined function django_date_extract() that's registered in connect(). Use single quotes because this is a string and could otherwise cause a collision with a field name. 
""" return "django_date_extract('%s', %s)" % (lookup_type.lower(), field_name) def format_for_duration_arithmetic(self, sql): """Do nothing since formatting is handled in the custom function.""" return sql def date_trunc_sql(self, lookup_type, field_name): return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name) def time_trunc_sql(self, lookup_type, field_name): return "django_time_trunc('%s', %s)" % (lookup_type.lower(), field_name) def _convert_tznames_to_sql(self, tzname): if settings.USE_TZ: return "'%s'" % tzname, "'%s'" % self.connection.timezone_name return 'NULL', 'NULL' def datetime_cast_date_sql(self, field_name, tzname): return 'django_datetime_cast_date(%s, %s, %s)' % ( field_name, *self._convert_tznames_to_sql(tzname), ) def datetime_cast_time_sql(self, field_name, tzname): return 'django_datetime_cast_time(%s, %s, %s)' % ( field_name, *self._convert_tznames_to_sql(tzname), ) def datetime_extract_sql(self, lookup_type, field_name, tzname): return "django_datetime_extract('%s', %s, %s, %s)" % ( lookup_type.lower(), field_name, *self._convert_tznames_to_sql(tzname), ) def datetime_trunc_sql(self, lookup_type, field_name, tzname): return "django_datetime_trunc('%s', %s, %s, %s)" % ( lookup_type.lower(), field_name, *self._convert_tznames_to_sql(tzname), ) def time_extract_sql(self, lookup_type, field_name): return "django_time_extract('%s', %s)" % (lookup_type.lower(), field_name) def pk_default_value(self): return "NULL" def _quote_params_for_last_executed_query(self, params): """ Only for last_executed_query! Don't use this to execute SQL queries! """ # This function is limited both by SQLITE_LIMIT_VARIABLE_NUMBER (the # number of parameters, default = 999) and SQLITE_MAX_COLUMN (the # number of return values, default = 2000). Since Python's sqlite3 # module doesn't expose the get_limit() C API, assume the default # limits are in effect and split the work in batches if needed. BATCH_SIZE = 999 if len(params) > BATCH_SIZE: results = () for index in range(0, len(params), BATCH_SIZE): chunk = params[index:index + BATCH_SIZE] results += self._quote_params_for_last_executed_query(chunk) return results sql = 'SELECT ' + ', '.join(['QUOTE(?)'] * len(params)) # Bypass Django's wrappers and use the underlying sqlite3 connection # to avoid logging this query - it would trigger infinite recursion. cursor = self.connection.connection.cursor() # Native sqlite3 cursors cannot be used as context managers. try: return cursor.execute(sql, params).fetchone() finally: cursor.close() def last_executed_query(self, cursor, sql, params): # Python substitutes parameters in Modules/_sqlite/cursor.c with: # pysqlite_statement_bind_parameters(self->statement, parameters, allow_8bit_chars); # Unfortunately there is no way to reach self->statement from Python, # so we quote and substitute parameters manually. if params: if isinstance(params, (list, tuple)): params = self._quote_params_for_last_executed_query(params) else: values = tuple(params.values()) values = self._quote_params_for_last_executed_query(values) params = dict(zip(params, values)) return sql % params # For consistency with SQLiteCursorWrapper.execute(), just return sql # when there are no parameters. See #13648 and #17158. else: return sql def quote_name(self, name): if name.startswith('"') and name.endswith('"'): return name # Quoting once is enough. 
return '"%s"' % name def no_limit_value(self): return -1 def __references_graph(self, table_name): query = """ WITH tables AS ( SELECT %s name UNION SELECT sqlite_master.name FROM sqlite_master JOIN tables ON (sql REGEXP %s || tables.name || %s) ) SELECT name FROM tables; """ params = ( table_name, r'(?i)\s+references\s+("|\')?', r'("|\')?\s*\(', ) with self.connection.cursor() as cursor: results = cursor.execute(query, params) return [row[0] for row in results.fetchall()] @cached_property def _references_graph(self): # 512 is large enough to fit the ~330 tables (as of this writing) in # Django's test suite. return lru_cache(maxsize=512)(self.__references_graph) def sql_flush(self, style, tables, *, reset_sequences=False, allow_cascade=False): if tables and allow_cascade: # Simulate TRUNCATE CASCADE by recursively collecting the tables # referencing the tables to be flushed. tables = set(chain.from_iterable(self._references_graph(table) for table in tables)) sql = ['%s %s %s;' % ( style.SQL_KEYWORD('DELETE'), style.SQL_KEYWORD('FROM'), style.SQL_FIELD(self.quote_name(table)) ) for table in tables] if reset_sequences: sequences = [{'table': table} for table in tables] sql.extend(self.sequence_reset_by_name_sql(style, sequences)) return sql def sequence_reset_by_name_sql(self, style, sequences): if not sequences: return [] return [ '%s %s %s %s = 0 %s %s %s (%s);' % ( style.SQL_KEYWORD('UPDATE'), style.SQL_TABLE(self.quote_name('sqlite_sequence')), style.SQL_KEYWORD('SET'), style.SQL_FIELD(self.quote_name('seq')), style.SQL_KEYWORD('WHERE'), style.SQL_FIELD(self.quote_name('name')), style.SQL_KEYWORD('IN'), ', '.join([ "'%s'" % sequence_info['table'] for sequence_info in sequences ]), ), ] def adapt_datetimefield_value(self, value): if value is None: return None # Expression values are adapted by the database. if hasattr(value, 'resolve_expression'): return value # SQLite doesn't support tz-aware datetimes if timezone.is_aware(value): if settings.USE_TZ: value = timezone.make_naive(value, self.connection.timezone) else: raise ValueError("SQLite backend does not support timezone-aware datetimes when USE_TZ is False.") return str(value) def adapt_timefield_value(self, value): if value is None: return None # Expression values are adapted by the database. 
if hasattr(value, 'resolve_expression'): return value # SQLite doesn't support tz-aware datetimes if timezone.is_aware(value): raise ValueError("SQLite backend does not support timezone-aware times.") return str(value) def get_db_converters(self, expression): converters = super().get_db_converters(expression) internal_type = expression.output_field.get_internal_type() if internal_type == 'DateTimeField': converters.append(self.convert_datetimefield_value) elif internal_type == 'DateField': converters.append(self.convert_datefield_value) elif internal_type == 'TimeField': converters.append(self.convert_timefield_value) elif internal_type == 'DecimalField': converters.append(self.get_decimalfield_converter(expression)) elif internal_type == 'UUIDField': converters.append(self.convert_uuidfield_value) elif internal_type in ('NullBooleanField', 'BooleanField'): converters.append(self.convert_booleanfield_value) return converters def convert_datetimefield_value(self, value, expression, connection): if value is not None: if not isinstance(value, datetime.datetime): value = parse_datetime(value) if settings.USE_TZ and not timezone.is_aware(value): value = timezone.make_aware(value, self.connection.timezone) return value def convert_datefield_value(self, value, expression, connection): if value is not None: if not isinstance(value, datetime.date): value = parse_date(value) return value def convert_timefield_value(self, value, expression, connection): if value is not None: if not isinstance(value, datetime.time): value = parse_time(value) return value def get_decimalfield_converter(self, expression): # SQLite stores only 15 significant digits. Digits coming from # float inaccuracy must be removed. create_decimal = decimal.Context(prec=15).create_decimal_from_float if isinstance(expression, Col): quantize_value = decimal.Decimal(1).scaleb(-expression.output_field.decimal_places) def converter(value, expression, connection): if value is not None: return create_decimal(value).quantize(quantize_value, context=expression.output_field.context) else: def converter(value, expression, connection): if value is not None: return create_decimal(value) return converter def convert_uuidfield_value(self, value, expression, connection): if value is not None: value = uuid.UUID(value) return value def convert_booleanfield_value(self, value, expression, connection): return bool(value) if value in (1, 0) else value def bulk_insert_sql(self, fields, placeholder_rows): return " UNION ALL ".join( "SELECT %s" % ", ".join(row) for row in placeholder_rows ) def combine_expression(self, connector, sub_expressions): # SQLite doesn't have a ^ operator, so use the user-defined POWER # function that's registered in connect(). if connector == '^': return 'POWER(%s)' % ','.join(sub_expressions) elif connector == '#': return 'BITXOR(%s)' % ','.join(sub_expressions) return super().combine_expression(connector, sub_expressions) def combine_duration_expression(self, connector, sub_expressions): if connector not in ['+', '-']: raise DatabaseError('Invalid connector for timedelta: %s.' 
% connector) fn_params = ["'%s'" % connector] + sub_expressions if len(fn_params) > 3: raise ValueError('Too many params for timedelta operations.') return "django_format_dtdelta(%s)" % ', '.join(fn_params) def integer_field_range(self, internal_type): # SQLite doesn't enforce any integer constraints return (None, None) def subtract_temporals(self, internal_type, lhs, rhs): lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs params = (*lhs_params, *rhs_params) if internal_type == 'TimeField': return 'django_time_diff(%s, %s)' % (lhs_sql, rhs_sql), params return 'django_timestamp_diff(%s, %s)' % (lhs_sql, rhs_sql), params def insert_statement(self, ignore_conflicts=False): return 'INSERT OR IGNORE INTO' if ignore_conflicts else super().insert_statement(ignore_conflicts)
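# An isolated sketch (assumption: plain decimal arithmetic, no database) of
# why get_decimalfield_converter() above routes values through a 15-digit
# context before quantizing: SQLite hands back REALs, and float noise beyond
# 15 significant digits must be dropped before rounding to decimal_places.
def _demo_decimal_roundtrip():
    # Hypothetical demo, mirroring the converter for decimal_places=2.
    create_decimal = decimal.Context(prec=15).create_decimal_from_float
    quantize_value = decimal.Decimal(1).scaleb(-2)
    value = 1.1  # stored as REAL; not exactly representable in binary
    return create_decimal(value).quantize(quantize_value)  # Decimal('1.10')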
import cx_Oracle from django.db.backends.oracle.introspection import DatabaseIntrospection from django.utils.functional import cached_property class OracleIntrospection(DatabaseIntrospection): # Associating any OBJECTVAR instances with GeometryField. This won't work # right on Oracle objects that aren't MDSYS.SDO_GEOMETRY, but it is the # only object type supported within Django anyways. @cached_property def data_types_reverse(self): return { **super().data_types_reverse, cx_Oracle.OBJECT: 'GeometryField', } def get_geometry_type(self, table_name, description): with self.connection.cursor() as cursor: # Querying USER_SDO_GEOM_METADATA to get the SRID and dimension information. try: cursor.execute( 'SELECT "DIMINFO", "SRID" FROM "USER_SDO_GEOM_METADATA" ' 'WHERE "TABLE_NAME"=%s AND "COLUMN_NAME"=%s', (table_name.upper(), description.name.upper()) ) row = cursor.fetchone() except Exception as exc: raise Exception( 'Could not find entry in USER_SDO_GEOM_METADATA ' 'corresponding to "%s"."%s"' % (table_name, description.name) ) from exc # TODO: Research way to find a more specific geometry field type for # the column's contents. field_type = 'GeometryField' # Getting the field parameters. field_params = {} dim, srid = row if srid != 4326: field_params['srid'] = srid # Size of object array (SDO_DIM_ARRAY) is number of dimensions. dim = dim.size() if dim != 2: field_params['dim'] = dim return field_type, field_params
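# Illustrative only (assumption: a metadata row was already fetched, so no
# Oracle connection is involved): non-default SRID and dimension values from
# USER_SDO_GEOM_METADATA become keyword arguments for the reverse-engineered
# GeometryField. _demo_field_params is a hypothetical helper.
def _demo_field_params(dim_size, srid):
    # dim_size stands in for row[0].size(); srid for row[1].
    field_params = {}
    if srid != 4326:
        field_params['srid'] = srid
    if dim_size != 2:
        field_params['dim'] = dim_size
    return field_params
# _demo_field_params(2, 4326) == {}: a 2D WGS84 column needs no extra args.
# _demo_field_params(3, 3857) == {'srid': 3857, 'dim': 3}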
from datetime import date, datetime, timedelta from operator import attrgetter from django.db import IntegrityError from django.test import TestCase from .models import ( CustomMembership, Employee, Event, Friendship, Group, Ingredient, Invitation, Membership, Person, PersonSelfRefM2M, Recipe, RecipeIngredient, Relationship, SymmetricalFriendship, ) class M2mThroughTests(TestCase): @classmethod def setUpTestData(cls): cls.bob = Person.objects.create(name='Bob') cls.jim = Person.objects.create(name='Jim') cls.jane = Person.objects.create(name='Jane') cls.rock = Group.objects.create(name='Rock') cls.roll = Group.objects.create(name='Roll') def test_retrieve_intermediate_items(self): Membership.objects.create(person=self.jim, group=self.rock) Membership.objects.create(person=self.jane, group=self.rock) expected = ['Jane', 'Jim'] self.assertQuerysetEqual( self.rock.members.all(), expected, attrgetter("name") ) def test_get_on_intermediate_model(self): Membership.objects.create(person=self.jane, group=self.rock) queryset = Membership.objects.get(person=self.jane, group=self.rock) self.assertEqual( repr(queryset), '<Membership: Jane is a member of Rock>' ) def test_filter_on_intermediate_model(self): Membership.objects.create(person=self.jim, group=self.rock) Membership.objects.create(person=self.jane, group=self.rock) queryset = Membership.objects.filter(group=self.rock) expected = [ '<Membership: Jim is a member of Rock>', '<Membership: Jane is a member of Rock>', ] self.assertQuerysetEqual( queryset, expected ) def test_add_on_m2m_with_intermediate_model(self): self.rock.members.add(self.bob, through_defaults={'invite_reason': 'He is good.'}) self.assertSequenceEqual(self.rock.members.all(), [self.bob]) self.assertEqual(self.rock.membership_set.get().invite_reason, 'He is good.') def test_add_on_m2m_with_intermediate_model_callable_through_default(self): def invite_reason_callable(): return 'They were good at %s' % datetime.now() self.rock.members.add( self.bob, self.jane, through_defaults={'invite_reason': invite_reason_callable}, ) self.assertSequenceEqual(self.rock.members.all(), [self.bob, self.jane]) self.assertEqual( self.rock.membership_set.filter( invite_reason__startswith='They were good at ', ).count(), 2, ) # invite_reason_callable() is called once. 
self.assertEqual( self.bob.membership_set.get().invite_reason, self.jane.membership_set.get().invite_reason, ) def test_set_on_m2m_with_intermediate_model_callable_through_default(self): self.rock.members.set( [self.bob, self.jane], through_defaults={'invite_reason': lambda: 'Why not?'}, ) self.assertSequenceEqual(self.rock.members.all(), [self.bob, self.jane]) self.assertEqual( self.rock.membership_set.filter( invite_reason__startswith='Why not?', ).count(), 2, ) def test_add_on_m2m_with_intermediate_model_value_required(self): self.rock.nodefaultsnonulls.add(self.jim, through_defaults={'nodefaultnonull': 1}) self.assertEqual(self.rock.testnodefaultsornulls_set.get().nodefaultnonull, 1) def test_add_on_m2m_with_intermediate_model_value_required_fails(self): with self.assertRaises(IntegrityError): self.rock.nodefaultsnonulls.add(self.jim) def test_create_on_m2m_with_intermediate_model(self): annie = self.rock.members.create(name='Annie', through_defaults={'invite_reason': 'She was just awesome.'}) self.assertSequenceEqual(self.rock.members.all(), [annie]) self.assertEqual(self.rock.membership_set.get().invite_reason, 'She was just awesome.') def test_create_on_m2m_with_intermediate_model_callable_through_default(self): annie = self.rock.members.create( name='Annie', through_defaults={'invite_reason': lambda: 'She was just awesome.'}, ) self.assertSequenceEqual(self.rock.members.all(), [annie]) self.assertEqual( self.rock.membership_set.get().invite_reason, 'She was just awesome.', ) def test_create_on_m2m_with_intermediate_model_value_required(self): self.rock.nodefaultsnonulls.create(name='Test', through_defaults={'nodefaultnonull': 1}) self.assertEqual(self.rock.testnodefaultsornulls_set.get().nodefaultnonull, 1) def test_create_on_m2m_with_intermediate_model_value_required_fails(self): with self.assertRaises(IntegrityError): self.rock.nodefaultsnonulls.create(name='Test') def test_get_or_create_on_m2m_with_intermediate_model_value_required(self): self.rock.nodefaultsnonulls.get_or_create(name='Test', through_defaults={'nodefaultnonull': 1}) self.assertEqual(self.rock.testnodefaultsornulls_set.get().nodefaultnonull, 1) def test_get_or_create_on_m2m_with_intermediate_model_value_required_fails(self): with self.assertRaises(IntegrityError): self.rock.nodefaultsnonulls.get_or_create(name='Test') def test_update_or_create_on_m2m_with_intermediate_model_value_required(self): self.rock.nodefaultsnonulls.update_or_create(name='Test', through_defaults={'nodefaultnonull': 1}) self.assertEqual(self.rock.testnodefaultsornulls_set.get().nodefaultnonull, 1) def test_update_or_create_on_m2m_with_intermediate_model_value_required_fails(self): with self.assertRaises(IntegrityError): self.rock.nodefaultsnonulls.update_or_create(name='Test') def test_remove_on_m2m_with_intermediate_model(self): Membership.objects.create(person=self.jim, group=self.rock) self.rock.members.remove(self.jim) self.assertSequenceEqual(self.rock.members.all(), []) def test_remove_on_m2m_with_intermediate_model_multiple(self): Membership.objects.create(person=self.jim, group=self.rock, invite_reason='1') Membership.objects.create(person=self.jim, group=self.rock, invite_reason='2') self.assertSequenceEqual(self.rock.members.all(), [self.jim, self.jim]) self.rock.members.remove(self.jim) self.assertSequenceEqual(self.rock.members.all(), []) def test_set_on_m2m_with_intermediate_model(self): members = list(Person.objects.filter(name__in=['Bob', 'Jim'])) self.rock.members.set(members) 
self.assertSequenceEqual(self.rock.members.all(), [self.bob, self.jim]) def test_set_on_m2m_with_intermediate_model_value_required(self): self.rock.nodefaultsnonulls.set([self.jim], through_defaults={'nodefaultnonull': 1}) self.assertEqual(self.rock.testnodefaultsornulls_set.get().nodefaultnonull, 1) self.rock.nodefaultsnonulls.set([self.jim], through_defaults={'nodefaultnonull': 2}) self.assertEqual(self.rock.testnodefaultsornulls_set.get().nodefaultnonull, 1) self.rock.nodefaultsnonulls.set([self.jim], through_defaults={'nodefaultnonull': 2}, clear=True) self.assertEqual(self.rock.testnodefaultsornulls_set.get().nodefaultnonull, 2) def test_set_on_m2m_with_intermediate_model_value_required_fails(self): with self.assertRaises(IntegrityError): self.rock.nodefaultsnonulls.set([self.jim]) def test_clear_removes_all_the_m2m_relationships(self): Membership.objects.create(person=self.jim, group=self.rock) Membership.objects.create(person=self.jane, group=self.rock) self.rock.members.clear() self.assertQuerysetEqual( self.rock.members.all(), [] ) def test_retrieve_reverse_intermediate_items(self): Membership.objects.create(person=self.jim, group=self.rock) Membership.objects.create(person=self.jim, group=self.roll) expected = ['Rock', 'Roll'] self.assertQuerysetEqual( self.jim.group_set.all(), expected, attrgetter("name") ) def test_add_on_reverse_m2m_with_intermediate_model(self): self.bob.group_set.add(self.rock) self.assertSequenceEqual(self.bob.group_set.all(), [self.rock]) def test_create_on_reverse_m2m_with_intermediate_model(self): funk = self.bob.group_set.create(name='Funk') self.assertSequenceEqual(self.bob.group_set.all(), [funk]) def test_remove_on_reverse_m2m_with_intermediate_model(self): Membership.objects.create(person=self.bob, group=self.rock) self.bob.group_set.remove(self.rock) self.assertSequenceEqual(self.bob.group_set.all(), []) def test_set_on_reverse_m2m_with_intermediate_model(self): members = list(Group.objects.filter(name__in=['Rock', 'Roll'])) self.bob.group_set.set(members) self.assertSequenceEqual(self.bob.group_set.all(), [self.rock, self.roll]) def test_clear_on_reverse_removes_all_the_m2m_relationships(self): Membership.objects.create(person=self.jim, group=self.rock) Membership.objects.create(person=self.jim, group=self.roll) self.jim.group_set.clear() self.assertQuerysetEqual( self.jim.group_set.all(), [] ) def test_query_model_by_attribute_name_of_related_model(self): Membership.objects.create(person=self.jim, group=self.rock) Membership.objects.create(person=self.jane, group=self.rock) Membership.objects.create(person=self.bob, group=self.roll) Membership.objects.create(person=self.jim, group=self.roll) Membership.objects.create(person=self.jane, group=self.roll) self.assertQuerysetEqual( Group.objects.filter(members__name='Bob'), ['Roll'], attrgetter("name") ) def test_order_by_relational_field_through_model(self): today = datetime.now() yesterday = today - timedelta(days=1) CustomMembership.objects.create(person=self.jim, group=self.rock, date_joined=yesterday) CustomMembership.objects.create(person=self.bob, group=self.rock, date_joined=today) CustomMembership.objects.create(person=self.jane, group=self.roll, date_joined=yesterday) CustomMembership.objects.create(person=self.jim, group=self.roll, date_joined=today) self.assertSequenceEqual( self.rock.custom_members.order_by('custom_person_related_name'), [self.jim, self.bob] ) self.assertSequenceEqual( self.roll.custom_members.order_by('custom_person_related_name'), [self.jane, self.jim] ) def 
test_query_first_model_by_intermediate_model_attribute(self): Membership.objects.create( person=self.jane, group=self.roll, invite_reason="She was just awesome." ) Membership.objects.create( person=self.jim, group=self.roll, invite_reason="He is good." ) Membership.objects.create(person=self.bob, group=self.roll) qs = Group.objects.filter( membership__invite_reason="She was just awesome." ) self.assertQuerysetEqual( qs, ['Roll'], attrgetter("name") ) def test_query_second_model_by_intermediate_model_attribute(self): Membership.objects.create( person=self.jane, group=self.roll, invite_reason="She was just awesome." ) Membership.objects.create( person=self.jim, group=self.roll, invite_reason="He is good." ) Membership.objects.create(person=self.bob, group=self.roll) qs = Person.objects.filter( membership__invite_reason="She was just awesome." ) self.assertQuerysetEqual( qs, ['Jane'], attrgetter("name") ) def test_query_model_by_related_model_name(self): Membership.objects.create(person=self.jim, group=self.rock) Membership.objects.create(person=self.jane, group=self.rock) Membership.objects.create(person=self.bob, group=self.roll) Membership.objects.create(person=self.jim, group=self.roll) Membership.objects.create(person=self.jane, group=self.roll) self.assertQuerysetEqual( Person.objects.filter(group__name="Rock"), ['Jane', 'Jim'], attrgetter("name") ) def test_query_model_by_custom_related_name(self): CustomMembership.objects.create(person=self.bob, group=self.rock) CustomMembership.objects.create(person=self.jim, group=self.rock) self.assertQuerysetEqual( Person.objects.filter(custom__name="Rock"), ['Bob', 'Jim'], attrgetter("name") ) def test_query_model_by_intermediate_can_return_non_unique_queryset(self): Membership.objects.create(person=self.jim, group=self.rock) Membership.objects.create( person=self.jane, group=self.rock, date_joined=datetime(2006, 1, 1) ) Membership.objects.create( person=self.bob, group=self.roll, date_joined=datetime(2004, 1, 1)) Membership.objects.create(person=self.jim, group=self.roll) Membership.objects.create( person=self.jane, group=self.roll, date_joined=datetime(2004, 1, 1)) qs = Person.objects.filter( membership__date_joined__gt=datetime(2004, 1, 1) ) self.assertQuerysetEqual( qs, ['Jane', 'Jim', 'Jim'], attrgetter("name") ) def test_custom_related_name_forward_empty_qs(self): self.assertQuerysetEqual( self.rock.custom_members.all(), [] ) def test_custom_related_name_reverse_empty_qs(self): self.assertQuerysetEqual( self.bob.custom.all(), [] ) def test_custom_related_name_forward_non_empty_qs(self): CustomMembership.objects.create(person=self.bob, group=self.rock) CustomMembership.objects.create(person=self.jim, group=self.rock) self.assertQuerysetEqual( self.rock.custom_members.all(), ['Bob', 'Jim'], attrgetter("name") ) def test_custom_related_name_reverse_non_empty_qs(self): CustomMembership.objects.create(person=self.bob, group=self.rock) CustomMembership.objects.create(person=self.jim, group=self.rock) self.assertQuerysetEqual( self.bob.custom.all(), ['Rock'], attrgetter("name") ) def test_custom_related_name_doesnt_conflict_with_fky_related_name(self): CustomMembership.objects.create(person=self.bob, group=self.rock) self.assertQuerysetEqual( self.bob.custom_person_related_name.all(), ['<CustomMembership: Bob is a member of Rock>'] ) def test_through_fields(self): """ Relations with intermediary tables with multiple FKs to the M2M's ``to`` model are possible. 
""" event = Event.objects.create(title='Rockwhale 2014') Invitation.objects.create(event=event, inviter=self.bob, invitee=self.jim) Invitation.objects.create(event=event, inviter=self.bob, invitee=self.jane) self.assertQuerysetEqual( event.invitees.all(), ['Jane', 'Jim'], attrgetter('name') ) class M2mThroughReferentialTests(TestCase): def test_self_referential_empty_qs(self): tony = PersonSelfRefM2M.objects.create(name="Tony") self.assertQuerysetEqual( tony.friends.all(), [] ) def test_self_referential_non_symmetrical_first_side(self): tony = PersonSelfRefM2M.objects.create(name="Tony") chris = PersonSelfRefM2M.objects.create(name="Chris") Friendship.objects.create( first=tony, second=chris, date_friended=datetime.now() ) self.assertQuerysetEqual( tony.friends.all(), ['Chris'], attrgetter("name") ) def test_self_referential_non_symmetrical_second_side(self): tony = PersonSelfRefM2M.objects.create(name="Tony") chris = PersonSelfRefM2M.objects.create(name="Chris") Friendship.objects.create( first=tony, second=chris, date_friended=datetime.now() ) self.assertQuerysetEqual( chris.friends.all(), [] ) def test_self_referential_non_symmetrical_clear_first_side(self): tony = PersonSelfRefM2M.objects.create(name="Tony") chris = PersonSelfRefM2M.objects.create(name="Chris") Friendship.objects.create( first=tony, second=chris, date_friended=datetime.now() ) chris.friends.clear() self.assertQuerysetEqual( chris.friends.all(), [] ) # Since this isn't a symmetrical relation, Tony's friend link still exists. self.assertQuerysetEqual( tony.friends.all(), ['Chris'], attrgetter("name") ) def test_self_referential_non_symmetrical_both(self): tony = PersonSelfRefM2M.objects.create(name="Tony") chris = PersonSelfRefM2M.objects.create(name="Chris") Friendship.objects.create( first=tony, second=chris, date_friended=datetime.now() ) Friendship.objects.create( first=chris, second=tony, date_friended=datetime.now() ) self.assertQuerysetEqual( tony.friends.all(), ['Chris'], attrgetter("name") ) self.assertQuerysetEqual( chris.friends.all(), ['Tony'], attrgetter("name") ) def test_through_fields_self_referential(self): john = Employee.objects.create(name='john') peter = Employee.objects.create(name='peter') mary = Employee.objects.create(name='mary') harry = Employee.objects.create(name='harry') Relationship.objects.create(source=john, target=peter, another=None) Relationship.objects.create(source=john, target=mary, another=None) Relationship.objects.create(source=john, target=harry, another=peter) self.assertQuerysetEqual( john.subordinates.all(), ['peter', 'mary', 'harry'], attrgetter('name') ) def test_self_referential_symmetrical(self): tony = PersonSelfRefM2M.objects.create(name='Tony') chris = PersonSelfRefM2M.objects.create(name='Chris') SymmetricalFriendship.objects.create( first=tony, second=chris, date_friended=date.today(), ) self.assertSequenceEqual(tony.sym_friends.all(), [chris]) # Manually created symmetrical m2m relation doesn't add mirror entry # automatically. 
self.assertSequenceEqual(chris.sym_friends.all(), []) SymmetricalFriendship.objects.create( first=chris, second=tony, date_friended=date.today() ) self.assertSequenceEqual(chris.sym_friends.all(), [tony]) def test_add_on_symmetrical_m2m_with_intermediate_model(self): tony = PersonSelfRefM2M.objects.create(name='Tony') chris = PersonSelfRefM2M.objects.create(name='Chris') date_friended = date(2017, 1, 3) tony.sym_friends.add(chris, through_defaults={'date_friended': date_friended}) self.assertSequenceEqual(tony.sym_friends.all(), [chris]) self.assertSequenceEqual(chris.sym_friends.all(), [tony]) friendship = tony.symmetricalfriendship_set.get() self.assertEqual(friendship.date_friended, date_friended) def test_set_on_symmetrical_m2m_with_intermediate_model(self): tony = PersonSelfRefM2M.objects.create(name='Tony') chris = PersonSelfRefM2M.objects.create(name='Chris') anne = PersonSelfRefM2M.objects.create(name='Anne') kate = PersonSelfRefM2M.objects.create(name='Kate') date_friended_add = date(2013, 1, 5) date_friended_set = date.today() tony.sym_friends.add( anne, chris, through_defaults={'date_friended': date_friended_add}, ) tony.sym_friends.set( [anne, kate], through_defaults={'date_friended': date_friended_set}, ) self.assertSequenceEqual(tony.sym_friends.all(), [anne, kate]) self.assertSequenceEqual(anne.sym_friends.all(), [tony]) self.assertSequenceEqual(kate.sym_friends.all(), [tony]) self.assertEqual( kate.symmetricalfriendship_set.get().date_friended, date_friended_set, ) # Date is preserved. self.assertEqual( anne.symmetricalfriendship_set.get().date_friended, date_friended_add, ) # Recreate relationship. tony.sym_friends.set( [anne], clear=True, through_defaults={'date_friended': date_friended_set}, ) self.assertSequenceEqual(tony.sym_friends.all(), [anne]) self.assertSequenceEqual(anne.sym_friends.all(), [tony]) self.assertEqual( anne.symmetricalfriendship_set.get().date_friended, date_friended_set, ) class M2mThroughToFieldsTests(TestCase): @classmethod def setUpTestData(cls): cls.pea = Ingredient.objects.create(iname='pea') cls.potato = Ingredient.objects.create(iname='potato') cls.tomato = Ingredient.objects.create(iname='tomato') cls.curry = Recipe.objects.create(rname='curry') RecipeIngredient.objects.create(recipe=cls.curry, ingredient=cls.potato) RecipeIngredient.objects.create(recipe=cls.curry, ingredient=cls.pea) RecipeIngredient.objects.create(recipe=cls.curry, ingredient=cls.tomato) def test_retrieval(self): # Forward retrieval self.assertSequenceEqual(self.curry.ingredients.all(), [self.pea, self.potato, self.tomato]) # Backward retrieval self.assertEqual(self.tomato.recipes.get(), self.curry) def test_choices(self): field = Recipe._meta.get_field('ingredients') self.assertEqual( [choice[0] for choice in field.get_choices(include_blank=False)], ['pea', 'potato', 'tomato'] )
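# A plausible sketch (not this suite's actual models.py) of the intermediate
# model the tests above exercise, kept in comments so it doesn't shadow the
# imported models; field lengths and defaults here are assumptions, but the
# field names and the __str__ format match what the assertions rely on.
#
#     class Person(models.Model):
#         name = models.CharField(max_length=128)
#
#     class Group(models.Model):
#         name = models.CharField(max_length=128)
#         members = models.ManyToManyField(Person, through='Membership')
#
#     class Membership(models.Model):
#         person = models.ForeignKey(Person, models.CASCADE)
#         group = models.ForeignKey(Group, models.CASCADE)
#         date_joined = models.DateTimeField(default=datetime.now)
#         invite_reason = models.CharField(max_length=64, null=True)
#
#         def __str__(self):
#             return '%s is a member of %s' % (self.person.name, self.group.name)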
import datetime import pickle import unittest import uuid from copy import deepcopy from unittest import mock from django.core.exceptions import FieldError from django.db import DatabaseError, NotSupportedError, connection from django.db.models import ( Avg, BooleanField, Case, CharField, Count, DateField, DateTimeField, DurationField, Exists, Expression, ExpressionList, ExpressionWrapper, F, Func, IntegerField, Max, Min, Model, OrderBy, OuterRef, Q, StdDev, Subquery, Sum, TimeField, UUIDField, Value, Variance, When, ) from django.db.models.expressions import Col, Combinable, Random, RawSQL, Ref from django.db.models.functions import ( Coalesce, Concat, Left, Length, Lower, Substr, Upper, ) from django.db.models.sql import constants from django.db.models.sql.datastructures import Join from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature from django.test.utils import Approximate, isolate_apps from django.utils.functional import SimpleLazyObject from .models import ( UUID, UUIDPK, Company, Employee, Experiment, Manager, Number, RemoteEmployee, Result, SimulationRun, Time, ) class BasicExpressionsTests(TestCase): @classmethod def setUpTestData(cls): cls.example_inc = Company.objects.create( name="Example Inc.", num_employees=2300, num_chairs=5, ceo=Employee.objects.create(firstname="Joe", lastname="Smith", salary=10) ) cls.foobar_ltd = Company.objects.create( name="Foobar Ltd.", num_employees=3, num_chairs=4, based_in_eu=True, ceo=Employee.objects.create(firstname="Frank", lastname="Meyer", salary=20) ) cls.max = Employee.objects.create(firstname='Max', lastname='Mustermann', salary=30) cls.gmbh = Company.objects.create(name='Test GmbH', num_employees=32, num_chairs=1, ceo=cls.max) def setUp(self): self.company_query = Company.objects.values( "name", "num_employees", "num_chairs" ).order_by( "name", "num_employees", "num_chairs" ) def test_annotate_values_aggregate(self): companies = Company.objects.annotate( salaries=F('ceo__salary'), ).values('num_employees', 'salaries').aggregate( result=Sum( F('salaries') + F('num_employees'), output_field=IntegerField() ), ) self.assertEqual(companies['result'], 2395) def test_annotate_values_filter(self): companies = Company.objects.annotate( foo=RawSQL('%s', ['value']), ).filter(foo='value').order_by('name') self.assertQuerysetEqual( companies, ['<Company: Example Inc.>', '<Company: Foobar Ltd.>', '<Company: Test GmbH>'], ) def test_annotate_values_count(self): companies = Company.objects.annotate(foo=RawSQL('%s', ['value'])) self.assertEqual(companies.count(), 3) @skipUnlessDBFeature('supports_boolean_expr_in_select_clause') def test_filtering_on_annotate_that_uses_q(self): self.assertEqual( Company.objects.annotate( num_employees_check=ExpressionWrapper(Q(num_employees__gt=3), output_field=BooleanField()) ).filter(num_employees_check=True).count(), 2, ) def test_filtering_on_q_that_is_boolean(self): self.assertEqual( Company.objects.filter( ExpressionWrapper(Q(num_employees__gt=3), output_field=BooleanField()) ).count(), 2, ) def test_filtering_on_rawsql_that_is_boolean(self): self.assertEqual( Company.objects.filter( RawSQL('num_employees > %s', (3,), output_field=BooleanField()), ).count(), 2, ) def test_filter_inter_attribute(self): # We can filter on attribute relationships on same model obj, e.g. # find companies where the number of employees is greater # than the number of chairs. 
self.assertSequenceEqual( self.company_query.filter(num_employees__gt=F("num_chairs")), [ { "num_chairs": 5, "name": "Example Inc.", "num_employees": 2300, }, { "num_chairs": 1, "name": "Test GmbH", "num_employees": 32 }, ], ) def test_update(self): # We can set one field to have the value of another field # Make sure we have enough chairs self.company_query.update(num_chairs=F("num_employees")) self.assertSequenceEqual( self.company_query, [ { "num_chairs": 2300, "name": "Example Inc.", "num_employees": 2300 }, { "num_chairs": 3, "name": "Foobar Ltd.", "num_employees": 3 }, { "num_chairs": 32, "name": "Test GmbH", "num_employees": 32 } ], ) def test_arithmetic(self): # We can perform arithmetic operations in expressions # Make sure we have 2 spare chairs self.company_query.update(num_chairs=F("num_employees") + 2) self.assertSequenceEqual( self.company_query, [ { 'num_chairs': 2302, 'name': 'Example Inc.', 'num_employees': 2300 }, { 'num_chairs': 5, 'name': 'Foobar Ltd.', 'num_employees': 3 }, { 'num_chairs': 34, 'name': 'Test GmbH', 'num_employees': 32 } ], ) def test_order_of_operations(self): # Law of order of operations is followed self.company_query.update(num_chairs=F('num_employees') + 2 * F('num_employees')) self.assertSequenceEqual( self.company_query, [ { 'num_chairs': 6900, 'name': 'Example Inc.', 'num_employees': 2300 }, { 'num_chairs': 9, 'name': 'Foobar Ltd.', 'num_employees': 3 }, { 'num_chairs': 96, 'name': 'Test GmbH', 'num_employees': 32 } ], ) def test_parenthesis_priority(self): # Law of order of operations can be overridden by parentheses self.company_query.update(num_chairs=(F('num_employees') + 2) * F('num_employees')) self.assertSequenceEqual( self.company_query, [ { 'num_chairs': 5294600, 'name': 'Example Inc.', 'num_employees': 2300 }, { 'num_chairs': 15, 'name': 'Foobar Ltd.', 'num_employees': 3 }, { 'num_chairs': 1088, 'name': 'Test GmbH', 'num_employees': 32 } ], ) def test_update_with_fk(self): # ForeignKey can become updated with the value of another ForeignKey. 
self.assertEqual(Company.objects.update(point_of_contact=F('ceo')), 3) self.assertQuerysetEqual( Company.objects.all(), ['Joe Smith', 'Frank Meyer', 'Max Mustermann'], lambda c: str(c.point_of_contact), ordered=False ) def test_update_with_none(self): Number.objects.create(integer=1, float=1.0) Number.objects.create(integer=2) Number.objects.filter(float__isnull=False).update(float=Value(None)) self.assertQuerysetEqual( Number.objects.all(), [None, None], lambda n: n.float, ordered=False ) def test_filter_with_join(self): # F Expressions can also span joins Company.objects.update(point_of_contact=F('ceo')) c = Company.objects.first() c.point_of_contact = Employee.objects.create(firstname="Guido", lastname="van Rossum") c.save() self.assertQuerysetEqual( Company.objects.filter(ceo__firstname=F('point_of_contact__firstname')), ['Foobar Ltd.', 'Test GmbH'], lambda c: c.name, ordered=False ) Company.objects.exclude( ceo__firstname=F("point_of_contact__firstname") ).update(name="foo") self.assertEqual( Company.objects.exclude( ceo__firstname=F('point_of_contact__firstname') ).get().name, "foo", ) msg = "Joined field references are not permitted in this query" with self.assertRaisesMessage(FieldError, msg): Company.objects.exclude( ceo__firstname=F('point_of_contact__firstname') ).update(name=F('point_of_contact__lastname')) def test_object_update(self): # F expressions can be used to update attributes on single objects self.gmbh.num_employees = F('num_employees') + 4 self.gmbh.save() self.gmbh.refresh_from_db() self.assertEqual(self.gmbh.num_employees, 36) def test_new_object_save(self): # We should be able to use Funcs when inserting new data test_co = Company(name=Lower(Value('UPPER')), num_employees=32, num_chairs=1, ceo=self.max) test_co.save() test_co.refresh_from_db() self.assertEqual(test_co.name, "upper") def test_new_object_create(self): test_co = Company.objects.create(name=Lower(Value('UPPER')), num_employees=32, num_chairs=1, ceo=self.max) test_co.refresh_from_db() self.assertEqual(test_co.name, "upper") def test_object_create_with_aggregate(self): # Aggregates are not allowed when inserting new data msg = 'Aggregate functions are not allowed in this query (num_employees=Max(Value(1))).' with self.assertRaisesMessage(FieldError, msg): Company.objects.create( name='Company', num_employees=Max(Value(1)), num_chairs=1, ceo=Employee.objects.create(firstname="Just", lastname="Doit", salary=30), ) def test_object_update_fk(self): # F expressions cannot be used to update attributes which are foreign # keys, or attributes which involve joins. test_gmbh = Company.objects.get(pk=self.gmbh.pk) msg = 'F(ceo)": "Company.point_of_contact" must be a "Employee" instance.' 
with self.assertRaisesMessage(ValueError, msg): test_gmbh.point_of_contact = F('ceo') test_gmbh.point_of_contact = self.gmbh.ceo test_gmbh.save() test_gmbh.name = F('ceo__lastname') msg = 'Joined field references are not permitted in this query' with self.assertRaisesMessage(FieldError, msg): test_gmbh.save() def test_update_inherited_field_value(self): msg = 'Joined field references are not permitted in this query' with self.assertRaisesMessage(FieldError, msg): RemoteEmployee.objects.update(adjusted_salary=F('salary') * 5) def test_object_update_unsaved_objects(self): # F expressions cannot be used to update attributes on objects which do # not yet exist in the database acme = Company(name='The Acme Widget Co.', num_employees=12, num_chairs=5, ceo=self.max) acme.num_employees = F("num_employees") + 16 msg = ( 'Failed to insert expression "Col(expressions_company, ' 'expressions.Company.num_employees) + Value(16)" on ' 'expressions.Company.num_employees. F() expressions can only be ' 'used to update, not to insert.' ) with self.assertRaisesMessage(ValueError, msg): acme.save() acme.num_employees = 12 acme.name = Lower(F('name')) msg = ( 'Failed to insert expression "Lower(Col(expressions_company, ' 'expressions.Company.name))" on expressions.Company.name. F() ' 'expressions can only be used to update, not to insert.' ) with self.assertRaisesMessage(ValueError, msg): acme.save() def test_ticket_11722_iexact_lookup(self): Employee.objects.create(firstname="John", lastname="Doe") Employee.objects.create(firstname="Test", lastname="test") queryset = Employee.objects.filter(firstname__iexact=F('lastname')) self.assertQuerysetEqual(queryset, ["<Employee: Test test>"]) def test_ticket_16731_startswith_lookup(self): Employee.objects.create(firstname="John", lastname="Doe") e2 = Employee.objects.create(firstname="Jack", lastname="Jackson") e3 = Employee.objects.create(firstname="Jack", lastname="jackson") self.assertSequenceEqual( Employee.objects.filter(lastname__startswith=F('firstname')), [e2, e3] if connection.features.has_case_insensitive_like else [e2] ) qs = Employee.objects.filter(lastname__istartswith=F('firstname')).order_by('pk') self.assertSequenceEqual(qs, [e2, e3]) def test_ticket_18375_join_reuse(self): # Reverse multijoin F() references and the lookup target the same join. # Pre #18375 the F() join was generated first and the lookup couldn't # reuse that join. qs = Employee.objects.filter(company_ceo_set__num_chairs=F('company_ceo_set__num_employees')) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_ticket_18375_kwarg_ordering(self): # The next query was dict-randomization dependent - if the "gte=1" # was seen first, then the F() will reuse the join generated by the # gte lookup, if F() was seen first, then it generated a join the # other lookups could not reuse. qs = Employee.objects.filter( company_ceo_set__num_chairs=F('company_ceo_set__num_employees'), company_ceo_set__num_chairs__gte=1, ) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_ticket_18375_kwarg_ordering_2(self): # Another similar case for F() than above. Now we have the same join # in two filter kwargs, one in the lhs lookup, one in F. Here pre # #18375 the amount of joins generated was random if dict # randomization was enabled, that is the generated query dependent # on which clause was seen first. 
qs = Employee.objects.filter( company_ceo_set__num_employees=F('pk'), pk=F('company_ceo_set__num_employees') ) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_ticket_18375_chained_filters(self): # F() expressions do not reuse joins from previous filter. qs = Employee.objects.filter( company_ceo_set__num_employees=F('pk') ).filter( company_ceo_set__num_employees=F('company_ceo_set__num_employees') ) self.assertEqual(str(qs.query).count('JOIN'), 2) def test_order_by_exists(self): mary = Employee.objects.create(firstname='Mary', lastname='Mustermann', salary=20) mustermanns_by_seniority = Employee.objects.filter(lastname='Mustermann').order_by( # Order by whether the employee is the CEO of a company Exists(Company.objects.filter(ceo=OuterRef('pk'))).desc() ) self.assertSequenceEqual(mustermanns_by_seniority, [self.max, mary]) def test_order_by_multiline_sql(self): raw_order_by = ( RawSQL(''' CASE WHEN num_employees > 1000 THEN num_chairs ELSE 0 END ''', []).desc(), RawSQL(''' CASE WHEN num_chairs > 1 THEN 1 ELSE 0 END ''', []).asc() ) for qs in ( Company.objects.all(), Company.objects.distinct(), ): with self.subTest(qs=qs): self.assertSequenceEqual( qs.order_by(*raw_order_by), [self.example_inc, self.gmbh, self.foobar_ltd], ) def test_outerref(self): inner = Company.objects.filter(point_of_contact=OuterRef('pk')) msg = ( 'This queryset contains a reference to an outer query and may only ' 'be used in a subquery.' ) with self.assertRaisesMessage(ValueError, msg): inner.exists() outer = Employee.objects.annotate(is_point_of_contact=Exists(inner)) self.assertIs(outer.exists(), True) def test_exist_single_field_output_field(self): queryset = Company.objects.values('pk') self.assertIsInstance(Exists(queryset).output_field, BooleanField) def test_subquery(self): Company.objects.filter(name='Example Inc.').update( point_of_contact=Employee.objects.get(firstname='Joe', lastname='Smith'), ceo=self.max, ) Employee.objects.create(firstname='Bob', lastname='Brown', salary=40) qs = Employee.objects.annotate( is_point_of_contact=Exists(Company.objects.filter(point_of_contact=OuterRef('pk'))), is_not_point_of_contact=~Exists(Company.objects.filter(point_of_contact=OuterRef('pk'))), is_ceo_of_small_company=Exists(Company.objects.filter(num_employees__lt=200, ceo=OuterRef('pk'))), is_ceo_small_2=~~Exists(Company.objects.filter(num_employees__lt=200, ceo=OuterRef('pk'))), largest_company=Subquery(Company.objects.order_by('-num_employees').filter( Q(ceo=OuterRef('pk')) | Q(point_of_contact=OuterRef('pk')) ).values('name')[:1], output_field=CharField()) ).values( 'firstname', 'is_point_of_contact', 'is_not_point_of_contact', 'is_ceo_of_small_company', 'is_ceo_small_2', 'largest_company', ).order_by('firstname') results = list(qs) # Could use Coalesce(subq, Value('')) instead except for the bug in # cx_Oracle mentioned in #23843. 
bob = results[0] if bob['largest_company'] == '' and connection.features.interprets_empty_strings_as_nulls: bob['largest_company'] = None self.assertEqual(results, [ { 'firstname': 'Bob', 'is_point_of_contact': False, 'is_not_point_of_contact': True, 'is_ceo_of_small_company': False, 'is_ceo_small_2': False, 'largest_company': None, }, { 'firstname': 'Frank', 'is_point_of_contact': False, 'is_not_point_of_contact': True, 'is_ceo_of_small_company': True, 'is_ceo_small_2': True, 'largest_company': 'Foobar Ltd.', }, { 'firstname': 'Joe', 'is_point_of_contact': True, 'is_not_point_of_contact': False, 'is_ceo_of_small_company': False, 'is_ceo_small_2': False, 'largest_company': 'Example Inc.', }, { 'firstname': 'Max', 'is_point_of_contact': False, 'is_not_point_of_contact': True, 'is_ceo_of_small_company': True, 'is_ceo_small_2': True, 'largest_company': 'Example Inc.' } ]) # A less elegant way to write the same query: this uses a LEFT OUTER # JOIN and an IS NULL, inside a WHERE NOT IN which is probably less # efficient than EXISTS. self.assertCountEqual( qs.filter(is_point_of_contact=True).values('pk'), Employee.objects.exclude(company_point_of_contact_set=None).values('pk') ) def test_subquery_eq(self): qs = Employee.objects.annotate( is_ceo=Exists(Company.objects.filter(ceo=OuterRef('pk'))), is_point_of_contact=Exists( Company.objects.filter(point_of_contact=OuterRef('pk')), ), small_company=Exists( queryset=Company.objects.filter(num_employees__lt=200), ), ).filter(is_ceo=True, is_point_of_contact=False, small_company=True) self.assertNotEqual( qs.query.annotations['is_ceo'], qs.query.annotations['is_point_of_contact'], ) self.assertNotEqual( qs.query.annotations['is_ceo'], qs.query.annotations['small_company'], ) def test_in_subquery(self): # This is a contrived test (and you really wouldn't write this query), # but it is a succinct way to test the __in=Subquery() construct. 
small_companies = Company.objects.filter(num_employees__lt=200).values('pk') subquery_test = Company.objects.filter(pk__in=Subquery(small_companies)) self.assertCountEqual(subquery_test, [self.foobar_ltd, self.gmbh]) subquery_test2 = Company.objects.filter(pk=Subquery(small_companies.filter(num_employees=3))) self.assertCountEqual(subquery_test2, [self.foobar_ltd]) def test_uuid_pk_subquery(self): u = UUIDPK.objects.create() UUID.objects.create(uuid_fk=u) qs = UUIDPK.objects.filter(id__in=Subquery(UUID.objects.values('uuid_fk__id'))) self.assertCountEqual(qs, [u]) def test_nested_subquery(self): inner = Company.objects.filter(point_of_contact=OuterRef('pk')) outer = Employee.objects.annotate(is_point_of_contact=Exists(inner)) contrived = Employee.objects.annotate( is_point_of_contact=Subquery( outer.filter(pk=OuterRef('pk')).values('is_point_of_contact'), output_field=BooleanField(), ), ) self.assertCountEqual(contrived.values_list(), outer.values_list()) def test_nested_subquery_join_outer_ref(self): inner = Employee.objects.filter(pk=OuterRef('ceo__pk')).values('pk') qs = Employee.objects.annotate( ceo_company=Subquery( Company.objects.filter( ceo__in=inner, ceo__pk=OuterRef('pk'), ).values('pk'), ), ) self.assertSequenceEqual( qs.values_list('ceo_company', flat=True), [self.example_inc.pk, self.foobar_ltd.pk, self.gmbh.pk], ) def test_nested_subquery_outer_ref_2(self): first = Time.objects.create(time='09:00') second = Time.objects.create(time='17:00') third = Time.objects.create(time='21:00') SimulationRun.objects.bulk_create([ SimulationRun(start=first, end=second, midpoint='12:00'), SimulationRun(start=first, end=third, midpoint='15:00'), SimulationRun(start=second, end=first, midpoint='00:00'), ]) inner = Time.objects.filter(time=OuterRef(OuterRef('time')), pk=OuterRef('start')).values('time') middle = SimulationRun.objects.annotate(other=Subquery(inner)).values('other')[:1] outer = Time.objects.annotate(other=Subquery(middle, output_field=TimeField())) # This is a contrived example. It exercises the double OuterRef form. self.assertCountEqual(outer, [first, second, third]) def test_nested_subquery_outer_ref_with_autofield(self): first = Time.objects.create(time='09:00') second = Time.objects.create(time='17:00') SimulationRun.objects.create(start=first, end=second, midpoint='12:00') inner = SimulationRun.objects.filter(start=OuterRef(OuterRef('pk'))).values('start') middle = Time.objects.annotate(other=Subquery(inner)).values('other')[:1] outer = Time.objects.annotate(other=Subquery(middle, output_field=IntegerField())) # This exercises the double OuterRef form with AutoField as pk. 
self.assertCountEqual(outer, [first, second]) def test_annotations_within_subquery(self): Company.objects.filter(num_employees__lt=50).update(ceo=Employee.objects.get(firstname='Frank')) inner = Company.objects.filter( ceo=OuterRef('pk') ).values('ceo').annotate(total_employees=Sum('num_employees')).values('total_employees') outer = Employee.objects.annotate(total_employees=Subquery(inner)).filter(salary__lte=Subquery(inner)) self.assertSequenceEqual( outer.order_by('-total_employees').values('salary', 'total_employees'), [{'salary': 10, 'total_employees': 2300}, {'salary': 20, 'total_employees': 35}], ) def test_subquery_references_joined_table_twice(self): inner = Company.objects.filter( num_chairs__gte=OuterRef('ceo__salary'), num_employees__gte=OuterRef('point_of_contact__salary'), ) # Another contrived example (there is no need to have a subquery here) outer = Company.objects.filter(pk__in=Subquery(inner.values('pk'))) self.assertFalse(outer.exists()) def test_subquery_filter_by_aggregate(self): Number.objects.create(integer=1000, float=1.2) Employee.objects.create(salary=1000) qs = Number.objects.annotate( min_valuable_count=Subquery( Employee.objects.filter( salary=OuterRef('integer'), ).annotate(cnt=Count('salary')).filter(cnt__gt=0).values('cnt')[:1] ), ) self.assertEqual(qs.get().float, 1.2) def test_subquery_filter_by_lazy(self): self.max.manager = Manager.objects.create(name='Manager') self.max.save() max_manager = SimpleLazyObject( lambda: Manager.objects.get(pk=self.max.manager.pk) ) qs = Company.objects.annotate( ceo_manager=Subquery( Employee.objects.filter( lastname=OuterRef('ceo__lastname'), ).values('manager'), ), ).filter(ceo_manager=max_manager) self.assertEqual(qs.get(), self.gmbh) def test_aggregate_subquery_annotation(self): with self.assertNumQueries(1) as ctx: aggregate = Company.objects.annotate( ceo_salary=Subquery( Employee.objects.filter( id=OuterRef('ceo_id'), ).values('salary') ), ).aggregate( ceo_salary_gt_20=Count('pk', filter=Q(ceo_salary__gt=20)), ) self.assertEqual(aggregate, {'ceo_salary_gt_20': 1}) # Aggregation over a subquery annotation doesn't annotate the subquery # twice in the inner query. sql = ctx.captured_queries[0]['sql'] self.assertLessEqual(sql.count('SELECT'), 3) # GROUP BY isn't required to aggregate over a query that doesn't # contain nested aggregates. 
self.assertNotIn('GROUP BY', sql) def test_explicit_output_field(self): class FuncA(Func): output_field = CharField() class FuncB(Func): pass expr = FuncB(FuncA()) self.assertEqual(expr.output_field, FuncA.output_field) def test_outerref_mixed_case_table_name(self): inner = Result.objects.filter(result_time__gte=OuterRef('experiment__assigned')) outer = Result.objects.filter(pk__in=Subquery(inner.values('pk'))) self.assertFalse(outer.exists()) def test_outerref_with_operator(self): inner = Company.objects.filter(num_employees=OuterRef('ceo__salary') + 2) outer = Company.objects.filter(pk__in=Subquery(inner.values('pk'))) self.assertEqual(outer.get().name, 'Test GmbH') def test_nested_outerref_with_function(self): self.gmbh.point_of_contact = Employee.objects.get(lastname='Meyer') self.gmbh.save() inner = Employee.objects.filter( lastname__startswith=Left(OuterRef(OuterRef('lastname')), 1), ) qs = Employee.objects.annotate( ceo_company=Subquery( Company.objects.filter( point_of_contact__in=inner, ceo__pk=OuterRef('pk'), ).values('name'), ), ).filter(ceo_company__isnull=False) self.assertEqual(qs.get().ceo_company, 'Test GmbH') def test_annotation_with_outerref(self): gmbh_salary = Company.objects.annotate( max_ceo_salary_raise=Subquery( Company.objects.annotate( salary_raise=OuterRef('num_employees') + F('num_employees'), ).order_by('-salary_raise').values('salary_raise')[:1], output_field=IntegerField(), ), ).get(pk=self.gmbh.pk) self.assertEqual(gmbh_salary.max_ceo_salary_raise, 2332) def test_annotation_with_nested_outerref(self): self.gmbh.point_of_contact = Employee.objects.get(lastname='Meyer') self.gmbh.save() inner = Employee.objects.annotate( outer_lastname=OuterRef(OuterRef('lastname')), ).filter(lastname__startswith=Left('outer_lastname', 1)) qs = Employee.objects.annotate( ceo_company=Subquery( Company.objects.filter( point_of_contact__in=inner, ceo__pk=OuterRef('pk'), ).values('name'), ), ).filter(ceo_company__isnull=False) self.assertEqual(qs.get().ceo_company, 'Test GmbH') def test_pickle_expression(self): expr = Value(1, output_field=IntegerField()) expr.convert_value # populate cached property self.assertEqual(pickle.loads(pickle.dumps(expr)), expr) def test_incorrect_field_in_F_expression(self): with self.assertRaisesMessage(FieldError, "Cannot resolve keyword 'nope' into field."): list(Employee.objects.filter(firstname=F('nope'))) def test_incorrect_joined_field_in_F_expression(self): with self.assertRaisesMessage(FieldError, "Cannot resolve keyword 'nope' into field."): list(Company.objects.filter(ceo__pk=F('point_of_contact__nope'))) def test_exists_in_filter(self): inner = Company.objects.filter(ceo=OuterRef('pk')).values('pk') qs1 = Employee.objects.filter(Exists(inner)) qs2 = Employee.objects.annotate(found=Exists(inner)).filter(found=True) self.assertCountEqual(qs1, qs2) self.assertFalse(Employee.objects.exclude(Exists(inner)).exists()) self.assertCountEqual(qs2, Employee.objects.exclude(~Exists(inner))) def test_subquery_in_filter(self): inner = Company.objects.filter(ceo=OuterRef('pk')).values('based_in_eu') self.assertSequenceEqual( Employee.objects.filter(Subquery(inner)), [self.foobar_ltd.ceo], ) def test_subquery_group_by_outerref_in_filter(self): inner = Company.objects.annotate( employee=OuterRef('pk'), ).values('employee').annotate( min_num_chairs=Min('num_chairs'), ).values('ceo') self.assertIs(Employee.objects.filter(pk__in=Subquery(inner)).exists(), True) def test_case_in_filter_if_boolean_output_field(self): is_ceo = 
Company.objects.filter(ceo=OuterRef('pk')) is_poc = Company.objects.filter(point_of_contact=OuterRef('pk')) qs = Employee.objects.filter( Case( When(Exists(is_ceo), then=True), When(Exists(is_poc), then=True), default=False, output_field=BooleanField(), ), ) self.assertSequenceEqual(qs, [self.example_inc.ceo, self.foobar_ltd.ceo, self.max]) def test_boolean_expression_combined(self): is_ceo = Company.objects.filter(ceo=OuterRef('pk')) is_poc = Company.objects.filter(point_of_contact=OuterRef('pk')) self.gmbh.point_of_contact = self.max self.gmbh.save() self.assertSequenceEqual( Employee.objects.filter(Exists(is_ceo) | Exists(is_poc)), [self.example_inc.ceo, self.foobar_ltd.ceo, self.max], ) self.assertSequenceEqual( Employee.objects.filter(Exists(is_ceo) & Exists(is_poc)), [self.max], ) self.assertSequenceEqual( Employee.objects.filter(Exists(is_ceo) & Q(salary__gte=30)), [self.max], ) self.assertSequenceEqual( Employee.objects.filter(Exists(is_poc) | Q(salary__lt=15)), [self.example_inc.ceo, self.max], ) class IterableLookupInnerExpressionsTests(TestCase): @classmethod def setUpTestData(cls): ceo = Employee.objects.create(firstname='Just', lastname='Doit', salary=30) # MySQL requires that the values calculated for expressions don't pass # outside of the field's range, so it's inconvenient to use the values # in the more general tests. Company.objects.create(name='5020 Ltd', num_employees=50, num_chairs=20, ceo=ceo) Company.objects.create(name='5040 Ltd', num_employees=50, num_chairs=40, ceo=ceo) Company.objects.create(name='5050 Ltd', num_employees=50, num_chairs=50, ceo=ceo) Company.objects.create(name='5060 Ltd', num_employees=50, num_chairs=60, ceo=ceo) Company.objects.create(name='99300 Ltd', num_employees=99, num_chairs=300, ceo=ceo) def test_in_lookup_allows_F_expressions_and_expressions_for_integers(self): # __in lookups can use F() expressions for integers. 
queryset = Company.objects.filter(num_employees__in=([F('num_chairs') - 10])) self.assertQuerysetEqual(queryset, ['<Company: 5060 Ltd>'], ordered=False) self.assertQuerysetEqual( Company.objects.filter(num_employees__in=([F('num_chairs') - 10, F('num_chairs') + 10])), ['<Company: 5040 Ltd>', '<Company: 5060 Ltd>'], ordered=False ) self.assertQuerysetEqual( Company.objects.filter( num_employees__in=([F('num_chairs') - 10, F('num_chairs'), F('num_chairs') + 10]) ), ['<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'], ordered=False ) def test_expressions_in_lookups_join_choice(self): midpoint = datetime.time(13, 0) t1 = Time.objects.create(time=datetime.time(12, 0)) t2 = Time.objects.create(time=datetime.time(14, 0)) SimulationRun.objects.create(start=t1, end=t2, midpoint=midpoint) SimulationRun.objects.create(start=t1, end=None, midpoint=midpoint) SimulationRun.objects.create(start=None, end=t2, midpoint=midpoint) SimulationRun.objects.create(start=None, end=None, midpoint=midpoint) queryset = SimulationRun.objects.filter(midpoint__range=[F('start__time'), F('end__time')]) self.assertQuerysetEqual( queryset, ['<SimulationRun: 13:00:00 (12:00:00 to 14:00:00)>'], ordered=False ) for alias in queryset.query.alias_map.values(): if isinstance(alias, Join): self.assertEqual(alias.join_type, constants.INNER) queryset = SimulationRun.objects.exclude(midpoint__range=[F('start__time'), F('end__time')]) self.assertQuerysetEqual(queryset, [], ordered=False) for alias in queryset.query.alias_map.values(): if isinstance(alias, Join): self.assertEqual(alias.join_type, constants.LOUTER) def test_range_lookup_allows_F_expressions_and_expressions_for_integers(self): # Range lookups can use F() expressions for integers. Company.objects.filter(num_employees__exact=F("num_chairs")) self.assertQuerysetEqual( Company.objects.filter(num_employees__range=(F('num_chairs'), 100)), ['<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>'], ordered=False ) self.assertQuerysetEqual( Company.objects.filter(num_employees__range=(F('num_chairs') - 10, F('num_chairs') + 10)), ['<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'], ordered=False ) self.assertQuerysetEqual( Company.objects.filter(num_employees__range=(F('num_chairs') - 10, 100)), ['<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'], ordered=False ) self.assertQuerysetEqual( Company.objects.filter(num_employees__range=(1, 100)), [ '<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>', '<Company: 99300 Ltd>', ], ordered=False ) @unittest.skipUnless(connection.vendor == 'sqlite', "This defensive test only works on databases that don't validate parameter types") def test_complex_expressions_do_not_introduce_sql_injection_via_untrusted_string_inclusion(self): """ This tests that SQL injection isn't possible using compilation of expressions in iterable filters, as their compilation happens before the main query compilation. It's limited to SQLite, as PostgreSQL, Oracle and other vendors have defense in depth against this by type checking. Testing against SQLite (the most permissive of the built-in databases) demonstrates that the problem doesn't exist while keeping the test simple. 
""" queryset = Company.objects.filter(name__in=[F('num_chairs') + '1)) OR ((1==1']) self.assertQuerysetEqual(queryset, [], ordered=False) def test_in_lookup_allows_F_expressions_and_expressions_for_datetimes(self): start = datetime.datetime(2016, 2, 3, 15, 0, 0) end = datetime.datetime(2016, 2, 5, 15, 0, 0) experiment_1 = Experiment.objects.create( name='Integrity testing', assigned=start.date(), start=start, end=end, completed=end.date(), estimated_time=end - start, ) experiment_2 = Experiment.objects.create( name='Taste testing', assigned=start.date(), start=start, end=end, completed=end.date(), estimated_time=end - start, ) Result.objects.create( experiment=experiment_1, result_time=datetime.datetime(2016, 2, 4, 15, 0, 0), ) Result.objects.create( experiment=experiment_1, result_time=datetime.datetime(2016, 3, 10, 2, 0, 0), ) Result.objects.create( experiment=experiment_2, result_time=datetime.datetime(2016, 1, 8, 5, 0, 0), ) within_experiment_time = [F('experiment__start'), F('experiment__end')] queryset = Result.objects.filter(result_time__range=within_experiment_time) self.assertQuerysetEqual(queryset, ["<Result: Result at 2016-02-04 15:00:00>"]) within_experiment_time = [F('experiment__start'), F('experiment__end')] queryset = Result.objects.filter(result_time__range=within_experiment_time) self.assertQuerysetEqual(queryset, ["<Result: Result at 2016-02-04 15:00:00>"]) class FTests(SimpleTestCase): def test_deepcopy(self): f = F("foo") g = deepcopy(f) self.assertEqual(f.name, g.name) def test_deconstruct(self): f = F('name') path, args, kwargs = f.deconstruct() self.assertEqual(path, 'django.db.models.expressions.F') self.assertEqual(args, (f.name,)) self.assertEqual(kwargs, {}) def test_equal(self): f = F('name') same_f = F('name') other_f = F('username') self.assertEqual(f, same_f) self.assertNotEqual(f, other_f) def test_hash(self): d = {F('name'): 'Bob'} self.assertIn(F('name'), d) self.assertEqual(d[F('name')], 'Bob') def test_not_equal_Value(self): f = F('name') value = Value('name') self.assertNotEqual(f, value) self.assertNotEqual(value, f) class ExpressionsTests(TestCase): def test_F_reuse(self): f = F('id') n = Number.objects.create(integer=-1) c = Company.objects.create( name="Example Inc.", num_employees=2300, num_chairs=5, ceo=Employee.objects.create(firstname="Joe", lastname="Smith") ) c_qs = Company.objects.filter(id=f) self.assertEqual(c_qs.get(), c) # Reuse the same F-object for another queryset n_qs = Number.objects.filter(id=f) self.assertEqual(n_qs.get(), n) # The original query still works correctly self.assertEqual(c_qs.get(), c) def test_patterns_escape(self): r""" Special characters (e.g. 
%, _ and \) stored in database are properly escaped when using a pattern lookup with an expression refs #16731 """ Employee.objects.bulk_create([ Employee(firstname="%Joh\\nny", lastname="%Joh\\n"), Employee(firstname="Johnny", lastname="%John"), Employee(firstname="Jean-Claude", lastname="Claud_"), Employee(firstname="Jean-Claude", lastname="Claude"), Employee(firstname="Jean-Claude", lastname="Claude%"), Employee(firstname="Johnny", lastname="Joh\\n"), Employee(firstname="Johnny", lastname="John"), Employee(firstname="Johnny", lastname="_ohn"), ]) self.assertQuerysetEqual( Employee.objects.filter(firstname__contains=F('lastname')), ["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Jean-Claude Claude>", "<Employee: Johnny John>"], ordered=False, ) self.assertQuerysetEqual( Employee.objects.filter(firstname__startswith=F('lastname')), ["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Johnny John>"], ordered=False, ) self.assertQuerysetEqual( Employee.objects.filter(firstname__endswith=F('lastname')), ["<Employee: Jean-Claude Claude>"], ordered=False, ) def test_insensitive_patterns_escape(self): r""" Special characters (e.g. %, _ and \) stored in database are properly escaped when using a case insensitive pattern lookup with an expression -- refs #16731 """ Employee.objects.bulk_create([ Employee(firstname="%Joh\\nny", lastname="%joh\\n"), Employee(firstname="Johnny", lastname="%john"), Employee(firstname="Jean-Claude", lastname="claud_"), Employee(firstname="Jean-Claude", lastname="claude"), Employee(firstname="Jean-Claude", lastname="claude%"), Employee(firstname="Johnny", lastname="joh\\n"), Employee(firstname="Johnny", lastname="john"), Employee(firstname="Johnny", lastname="_ohn"), ]) self.assertQuerysetEqual( Employee.objects.filter(firstname__icontains=F('lastname')), ["<Employee: %Joh\\nny %joh\\n>", "<Employee: Jean-Claude claude>", "<Employee: Johnny john>"], ordered=False, ) self.assertQuerysetEqual( Employee.objects.filter(firstname__istartswith=F('lastname')), ["<Employee: %Joh\\nny %joh\\n>", "<Employee: Johnny john>"], ordered=False, ) self.assertQuerysetEqual( Employee.objects.filter(firstname__iendswith=F('lastname')), ["<Employee: Jean-Claude claude>"], ordered=False, ) @isolate_apps('expressions') class SimpleExpressionTests(SimpleTestCase): def test_equal(self): self.assertEqual(Expression(), Expression()) self.assertEqual( Expression(IntegerField()), Expression(output_field=IntegerField()) ) self.assertEqual(Expression(IntegerField()), mock.ANY) self.assertNotEqual( Expression(IntegerField()), Expression(CharField()) ) class TestModel(Model): field = IntegerField() other_field = IntegerField() self.assertNotEqual( Expression(TestModel._meta.get_field('field')), Expression(TestModel._meta.get_field('other_field')), ) def test_hash(self): self.assertEqual(hash(Expression()), hash(Expression())) self.assertEqual( hash(Expression(IntegerField())), hash(Expression(output_field=IntegerField())) ) self.assertNotEqual( hash(Expression(IntegerField())), hash(Expression(CharField())), ) class TestModel(Model): field = IntegerField() other_field = IntegerField() self.assertNotEqual( hash(Expression(TestModel._meta.get_field('field'))), hash(Expression(TestModel._meta.get_field('other_field'))), ) class ExpressionsNumericTests(TestCase): @classmethod def setUpTestData(cls): Number(integer=-1).save() Number(integer=42).save() Number(integer=1337).save() Number.objects.update(float=F('integer')) def test_fill_with_value_from_same_object(self): """ We can fill a value in all objects with an 
other value of the same object. """ self.assertQuerysetEqual( Number.objects.all(), ['<Number: -1, -1.000>', '<Number: 42, 42.000>', '<Number: 1337, 1337.000>'], ordered=False ) def test_increment_value(self): """ We can increment a value of all objects in a query set. """ self.assertEqual(Number.objects.filter(integer__gt=0).update(integer=F('integer') + 1), 2) self.assertQuerysetEqual( Number.objects.all(), ['<Number: -1, -1.000>', '<Number: 43, 42.000>', '<Number: 1338, 1337.000>'], ordered=False ) def test_filter_not_equals_other_field(self): """ We can filter for objects, where a value is not equals the value of an other field. """ self.assertEqual(Number.objects.filter(integer__gt=0).update(integer=F('integer') + 1), 2) self.assertQuerysetEqual( Number.objects.exclude(float=F('integer')), ['<Number: 43, 42.000>', '<Number: 1338, 1337.000>'], ordered=False ) def test_complex_expressions(self): """ Complex expressions of different connection types are possible. """ n = Number.objects.create(integer=10, float=123.45) self.assertEqual(Number.objects.filter(pk=n.pk).update( float=F('integer') + F('float') * 2), 1) self.assertEqual(Number.objects.get(pk=n.pk).integer, 10) self.assertEqual(Number.objects.get(pk=n.pk).float, Approximate(256.900, places=3)) class ExpressionOperatorTests(TestCase): @classmethod def setUpTestData(cls): cls.n = Number.objects.create(integer=42, float=15.5) cls.n1 = Number.objects.create(integer=-42, float=-15.5) def test_lefthand_addition(self): # LH Addition of floats and integers Number.objects.filter(pk=self.n.pk).update( integer=F('integer') + 15, float=F('float') + 42.7 ) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3)) def test_lefthand_subtraction(self): # LH Subtraction of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') - 15, float=F('float') - 42.7) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(-27.200, places=3)) def test_lefthand_multiplication(self): # Multiplication of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') * 15, float=F('float') * 42.7) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3)) def test_lefthand_division(self): # LH Division of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') / 2, float=F('float') / 42.7) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 21) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(0.363, places=3)) def test_lefthand_modulo(self): # LH Modulo arithmetic on integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') % 20) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 2) def test_lefthand_bitwise_and(self): # LH Bitwise ands on integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitand(56)) Number.objects.filter(pk=self.n1.pk).update(integer=F('integer').bitand(-56)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 40) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -64) def test_lefthand_bitwise_left_shift_operator(self): Number.objects.update(integer=F('integer').bitleftshift(2)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 168) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -168) def 
test_lefthand_bitwise_right_shift_operator(self): Number.objects.update(integer=F('integer').bitrightshift(2)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 10) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -11) def test_lefthand_bitwise_or(self): # LH Bitwise or on integers Number.objects.update(integer=F('integer').bitor(48)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 58) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -10) def test_lefthand_power(self): # LH Power arithmetic operation on floats and integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') ** 2, float=F('float') ** 1.5) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 1764) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(61.02, places=2)) @unittest.skipIf(connection.vendor == 'oracle', "Oracle doesn't support bitwise XOR.") def test_lefthand_bitwise_xor(self): Number.objects.update(integer=F('integer').bitxor(48)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 26) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -26) @unittest.skipIf(connection.vendor == 'oracle', "Oracle doesn't support bitwise XOR.") def test_lefthand_bitwise_xor_null(self): employee = Employee.objects.create(firstname='John', lastname='Doe') Employee.objects.update(salary=F('salary').bitxor(48)) employee.refresh_from_db() self.assertIsNone(employee.salary) @unittest.skipUnless(connection.vendor == 'oracle', "Oracle doesn't support bitwise XOR.") def test_lefthand_bitwise_xor_not_supported(self): msg = 'Bitwise XOR is not supported in Oracle.' with self.assertRaisesMessage(NotSupportedError, msg): Number.objects.update(integer=F('integer').bitxor(48)) def test_right_hand_addition(self): # Right hand operators Number.objects.filter(pk=self.n.pk).update(integer=15 + F('integer'), float=42.7 + F('float')) # RH Addition of floats and integers self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3)) def test_right_hand_subtraction(self): Number.objects.filter(pk=self.n.pk).update(integer=15 - F('integer'), float=42.7 - F('float')) # RH Subtraction of floats and integers self.assertEqual(Number.objects.get(pk=self.n.pk).integer, -27) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(27.200, places=3)) def test_right_hand_multiplication(self): # RH Multiplication of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=15 * F('integer'), float=42.7 * F('float')) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3)) def test_right_hand_division(self): # RH Division of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=640 / F('integer'), float=42.7 / F('float')) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 15) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(2.755, places=3)) def test_right_hand_modulo(self): # RH Modulo arithmetic on integers Number.objects.filter(pk=self.n.pk).update(integer=69 % F('integer')) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27) def test_righthand_power(self): # RH Power arithmetic operation on floats and integers Number.objects.filter(pk=self.n.pk).update(integer=2 ** F('integer'), float=1.5 ** F('float')) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 4398046511104) 
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(536.308, places=3)) class FTimeDeltaTests(TestCase): @classmethod def setUpTestData(cls): cls.sday = sday = datetime.date(2010, 6, 25) cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000) midnight = datetime.time(0) delta0 = datetime.timedelta(0) delta1 = datetime.timedelta(microseconds=253000) delta2 = datetime.timedelta(seconds=44) delta3 = datetime.timedelta(hours=21, minutes=8) delta4 = datetime.timedelta(days=10) delta5 = datetime.timedelta(days=90) # Test data is set so that deltas and delays will be # strictly increasing. cls.deltas = [] cls.delays = [] cls.days_long = [] # e0: started same day as assigned, zero duration end = stime + delta0 cls.e0 = Experiment.objects.create( name='e0', assigned=sday, start=stime, end=end, completed=end.date(), estimated_time=delta0, ) cls.deltas.append(delta0) cls.delays.append(cls.e0.start - datetime.datetime.combine(cls.e0.assigned, midnight)) cls.days_long.append(cls.e0.completed - cls.e0.assigned) # e1: started one day after assigned, tiny duration, data # set so that end time has no fractional seconds, which # tests an edge case on sqlite. delay = datetime.timedelta(1) end = stime + delay + delta1 e1 = Experiment.objects.create( name='e1', assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta1, ) cls.deltas.append(delta1) cls.delays.append(e1.start - datetime.datetime.combine(e1.assigned, midnight)) cls.days_long.append(e1.completed - e1.assigned) # e2: started three days after assigned, small duration end = stime + delta2 e2 = Experiment.objects.create( name='e2', assigned=sday - datetime.timedelta(3), start=stime, end=end, completed=end.date(), estimated_time=datetime.timedelta(hours=1), ) cls.deltas.append(delta2) cls.delays.append(e2.start - datetime.datetime.combine(e2.assigned, midnight)) cls.days_long.append(e2.completed - e2.assigned) # e3: started four days after assigned, medium duration delay = datetime.timedelta(4) end = stime + delay + delta3 e3 = Experiment.objects.create( name='e3', assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta3, ) cls.deltas.append(delta3) cls.delays.append(e3.start - datetime.datetime.combine(e3.assigned, midnight)) cls.days_long.append(e3.completed - e3.assigned) # e4: started 10 days after assignment, long duration end = stime + delta4 e4 = Experiment.objects.create( name='e4', assigned=sday - datetime.timedelta(10), start=stime, end=end, completed=end.date(), estimated_time=delta4 - datetime.timedelta(1), ) cls.deltas.append(delta4) cls.delays.append(e4.start - datetime.datetime.combine(e4.assigned, midnight)) cls.days_long.append(e4.completed - e4.assigned) # e5: started a month after assignment, very long duration delay = datetime.timedelta(30) end = stime + delay + delta5 e5 = Experiment.objects.create( name='e5', assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta5, ) cls.deltas.append(delta5) cls.delays.append(e5.start - datetime.datetime.combine(e5.assigned, midnight)) cls.days_long.append(e5.completed - e5.assigned) cls.expnames = [e.name for e in Experiment.objects.all()] def test_multiple_query_compilation(self): # Ticket #21643 queryset = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1)) q1 = str(queryset.query) q2 = str(queryset.query) self.assertEqual(q1, q2) def test_query_clone(self): # Ticket #21643 - Crash when compiling query more than once qs = 
Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1)) qs2 = qs.all() list(qs) list(qs2) # Intentionally no assert def test_delta_add(self): for i, delta in enumerate(self.deltas): test_set = [e.name for e in Experiment.objects.filter(end__lt=F('start') + delta)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(end__lt=delta + F('start'))] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(end__lte=F('start') + delta)] self.assertEqual(test_set, self.expnames[:i + 1]) def test_delta_subtract(self): for i, delta in enumerate(self.deltas): test_set = [e.name for e in Experiment.objects.filter(start__gt=F('end') - delta)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(start__gte=F('end') - delta)] self.assertEqual(test_set, self.expnames[:i + 1]) def test_exclude(self): for i, delta in enumerate(self.deltas): test_set = [e.name for e in Experiment.objects.exclude(end__lt=F('start') + delta)] self.assertEqual(test_set, self.expnames[i:]) test_set = [e.name for e in Experiment.objects.exclude(end__lte=F('start') + delta)] self.assertEqual(test_set, self.expnames[i + 1:]) def test_date_comparison(self): for i, days in enumerate(self.days_long): test_set = [e.name for e in Experiment.objects.filter(completed__lt=F('assigned') + days)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(completed__lte=F('assigned') + days)] self.assertEqual(test_set, self.expnames[:i + 1]) @skipUnlessDBFeature("supports_mixed_date_datetime_comparisons") def test_mixed_comparisons1(self): for i, delay in enumerate(self.delays): test_set = [e.name for e in Experiment.objects.filter(assigned__gt=F('start') - delay)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(assigned__gte=F('start') - delay)] self.assertEqual(test_set, self.expnames[:i + 1]) def test_mixed_comparisons2(self): for i, delay in enumerate(self.delays): delay = datetime.timedelta(delay.days) test_set = [e.name for e in Experiment.objects.filter(start__lt=F('assigned') + delay)] self.assertEqual(test_set, self.expnames[:i]) test_set = [ e.name for e in Experiment.objects.filter(start__lte=F('assigned') + delay + datetime.timedelta(1)) ] self.assertEqual(test_set, self.expnames[:i + 1]) def test_delta_update(self): for delta in self.deltas: exps = Experiment.objects.all() expected_durations = [e.duration() for e in exps] expected_starts = [e.start + delta for e in exps] expected_ends = [e.end + delta for e in exps] Experiment.objects.update(start=F('start') + delta, end=F('end') + delta) exps = Experiment.objects.all() new_starts = [e.start for e in exps] new_ends = [e.end for e in exps] new_durations = [e.duration() for e in exps] self.assertEqual(expected_starts, new_starts) self.assertEqual(expected_ends, new_ends) self.assertEqual(expected_durations, new_durations) def test_invalid_operator(self): with self.assertRaises(DatabaseError): list(Experiment.objects.filter(start=F('start') * datetime.timedelta(0))) def test_durationfield_add(self): zeros = [e.name for e in Experiment.objects.filter(start=F('start') + F('estimated_time'))] self.assertEqual(zeros, ['e0']) end_less = [e.name for e in Experiment.objects.filter(end__lt=F('start') + F('estimated_time'))] self.assertEqual(end_less, ['e2']) delta_math = [ e.name for e in Experiment.objects.filter(end__gte=F('start') + 
F('estimated_time') + datetime.timedelta(hours=1)) ] self.assertEqual(delta_math, ['e4']) queryset = Experiment.objects.annotate(shifted=ExpressionWrapper( F('start') + Value(None, output_field=DurationField()), output_field=DateTimeField(), )) self.assertIsNone(queryset.first().shifted) def test_duration_expressions(self): for delta in self.deltas: qs = Experiment.objects.annotate(duration=F('estimated_time') + delta) for obj in qs: self.assertEqual(obj.duration, obj.estimated_time + delta) @skipUnlessDBFeature('supports_temporal_subtraction') def test_date_subtraction(self): queryset = Experiment.objects.annotate( completion_duration=ExpressionWrapper( F('completed') - F('assigned'), output_field=DurationField() ) ) at_least_5_days = {e.name for e in queryset.filter(completion_duration__gte=datetime.timedelta(days=5))} self.assertEqual(at_least_5_days, {'e3', 'e4', 'e5'}) at_least_120_days = {e.name for e in queryset.filter(completion_duration__gte=datetime.timedelta(days=120))} self.assertEqual(at_least_120_days, {'e5'}) less_than_5_days = {e.name for e in queryset.filter(completion_duration__lt=datetime.timedelta(days=5))} self.assertEqual(less_than_5_days, {'e0', 'e1', 'e2'}) queryset = Experiment.objects.annotate(difference=ExpressionWrapper( F('completed') - Value(None, output_field=DateField()), output_field=DurationField(), )) self.assertIsNone(queryset.first().difference) queryset = Experiment.objects.annotate(shifted=ExpressionWrapper( F('completed') - Value(None, output_field=DurationField()), output_field=DateField(), )) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature('supports_temporal_subtraction') def test_date_subquery_subtraction(self): subquery = Experiment.objects.filter(pk=OuterRef('pk')).values('completed') queryset = Experiment.objects.annotate( difference=ExpressionWrapper( subquery - F('completed'), output_field=DurationField(), ), ).filter(difference=datetime.timedelta()) self.assertTrue(queryset.exists()) @skipUnlessDBFeature('supports_temporal_subtraction') def test_date_case_subtraction(self): queryset = Experiment.objects.annotate( date_case=Case( When(Q(name='e0'), then=F('completed')), output_field=DateField(), ), completed_value=Value( self.e0.completed, output_field=DateField(), ), difference=ExpressionWrapper( F('date_case') - F('completed_value'), output_field=DurationField(), ), ).filter(difference=datetime.timedelta()) self.assertEqual(queryset.get(), self.e0) @skipUnlessDBFeature('supports_temporal_subtraction') def test_time_subtraction(self): Time.objects.create(time=datetime.time(12, 30, 15, 2345)) queryset = Time.objects.annotate( difference=ExpressionWrapper( F('time') - Value(datetime.time(11, 15, 0), output_field=TimeField()), output_field=DurationField(), ) ) self.assertEqual( queryset.get().difference, datetime.timedelta(hours=1, minutes=15, seconds=15, microseconds=2345) ) queryset = Time.objects.annotate(difference=ExpressionWrapper( F('time') - Value(None, output_field=TimeField()), output_field=DurationField(), )) self.assertIsNone(queryset.first().difference) queryset = Time.objects.annotate(shifted=ExpressionWrapper( F('time') - Value(None, output_field=DurationField()), output_field=TimeField(), )) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature('supports_temporal_subtraction') def test_time_subquery_subtraction(self): Time.objects.create(time=datetime.time(12, 30, 15, 2345)) subquery = Time.objects.filter(pk=OuterRef('pk')).values('time') queryset = Time.objects.annotate( difference=ExpressionWrapper( 
subquery - F('time'), output_field=DurationField(), ), ).filter(difference=datetime.timedelta()) self.assertTrue(queryset.exists()) @skipUnlessDBFeature('supports_temporal_subtraction') def test_datetime_subtraction(self): under_estimate = [ e.name for e in Experiment.objects.filter(estimated_time__gt=F('end') - F('start')) ] self.assertEqual(under_estimate, ['e2']) over_estimate = [ e.name for e in Experiment.objects.filter(estimated_time__lt=F('end') - F('start')) ] self.assertEqual(over_estimate, ['e4']) queryset = Experiment.objects.annotate(difference=ExpressionWrapper( F('start') - Value(None, output_field=DateTimeField()), output_field=DurationField(), )) self.assertIsNone(queryset.first().difference) queryset = Experiment.objects.annotate(shifted=ExpressionWrapper( F('start') - Value(None, output_field=DurationField()), output_field=DateTimeField(), )) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature('supports_temporal_subtraction') def test_datetime_subquery_subtraction(self): subquery = Experiment.objects.filter(pk=OuterRef('pk')).values('start') queryset = Experiment.objects.annotate( difference=ExpressionWrapper( subquery - F('start'), output_field=DurationField(), ), ).filter(difference=datetime.timedelta()) self.assertTrue(queryset.exists()) @skipUnlessDBFeature('supports_temporal_subtraction') def test_datetime_subtraction_microseconds(self): delta = datetime.timedelta(microseconds=8999999999999999) Experiment.objects.update(end=F('start') + delta) qs = Experiment.objects.annotate( delta=ExpressionWrapper(F('end') - F('start'), output_field=DurationField()) ) for e in qs: self.assertEqual(e.delta, delta) def test_duration_with_datetime(self): # Exclude e1 which has very high precision so we can test this on all # backends regardless of whether or not it supports # microsecond_precision. 
over_estimate = Experiment.objects.exclude(name='e1').filter( completed__gt=self.stime + F('estimated_time'), ).order_by('name') self.assertQuerysetEqual(over_estimate, ['e3', 'e4', 'e5'], lambda e: e.name) def test_duration_with_datetime_microseconds(self): delta = datetime.timedelta(microseconds=8999999999999999) qs = Experiment.objects.annotate(dt=ExpressionWrapper( F('start') + delta, output_field=DateTimeField(), )) for e in qs: self.assertEqual(e.dt, e.start + delta) def test_date_minus_duration(self): more_than_4_days = Experiment.objects.filter( assigned__lt=F('completed') - Value(datetime.timedelta(days=4), output_field=DurationField()) ) self.assertQuerysetEqual(more_than_4_days, ['e3', 'e4', 'e5'], lambda e: e.name) def test_negative_timedelta_update(self): # subtract 30 seconds, 30 minutes, 2 hours and 2 days experiments = Experiment.objects.filter(name='e0').annotate( start_sub_seconds=F('start') + datetime.timedelta(seconds=-30), ).annotate( start_sub_minutes=F('start_sub_seconds') + datetime.timedelta(minutes=-30), ).annotate( start_sub_hours=F('start_sub_minutes') + datetime.timedelta(hours=-2), ).annotate( new_start=F('start_sub_hours') + datetime.timedelta(days=-2), ) expected_start = datetime.datetime(2010, 6, 23, 9, 45, 0) # subtract 30 microseconds experiments = experiments.annotate(new_start=F('new_start') + datetime.timedelta(microseconds=-30)) expected_start += datetime.timedelta(microseconds=+746970) experiments.update(start=F('new_start')) e0 = Experiment.objects.get(name='e0') self.assertEqual(e0.start, expected_start) class ValueTests(TestCase): def test_update_TimeField_using_Value(self): Time.objects.create() Time.objects.update(time=Value(datetime.time(1), output_field=TimeField())) self.assertEqual(Time.objects.get().time, datetime.time(1)) def test_update_UUIDField_using_Value(self): UUID.objects.create() UUID.objects.update(uuid=Value(uuid.UUID('12345678901234567890123456789012'), output_field=UUIDField())) self.assertEqual(UUID.objects.get().uuid, uuid.UUID('12345678901234567890123456789012')) def test_deconstruct(self): value = Value('name') path, args, kwargs = value.deconstruct() self.assertEqual(path, 'django.db.models.expressions.Value') self.assertEqual(args, (value.value,)) self.assertEqual(kwargs, {}) def test_deconstruct_output_field(self): value = Value('name', output_field=CharField()) path, args, kwargs = value.deconstruct() self.assertEqual(path, 'django.db.models.expressions.Value') self.assertEqual(args, (value.value,)) self.assertEqual(len(kwargs), 1) self.assertEqual(kwargs['output_field'].deconstruct(), CharField().deconstruct()) def test_equal(self): value = Value('name') self.assertEqual(value, Value('name')) self.assertNotEqual(value, Value('username')) def test_hash(self): d = {Value('name'): 'Bob'} self.assertIn(Value('name'), d) self.assertEqual(d[Value('name')], 'Bob') def test_equal_output_field(self): value = Value('name', output_field=CharField()) same_value = Value('name', output_field=CharField()) other_value = Value('name', output_field=TimeField()) no_output_field = Value('name') self.assertEqual(value, same_value) self.assertNotEqual(value, other_value) self.assertNotEqual(value, no_output_field) def test_raise_empty_expressionlist(self): msg = 'ExpressionList requires at least one expression' with self.assertRaisesMessage(ValueError, msg): ExpressionList() class FieldTransformTests(TestCase): @classmethod def setUpTestData(cls): cls.sday = sday = datetime.date(2010, 6, 25) cls.stime = stime = datetime.datetime(2010, 6, 
25, 12, 15, 30, 747000) cls.ex1 = Experiment.objects.create( name='Experiment 1', assigned=sday, completed=sday + datetime.timedelta(2), estimated_time=datetime.timedelta(2), start=stime, end=stime + datetime.timedelta(2), ) def test_month_aggregation(self): self.assertEqual( Experiment.objects.aggregate(month_count=Count('assigned__month')), {'month_count': 1} ) def test_transform_in_values(self): self.assertQuerysetEqual( Experiment.objects.values('assigned__month'), ["{'assigned__month': 6}"] ) def test_multiple_transforms_in_values(self): self.assertQuerysetEqual( Experiment.objects.values('end__date__month'), ["{'end__date__month': 6}"] ) class ReprTests(SimpleTestCase): def test_expressions(self): self.assertEqual( repr(Case(When(a=1))), "<Case: CASE WHEN <Q: (AND: ('a', 1))> THEN Value(None), ELSE Value(None)>" ) self.assertEqual( repr(When(Q(age__gte=18), then=Value('legal'))), "<When: WHEN <Q: (AND: ('age__gte', 18))> THEN Value(legal)>" ) self.assertEqual(repr(Col('alias', 'field')), "Col(alias, field)") self.assertEqual(repr(F('published')), "F(published)") self.assertEqual(repr(F('cost') + F('tax')), "<CombinedExpression: F(cost) + F(tax)>") self.assertEqual( repr(ExpressionWrapper(F('cost') + F('tax'), IntegerField())), "ExpressionWrapper(F(cost) + F(tax))" ) self.assertEqual(repr(Func('published', function='TO_CHAR')), "Func(F(published), function=TO_CHAR)") self.assertEqual(repr(OrderBy(Value(1))), 'OrderBy(Value(1), descending=False)') self.assertEqual(repr(Random()), "Random()") self.assertEqual(repr(RawSQL('table.col', [])), "RawSQL(table.col, [])") self.assertEqual(repr(Ref('sum_cost', Sum('cost'))), "Ref(sum_cost, Sum(F(cost)))") self.assertEqual(repr(Value(1)), "Value(1)") self.assertEqual( repr(ExpressionList(F('col'), F('anothercol'))), 'ExpressionList(F(col), F(anothercol))' ) self.assertEqual( repr(ExpressionList(OrderBy(F('col'), descending=False))), 'ExpressionList(OrderBy(F(col), descending=False))' ) def test_functions(self): self.assertEqual(repr(Coalesce('a', 'b')), "Coalesce(F(a), F(b))") self.assertEqual(repr(Concat('a', 'b')), "Concat(ConcatPair(F(a), F(b)))") self.assertEqual(repr(Length('a')), "Length(F(a))") self.assertEqual(repr(Lower('a')), "Lower(F(a))") self.assertEqual(repr(Substr('a', 1, 3)), "Substr(F(a), Value(1), Value(3))") self.assertEqual(repr(Upper('a')), "Upper(F(a))") def test_aggregates(self): self.assertEqual(repr(Avg('a')), "Avg(F(a))") self.assertEqual(repr(Count('a')), "Count(F(a))") self.assertEqual(repr(Count('*')), "Count('*')") self.assertEqual(repr(Max('a')), "Max(F(a))") self.assertEqual(repr(Min('a')), "Min(F(a))") self.assertEqual(repr(StdDev('a')), "StdDev(F(a), sample=False)") self.assertEqual(repr(Sum('a')), "Sum(F(a))") self.assertEqual(repr(Variance('a', sample=True)), "Variance(F(a), sample=True)") def test_distinct_aggregates(self): self.assertEqual(repr(Count('a', distinct=True)), "Count(F(a), distinct=True)") self.assertEqual(repr(Count('*', distinct=True)), "Count('*', distinct=True)") def test_filtered_aggregates(self): filter = Q(a=1) self.assertEqual(repr(Avg('a', filter=filter)), "Avg(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(Count('a', filter=filter)), "Count(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(Max('a', filter=filter)), "Max(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(Min('a', filter=filter)), "Min(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(StdDev('a', filter=filter)), "StdDev(F(a), filter=(AND: ('a', 1)), sample=False)") self.assertEqual(repr(Sum('a', 
filter=filter)), "Sum(F(a), filter=(AND: ('a', 1)))") self.assertEqual( repr(Variance('a', sample=True, filter=filter)), "Variance(F(a), filter=(AND: ('a', 1)), sample=True)" ) self.assertEqual( repr(Count('a', filter=filter, distinct=True)), "Count(F(a), distinct=True, filter=(AND: ('a', 1)))" ) class CombinableTests(SimpleTestCase): bitwise_msg = 'Use .bitand() and .bitor() for bitwise logical operations.' def test_negation(self): c = Combinable() self.assertEqual(-c, c * -1) def test_and(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): Combinable() & Combinable() def test_or(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): Combinable() | Combinable() def test_reversed_and(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): object() & Combinable() def test_reversed_or(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): object() | Combinable() class ExpressionWrapperTests(SimpleTestCase): def test_empty_group_by(self): expr = ExpressionWrapper(Value(3), output_field=IntegerField()) self.assertEqual(expr.get_group_by_cols(alias=None), []) def test_non_empty_group_by(self): expr = ExpressionWrapper(Lower(Value('f')), output_field=IntegerField()) group_by_cols = expr.get_group_by_cols(alias=None) self.assertEqual(group_by_cols, [expr.expression]) self.assertEqual(group_by_cols[0].output_field, expr.output_field)
abee02e5b6ccdbab937db4da7c1061e8d3950f2b5d4b7df219d816f096d50b39
import decimal from django.core.management.color import no_style from django.db import NotSupportedError, connection, transaction from django.db.backends.base.operations import BaseDatabaseOperations from django.db.models import DurationField from django.test import ( SimpleTestCase, TestCase, TransactionTestCase, override_settings, skipIfDBFeature, ) from django.utils import timezone from ..models import Author, Book class SimpleDatabaseOperationTests(SimpleTestCase): may_require_msg = 'subclasses of BaseDatabaseOperations may require a %s() method' def setUp(self): self.ops = BaseDatabaseOperations(connection=connection) def test_deferrable_sql(self): self.assertEqual(self.ops.deferrable_sql(), '') def test_end_transaction_rollback(self): self.assertEqual(self.ops.end_transaction_sql(success=False), 'ROLLBACK;') def test_no_limit_value(self): with self.assertRaisesMessage(NotImplementedError, self.may_require_msg % 'no_limit_value'): self.ops.no_limit_value() def test_quote_name(self): with self.assertRaisesMessage(NotImplementedError, self.may_require_msg % 'quote_name'): self.ops.quote_name('a') def test_regex_lookup(self): with self.assertRaisesMessage(NotImplementedError, self.may_require_msg % 'regex_lookup'): self.ops.regex_lookup(lookup_type='regex') def test_set_time_zone_sql(self): self.assertEqual(self.ops.set_time_zone_sql(), '') def test_sql_flush(self): msg = 'subclasses of BaseDatabaseOperations must provide an sql_flush() method' with self.assertRaisesMessage(NotImplementedError, msg): self.ops.sql_flush(None, None) def test_pk_default_value(self): self.assertEqual(self.ops.pk_default_value(), 'DEFAULT') def test_tablespace_sql(self): self.assertEqual(self.ops.tablespace_sql(None), '') def test_sequence_reset_by_name_sql(self): self.assertEqual(self.ops.sequence_reset_by_name_sql(None, []), []) def test_adapt_unknown_value_decimal(self): value = decimal.Decimal('3.14') self.assertEqual( self.ops.adapt_unknown_value(value), self.ops.adapt_decimalfield_value(value) ) def test_adapt_unknown_value_date(self): value = timezone.now().date() self.assertEqual(self.ops.adapt_unknown_value(value), self.ops.adapt_datefield_value(value)) def test_adapt_unknown_value_time(self): value = timezone.now().time() self.assertEqual(self.ops.adapt_unknown_value(value), self.ops.adapt_timefield_value(value)) def test_adapt_timefield_value_none(self): self.assertIsNone(self.ops.adapt_timefield_value(None)) def test_adapt_datetimefield_value(self): self.assertIsNone(self.ops.adapt_datetimefield_value(None)) def test_adapt_timefield_value(self): msg = 'Django does not support timezone-aware times.' 
with self.assertRaisesMessage(ValueError, msg): self.ops.adapt_timefield_value(timezone.make_aware(timezone.now())) @override_settings(USE_TZ=False) def test_adapt_timefield_value_unaware(self): now = timezone.now() self.assertEqual(self.ops.adapt_timefield_value(now), str(now)) def test_date_extract_sql(self): with self.assertRaisesMessage(NotImplementedError, self.may_require_msg % 'date_extract_sql'): self.ops.date_extract_sql(None, None) def test_time_extract_sql(self): with self.assertRaisesMessage(NotImplementedError, self.may_require_msg % 'date_extract_sql'): self.ops.time_extract_sql(None, None) def test_date_trunc_sql(self): with self.assertRaisesMessage(NotImplementedError, self.may_require_msg % 'date_trunc_sql'): self.ops.date_trunc_sql(None, None) def test_time_trunc_sql(self): with self.assertRaisesMessage(NotImplementedError, self.may_require_msg % 'time_trunc_sql'): self.ops.time_trunc_sql(None, None) def test_datetime_trunc_sql(self): with self.assertRaisesMessage(NotImplementedError, self.may_require_msg % 'datetime_trunc_sql'): self.ops.datetime_trunc_sql(None, None, None) def test_datetime_cast_date_sql(self): with self.assertRaisesMessage(NotImplementedError, self.may_require_msg % 'datetime_cast_date_sql'): self.ops.datetime_cast_date_sql(None, None) def test_datetime_cast_time_sql(self): with self.assertRaisesMessage(NotImplementedError, self.may_require_msg % 'datetime_cast_time_sql'): self.ops.datetime_cast_time_sql(None, None) def test_datetime_extract_sql(self): with self.assertRaisesMessage(NotImplementedError, self.may_require_msg % 'datetime_extract_sql'): self.ops.datetime_extract_sql(None, None, None) def test_json_cast_text_sql(self): msg = self.may_require_msg % 'json_cast_text_sql' with self.assertRaisesMessage(NotImplementedError, msg): self.ops.json_cast_text_sql(None) class DatabaseOperationTests(TestCase): def setUp(self): self.ops = BaseDatabaseOperations(connection=connection) @skipIfDBFeature('supports_over_clause') def test_window_frame_raise_not_supported_error(self): msg = 'This backend does not support window expressions.' with self.assertRaisesMessage(NotSupportedError, msg): self.ops.window_frame_rows_start_end() @skipIfDBFeature('can_distinct_on_fields') def test_distinct_on_fields(self): msg = 'DISTINCT ON fields is not supported by this database backend' with self.assertRaisesMessage(NotSupportedError, msg): self.ops.distinct_sql(['a', 'b'], None) @skipIfDBFeature('supports_temporal_subtraction') def test_subtract_temporals(self): duration_field = DurationField() duration_field_internal_type = duration_field.get_internal_type() msg = ( 'This backend does not support %s subtraction.' 
% duration_field_internal_type ) with self.assertRaisesMessage(NotSupportedError, msg): self.ops.subtract_temporals(duration_field_internal_type, None, None) class SqlFlushTests(TransactionTestCase): available_apps = ['backends'] def test_sql_flush_no_tables(self): self.assertEqual(connection.ops.sql_flush(no_style(), []), []) def test_execute_sql_flush_statements(self): with transaction.atomic(): author = Author.objects.create(name='George Orwell') Book.objects.create(author=author) author = Author.objects.create(name='Harper Lee') Book.objects.create(author=author) Book.objects.create(author=author) self.assertIs(Author.objects.exists(), True) self.assertIs(Book.objects.exists(), True) sql_list = connection.ops.sql_flush( no_style(), [Author._meta.db_table, Book._meta.db_table], reset_sequences=True, allow_cascade=True, ) connection.ops.execute_sql_flush(sql_list) with transaction.atomic(): self.assertIs(Author.objects.exists(), False) self.assertIs(Book.objects.exists(), False) if connection.features.supports_sequence_reset: author = Author.objects.create(name='F. Scott Fitzgerald') self.assertEqual(author.pk, 1) book = Book.objects.create(author=author) self.assertEqual(book.pk, 1)
00db69458310e2c4039b7be74a842facb53bd82212434ac7679fb9e1f1ff96e1
import functools import inspect @functools.lru_cache(maxsize=512) def _get_signature(func): return inspect.signature(func) def get_func_args(func): sig = _get_signature(func) return [ arg_name for arg_name, param in sig.parameters.items() if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD ] def get_func_full_args(func): """ Return a list of (argument name, default value) tuples. If the argument does not have a default value, omit it in the tuple. Arguments such as *args and **kwargs are also included. """ sig = _get_signature(func) args = [] for arg_name, param in sig.parameters.items(): name = arg_name # Ignore 'self' if name == 'self': continue if param.kind == inspect.Parameter.VAR_POSITIONAL: name = '*' + name elif param.kind == inspect.Parameter.VAR_KEYWORD: name = '**' + name if param.default != inspect.Parameter.empty: args.append((name, param.default)) else: args.append((name,)) return args def func_accepts_kwargs(func): return any( p for p in _get_signature(func).parameters.values() if p.kind == p.VAR_KEYWORD ) def func_accepts_var_args(func): """ Return True if function 'func' accepts positional arguments *args. """ return any( p for p in _get_signature(func).parameters.values() if p.kind == p.VAR_POSITIONAL ) def method_has_no_args(meth): """Return True if a method only accepts 'self'.""" count = len([ p for p in _get_signature(meth).parameters.values() if p.kind == p.POSITIONAL_OR_KEYWORD ]) return count == 0 if inspect.ismethod(meth) else count == 1 def func_supports_parameter(func, parameter): return parameter in _get_signature(func).parameters
1edd382e6e9de61fbeffa0790240913921212b25a9960aa29fe6130b53032247
import copy import inspect import warnings from functools import partialmethod from itertools import chain import django from django.apps import apps from django.conf import settings from django.core import checks from django.core.exceptions import ( NON_FIELD_ERRORS, FieldDoesNotExist, FieldError, MultipleObjectsReturned, ObjectDoesNotExist, ValidationError, ) from django.db import ( DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY, DatabaseError, connection, connections, router, transaction, ) from django.db.models import ( NOT_PROVIDED, ExpressionWrapper, IntegerField, Max, Value, ) from django.db.models.constants import LOOKUP_SEP from django.db.models.constraints import CheckConstraint, UniqueConstraint from django.db.models.deletion import CASCADE, Collector from django.db.models.fields.related import ( ForeignObjectRel, OneToOneField, lazy_related_operation, resolve_relation, ) from django.db.models.functions import Coalesce from django.db.models.manager import Manager from django.db.models.options import Options from django.db.models.query import F, Q from django.db.models.signals import ( class_prepared, post_init, post_save, pre_init, pre_save, ) from django.db.models.utils import make_model_tuple from django.utils.encoding import force_str from django.utils.hashable import make_hashable from django.utils.text import capfirst, get_text_list from django.utils.translation import gettext_lazy as _ class Deferred: def __repr__(self): return '<Deferred field>' def __str__(self): return '<Deferred field>' DEFERRED = Deferred() def subclass_exception(name, bases, module, attached_to): """ Create exception subclass. Used by ModelBase below. The exception is created in a way that allows it to be pickled, assuming that the returned exception class will be added as an attribute to the 'attached_to' class. """ return type(name, bases, { '__module__': module, '__qualname__': '%s.%s' % (attached_to.__qualname__, name), }) def _has_contribute_to_class(value): # Only call contribute_to_class() if it's bound. return not inspect.isclass(value) and hasattr(value, 'contribute_to_class') class ModelBase(type): """Metaclass for all models.""" def __new__(cls, name, bases, attrs, **kwargs): super_new = super().__new__ # Also ensure initialization is only performed for subclasses of Model # (excluding Model class itself). parents = [b for b in bases if isinstance(b, ModelBase)] if not parents: return super_new(cls, name, bases, attrs) # Create the class. module = attrs.pop('__module__') new_attrs = {'__module__': module} classcell = attrs.pop('__classcell__', None) if classcell is not None: new_attrs['__classcell__'] = classcell attr_meta = attrs.pop('Meta', None) # Pass all attrs without a (Django-specific) contribute_to_class() # method to type.__new__() so that they're properly initialized # (i.e. __set_name__()). contributable_attrs = {} for obj_name, obj in attrs.items(): if _has_contribute_to_class(obj): contributable_attrs[obj_name] = obj else: new_attrs[obj_name] = obj new_class = super_new(cls, name, bases, new_attrs, **kwargs) abstract = getattr(attr_meta, 'abstract', False) meta = attr_meta or getattr(new_class, 'Meta', None) base_meta = getattr(new_class, '_meta', None) app_label = None # Look for an application configuration to attach the model to. 
app_config = apps.get_containing_app_config(module) if getattr(meta, 'app_label', None) is None: if app_config is None: if not abstract: raise RuntimeError( "Model class %s.%s doesn't declare an explicit " "app_label and isn't in an application in " "INSTALLED_APPS." % (module, name) ) else: app_label = app_config.label new_class.add_to_class('_meta', Options(meta, app_label)) if not abstract: new_class.add_to_class( 'DoesNotExist', subclass_exception( 'DoesNotExist', tuple( x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract ) or (ObjectDoesNotExist,), module, attached_to=new_class)) new_class.add_to_class( 'MultipleObjectsReturned', subclass_exception( 'MultipleObjectsReturned', tuple( x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract ) or (MultipleObjectsReturned,), module, attached_to=new_class)) if base_meta and not base_meta.abstract: # Non-abstract child classes inherit some attributes from their # non-abstract parent (unless an ABC comes before it in the # method resolution order). if not hasattr(meta, 'ordering'): new_class._meta.ordering = base_meta.ordering if not hasattr(meta, 'get_latest_by'): new_class._meta.get_latest_by = base_meta.get_latest_by is_proxy = new_class._meta.proxy # If the model is a proxy, ensure that the base class # hasn't been swapped out. if is_proxy and base_meta and base_meta.swapped: raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped)) # Add remaining attributes (those with a contribute_to_class() method) # to the class. for obj_name, obj in contributable_attrs.items(): new_class.add_to_class(obj_name, obj) # All the fields of any type declared on this model new_fields = chain( new_class._meta.local_fields, new_class._meta.local_many_to_many, new_class._meta.private_fields ) field_names = {f.name for f in new_fields} # Basic setup for proxy models. if is_proxy: base = None for parent in [kls for kls in parents if hasattr(kls, '_meta')]: if parent._meta.abstract: if parent._meta.fields: raise TypeError( "Abstract base class containing model fields not " "permitted for proxy model '%s'." % name ) else: continue if base is None: base = parent elif parent._meta.concrete_model is not base._meta.concrete_model: raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name) if base is None: raise TypeError("Proxy model '%s' has no non-abstract model base class." % name) new_class._meta.setup_proxy(base) new_class._meta.concrete_model = base._meta.concrete_model else: new_class._meta.concrete_model = new_class # Collect the parent links for multi-table inheritance. parent_links = {} for base in reversed([new_class] + parents): # Conceptually equivalent to `if base is Model`. if not hasattr(base, '_meta'): continue # Skip concrete parent classes. if base != new_class and not base._meta.abstract: continue # Locate OneToOneField instances. for field in base._meta.local_fields: if isinstance(field, OneToOneField) and field.remote_field.parent_link: related = resolve_relation(new_class, field.remote_field.model) parent_links[make_model_tuple(related)] = field # Track fields inherited from base models. inherited_attributes = set() # Do the appropriate setup for any model parents. for base in new_class.mro(): if base not in parents or not hasattr(base, '_meta'): # Things without _meta aren't functional models, so they're # uninteresting parents. 
inherited_attributes.update(base.__dict__) continue parent_fields = base._meta.local_fields + base._meta.local_many_to_many if not base._meta.abstract: # Check for clashes between locally declared fields and those # on the base classes. for field in parent_fields: if field.name in field_names: raise FieldError( 'Local field %r in class %r clashes with field of ' 'the same name from base class %r.' % ( field.name, name, base.__name__, ) ) else: inherited_attributes.add(field.name) # Concrete classes... base = base._meta.concrete_model base_key = make_model_tuple(base) if base_key in parent_links: field = parent_links[base_key] elif not is_proxy: attr_name = '%s_ptr' % base._meta.model_name field = OneToOneField( base, on_delete=CASCADE, name=attr_name, auto_created=True, parent_link=True, ) if attr_name in field_names: raise FieldError( "Auto-generated field '%s' in class %r for " "parent_link to base class %r clashes with " "declared field of the same name." % ( attr_name, name, base.__name__, ) ) # Only add the ptr field if it's not already present; # e.g. migrations will already have it specified if not hasattr(new_class, attr_name): new_class.add_to_class(attr_name, field) else: field = None new_class._meta.parents[base] = field else: base_parents = base._meta.parents.copy() # Add fields from abstract base class if it wasn't overridden. for field in parent_fields: if (field.name not in field_names and field.name not in new_class.__dict__ and field.name not in inherited_attributes): new_field = copy.deepcopy(field) new_class.add_to_class(field.name, new_field) # Replace parent links defined on this base by the new # field. It will be appropriately resolved if required. if field.one_to_one: for parent, parent_link in base_parents.items(): if field == parent_link: base_parents[parent] = new_field # Pass any non-abstract parent classes onto child. new_class._meta.parents.update(base_parents) # Inherit private fields (like GenericForeignKey) from the parent # class for field in base._meta.private_fields: if field.name in field_names: if not base._meta.abstract: raise FieldError( 'Local field %r in class %r clashes with field of ' 'the same name from base class %r.' % ( field.name, name, base.__name__, ) ) else: field = copy.deepcopy(field) if not base._meta.abstract: field.mti_inherited = True new_class.add_to_class(field.name, field) # Copy indexes so that index names are unique when models extend an # abstract model. new_class._meta.indexes = [copy.deepcopy(idx) for idx in new_class._meta.indexes] if abstract: # Abstract base models can't be instantiated and don't appear in # the list of models for an app. We do the final setup for them a # little differently from normal models. attr_meta.abstract = False new_class.Meta = attr_meta return new_class new_class._prepare() new_class._meta.apps.register_model(new_class._meta.app_label, new_class) return new_class def add_to_class(cls, name, value): if _has_contribute_to_class(value): value.contribute_to_class(cls, name) else: setattr(cls, name, value) def _prepare(cls): """Create some methods once self._meta has been populated.""" opts = cls._meta opts._prepare(cls) if opts.order_with_respect_to: cls.get_next_in_order = partialmethod(cls._get_next_or_previous_in_order, is_next=True) cls.get_previous_in_order = partialmethod(cls._get_next_or_previous_in_order, is_next=False) # Defer creating accessors on the foreign class until it has been # created and registered. 
If remote_field is None, we're ordering # with respect to a GenericForeignKey and don't know what the # foreign class is - we'll add those accessors later in # contribute_to_class(). if opts.order_with_respect_to.remote_field: wrt = opts.order_with_respect_to remote = wrt.remote_field.model lazy_related_operation(make_foreign_order_accessors, cls, remote) # Give the class a docstring -- its definition. if cls.__doc__ is None: cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join(f.name for f in opts.fields)) get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get(opts.label_lower) if get_absolute_url_override: setattr(cls, 'get_absolute_url', get_absolute_url_override) if not opts.managers: if any(f.name == 'objects' for f in opts.fields): raise ValueError( "Model %s must specify a custom Manager, because it has a " "field named 'objects'." % cls.__name__ ) manager = Manager() manager.auto_created = True cls.add_to_class('objects', manager) # Set the name of _meta.indexes. This can't be done in # Options.contribute_to_class() because fields haven't been added to # the model at that point. for index in cls._meta.indexes: if not index.name: index.set_name_with_model(cls) class_prepared.send(sender=cls) @property def _base_manager(cls): return cls._meta.base_manager @property def _default_manager(cls): return cls._meta.default_manager class ModelStateFieldsCacheDescriptor: def __get__(self, instance, cls=None): if instance is None: return self res = instance.fields_cache = {} return res class ModelState: """Store model instance state.""" db = None # If true, uniqueness validation checks will consider this a new, unsaved # object. Necessary for correct validation of new instances of objects with # explicit (non-auto) PKs. This impacts validation only; it has no effect # on the actual save. adding = True fields_cache = ModelStateFieldsCacheDescriptor() class Model(metaclass=ModelBase): def __init__(self, *args, **kwargs): # Alias some things as locals to avoid repeat global lookups cls = self.__class__ opts = self._meta _setattr = setattr _DEFERRED = DEFERRED pre_init.send(sender=cls, args=args, kwargs=kwargs) # Set up the storage for instance state self._state = ModelState() # There is a rather weird disparity here; if kwargs, it's set, then args # overrides it. It should be one or the other; don't duplicate the work # The reason for the kwargs check is that standard iterator passes in by # args, and instantiation for iteration is 33% faster. if len(args) > len(opts.concrete_fields): # Daft, but matches old exception sans the err msg. raise IndexError("Number of args exceeds number of fields") if not kwargs: fields_iter = iter(opts.concrete_fields) # The ordering of the zip calls matter - zip throws StopIteration # when an iter throws it. So if the first iter throws it, the second # is *not* consumed. We rely on this, so don't change the order # without changing the logic. for val, field in zip(args, fields_iter): if val is _DEFERRED: continue _setattr(self, field.attname, val) else: # Slower, kwargs-ready version. fields_iter = iter(opts.fields) for val, field in zip(args, fields_iter): if val is _DEFERRED: continue _setattr(self, field.attname, val) kwargs.pop(field.name, None) # Now we're left with the unprocessed fields that *must* come from # keywords, or default. 
for field in fields_iter: is_related_object = False # Virtual field if field.attname not in kwargs and field.column is None: continue if kwargs: if isinstance(field.remote_field, ForeignObjectRel): try: # Assume object instance was passed in. rel_obj = kwargs.pop(field.name) is_related_object = True except KeyError: try: # Object instance wasn't passed in -- must be an ID. val = kwargs.pop(field.attname) except KeyError: val = field.get_default() else: try: val = kwargs.pop(field.attname) except KeyError: # This is done with an exception rather than the # default argument on pop because we don't want # get_default() to be evaluated, and then not used. # Refs #12057. val = field.get_default() else: val = field.get_default() if is_related_object: # If we are passed a related instance, set it using the # field.name instead of field.attname (e.g. "user" instead of # "user_id") so that the object gets properly cached (and type # checked) by the RelatedObjectDescriptor. if rel_obj is not _DEFERRED: _setattr(self, field.name, rel_obj) else: if val is not _DEFERRED: _setattr(self, field.attname, val) if kwargs: property_names = opts._property_names for prop in tuple(kwargs): try: # Any remaining kwargs must correspond to properties or # virtual fields. if prop in property_names or opts.get_field(prop): if kwargs[prop] is not _DEFERRED: _setattr(self, prop, kwargs[prop]) del kwargs[prop] except (AttributeError, FieldDoesNotExist): pass for kwarg in kwargs: raise TypeError("%s() got an unexpected keyword argument '%s'" % (cls.__name__, kwarg)) super().__init__() post_init.send(sender=cls, instance=self) @classmethod def from_db(cls, db, field_names, values): if len(values) != len(cls._meta.concrete_fields): values_iter = iter(values) values = [ next(values_iter) if f.attname in field_names else DEFERRED for f in cls._meta.concrete_fields ] new = cls(*values) new._state.adding = False new._state.db = db return new def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self) def __str__(self): return '%s object (%s)' % (self.__class__.__name__, self.pk) def __eq__(self, other): if not isinstance(other, Model): return NotImplemented if self._meta.concrete_model != other._meta.concrete_model: return False my_pk = self.pk if my_pk is None: return self is other return my_pk == other.pk def __hash__(self): if self.pk is None: raise TypeError("Model instances without primary key value are unhashable") return hash(self.pk) def __reduce__(self): data = self.__getstate__() data[DJANGO_VERSION_PICKLE_KEY] = django.__version__ class_id = self._meta.app_label, self._meta.object_name return model_unpickle, (class_id,), data def __getstate__(self): """Hook to allow choosing the attributes to pickle.""" return self.__dict__ def __setstate__(self, state): pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY) if pickled_version: if pickled_version != django.__version__: warnings.warn( "Pickled model instance's Django version %s does not " "match the current version %s." 
% (pickled_version, django.__version__), RuntimeWarning, stacklevel=2, ) else: warnings.warn( "Pickled model instance's Django version is not specified.", RuntimeWarning, stacklevel=2, ) self.__dict__.update(state) def _get_pk_val(self, meta=None): meta = meta or self._meta return getattr(self, meta.pk.attname) def _set_pk_val(self, value): for parent_link in self._meta.parents.values(): if parent_link and parent_link != self._meta.pk: setattr(self, parent_link.target_field.attname, value) return setattr(self, self._meta.pk.attname, value) pk = property(_get_pk_val, _set_pk_val) def get_deferred_fields(self): """ Return a set containing names of deferred fields for this instance. """ return { f.attname for f in self._meta.concrete_fields if f.attname not in self.__dict__ } def refresh_from_db(self, using=None, fields=None): """ Reload field values from the database. By default, the reloading happens from the database this instance was loaded from, or by the read router if this instance wasn't loaded from any database. The using parameter will override the default. Fields can be used to specify which fields to reload. The fields should be an iterable of field attnames. If fields is None, then all non-deferred fields are reloaded. When accessing deferred fields of an instance, the deferred loading of the field will call this method. """ if fields is None: self._prefetched_objects_cache = {} else: prefetched_objects_cache = getattr(self, '_prefetched_objects_cache', ()) for field in fields: if field in prefetched_objects_cache: del prefetched_objects_cache[field] fields.remove(field) if not fields: return if any(LOOKUP_SEP in f for f in fields): raise ValueError( 'Found "%s" in fields argument. Relations and transforms ' 'are not allowed in fields.' % LOOKUP_SEP) hints = {'instance': self} db_instance_qs = self.__class__._base_manager.db_manager(using, hints=hints).filter(pk=self.pk) # Use provided fields, if not set then reload all non-deferred fields. deferred_fields = self.get_deferred_fields() if fields is not None: fields = list(fields) db_instance_qs = db_instance_qs.only(*fields) elif deferred_fields: fields = [f.attname for f in self._meta.concrete_fields if f.attname not in deferred_fields] db_instance_qs = db_instance_qs.only(*fields) db_instance = db_instance_qs.get() non_loaded_fields = db_instance.get_deferred_fields() for field in self._meta.concrete_fields: if field.attname in non_loaded_fields: # This field wasn't refreshed - skip ahead. continue setattr(self, field.attname, getattr(db_instance, field.attname)) # Clear cached foreign keys. if field.is_relation and field.is_cached(self): field.delete_cached_value(self) # Clear cached relations. for field in self._meta.related_objects: if field.is_cached(self): field.delete_cached_value(self) self._state.db = db_instance._state.db def serializable_value(self, field_name): """ Return the value of the field name for this instance. If the field is a foreign key, return the id value instead of the object. If there's no Field object with this name on the model, return the model attribute's value. Used to serialize a field's value (in the serializer, or form output, for example). Normally, you would just access the attribute directly and not use this method. """ try: field = self._meta.get_field(field_name) except FieldDoesNotExist: return getattr(self, field_name) return getattr(self, field.attname) def save(self, force_insert=False, force_update=False, using=None, update_fields=None): """ Save the current instance. 
Override this in a subclass if you want to control the saving process. The 'force_insert' and 'force_update' parameters can be used to insist that the "save" must be an SQL insert or update (or equivalent for non-SQL backends), respectively. Normally, they should not be set. """ # Ensure that a model instance without a PK hasn't been assigned to # a ForeignKey or OneToOneField on this model. If the field is # nullable, allowing the save() would result in silent data loss. for field in self._meta.concrete_fields: # If the related field isn't cached, then an instance hasn't # been assigned and there's no need to worry about this check. if field.is_relation and field.is_cached(self): obj = getattr(self, field.name, None) if not obj: continue # A pk may have been assigned manually to a model instance not # saved to the database (or auto-generated in a case like # UUIDField), but we allow the save to proceed and rely on the # database to raise an IntegrityError if applicable. If # constraints aren't supported by the database, there's the # unavoidable risk of data corruption. if obj.pk is None: # Remove the object from a related instance cache. if not field.remote_field.multiple: field.remote_field.delete_cached_value(obj) raise ValueError( "save() prohibited to prevent data loss due to " "unsaved related object '%s'." % field.name ) elif getattr(self, field.attname) is None: # Use pk from related object if it has been saved after # an assignment. setattr(self, field.attname, obj.pk) # If the relationship's pk/to_field was changed, clear the # cached relationship. if getattr(obj, field.target_field.attname) != getattr(self, field.attname): field.delete_cached_value(self) using = using or router.db_for_write(self.__class__, instance=self) if force_insert and (force_update or update_fields): raise ValueError("Cannot force both insert and updating in model saving.") deferred_fields = self.get_deferred_fields() if update_fields is not None: # If update_fields is empty, skip the save. We do also check for # no-op saves later on for inheritance cases. This bailout is # still needed for skipping signal sending. if not update_fields: return update_fields = frozenset(update_fields) field_names = set() for field in self._meta.fields: if not field.primary_key: field_names.add(field.name) if field.name != field.attname: field_names.add(field.attname) non_model_fields = update_fields.difference(field_names) if non_model_fields: raise ValueError("The following fields do not exist in this " "model or are m2m fields: %s" % ', '.join(non_model_fields)) # If saving to the same database, and this model is deferred, then # automatically do an "update_fields" save on the loaded fields. elif not force_insert and deferred_fields and using == self._state.db: field_names = set() for field in self._meta.concrete_fields: if not field.primary_key and not hasattr(field, 'through'): field_names.add(field.attname) loaded_fields = field_names.difference(deferred_fields) if loaded_fields: update_fields = frozenset(loaded_fields) self.save_base(using=using, force_insert=force_insert, force_update=force_update, update_fields=update_fields) save.alters_data = True def save_base(self, raw=False, force_insert=False, force_update=False, using=None, update_fields=None): """ Handle the parts of saving which should be done only once per save, yet need to be done in raw saves, too. This includes some sanity checks and signal sending. 
The 'raw' argument is telling save_base not to save any parent models and not to do any changes to the values before save. This is used by fixture loading. """ using = using or router.db_for_write(self.__class__, instance=self) assert not (force_insert and (force_update or update_fields)) assert update_fields is None or update_fields cls = origin = self.__class__ # Skip proxies, but keep the origin as the proxy model. if cls._meta.proxy: cls = cls._meta.concrete_model meta = cls._meta if not meta.auto_created: pre_save.send( sender=origin, instance=self, raw=raw, using=using, update_fields=update_fields, ) # A transaction isn't needed if one query is issued. if meta.parents: context_manager = transaction.atomic(using=using, savepoint=False) else: context_manager = transaction.mark_for_rollback_on_error(using=using) with context_manager: parent_inserted = False if not raw: parent_inserted = self._save_parents(cls, using, update_fields) updated = self._save_table( raw, cls, force_insert or parent_inserted, force_update, using, update_fields, ) # Store the database on which the object was saved self._state.db = using # Once saved, this is no longer a to-be-added instance. self._state.adding = False # Signal that the save is complete if not meta.auto_created: post_save.send( sender=origin, instance=self, created=(not updated), update_fields=update_fields, raw=raw, using=using, ) save_base.alters_data = True def _save_parents(self, cls, using, update_fields): """Save all the parents of cls using values from self.""" meta = cls._meta inserted = False for parent, field in meta.parents.items(): # Make sure the link fields are synced between parent and self. if (field and getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None): setattr(self, parent._meta.pk.attname, getattr(self, field.attname)) parent_inserted = self._save_parents(cls=parent, using=using, update_fields=update_fields) updated = self._save_table( cls=parent, using=using, update_fields=update_fields, force_insert=parent_inserted, ) if not updated: inserted = True # Set the parent's PK value to self. if field: setattr(self, field.attname, self._get_pk_val(parent._meta)) # Since we didn't have an instance of the parent handy set # attname directly, bypassing the descriptor. Invalidate # the related object cache, in case it's been accidentally # populated. A fresh instance will be re-built from the # database if necessary. if field.is_cached(self): field.delete_cached_value(self) return inserted def _save_table(self, raw=False, cls=None, force_insert=False, force_update=False, using=None, update_fields=None): """ Do the heavy-lifting involved in saving. Update or insert the data for a single table. """ meta = cls._meta non_pks = [f for f in meta.local_concrete_fields if not f.primary_key] if update_fields: non_pks = [f for f in non_pks if f.name in update_fields or f.attname in update_fields] pk_val = self._get_pk_val(meta) if pk_val is None: pk_val = meta.pk.get_pk_value_on_save(self) setattr(self, meta.pk.attname, pk_val) pk_set = pk_val is not None if not pk_set and (force_update or update_fields): raise ValueError("Cannot force an update in save() with no primary key.") updated = False # Skip an UPDATE when adding an instance and primary key has a default. if ( not raw and not force_insert and self._state.adding and meta.pk.default and meta.pk.default is not NOT_PROVIDED ): force_insert = True # If possible, try an UPDATE. If that doesn't update anything, do an INSERT. 
if pk_set and not force_insert: base_qs = cls._base_manager.using(using) values = [(f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False))) for f in non_pks] forced_update = update_fields or force_update updated = self._do_update(base_qs, using, pk_val, values, update_fields, forced_update) if force_update and not updated: raise DatabaseError("Forced update did not affect any rows.") if update_fields and not updated: raise DatabaseError("Save with update_fields did not affect any rows.") if not updated: if meta.order_with_respect_to: # If this is a model with an order_with_respect_to # autopopulate the _order field field = meta.order_with_respect_to filter_args = field.get_filter_kwargs_for_object(self) self._order = cls._base_manager.using(using).filter(**filter_args).aggregate( _order__max=Coalesce( ExpressionWrapper(Max('_order') + Value(1), output_field=IntegerField()), Value(0), ), )['_order__max'] fields = meta.local_concrete_fields if not pk_set: fields = [f for f in fields if f is not meta.auto_field] returning_fields = meta.db_returning_fields results = self._do_insert(cls._base_manager, using, fields, returning_fields, raw) if results: for value, field in zip(results[0], returning_fields): setattr(self, field.attname, value) return updated def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update): """ Try to update the model. Return True if the model was updated (if an update query was done and a matching row was found in the DB). """ filtered = base_qs.filter(pk=pk_val) if not values: # We can end up here when saving a model in inheritance chain where # update_fields doesn't target any field in current model. In that # case we just say the update succeeded. Another case ending up here # is a model with just PK - in that case check that the PK still # exists. return update_fields is not None or filtered.exists() if self._meta.select_on_save and not forced_update: return ( filtered.exists() and # It may happen that the object is deleted from the DB right after # this check, causing the subsequent UPDATE to return zero matching # rows. The same result can occur in some rare cases when the # database returns zero despite the UPDATE being executed # successfully (a row is matched and updated). In order to # distinguish these two cases, the object's existence in the # database is again checked for if the UPDATE query returns 0. (filtered._update(values) > 0 or filtered.exists()) ) return filtered._update(values) > 0 def _do_insert(self, manager, using, fields, returning_fields, raw): """ Do an INSERT. If returning_fields is defined then this method should return the newly created data for the model. """ return manager._insert( [self], fields=fields, returning_fields=returning_fields, using=using, raw=raw, ) def delete(self, using=None, keep_parents=False): using = using or router.db_for_write(self.__class__, instance=self) assert self.pk is not None, ( "%s object can't be deleted because its %s attribute is set to None." % (self._meta.object_name, self._meta.pk.attname) ) collector = Collector(using=using) collector.collect([self], keep_parents=keep_parents) return collector.delete() delete.alters_data = True def _get_FIELD_display(self, field): value = getattr(self, field.attname) choices_dict = dict(make_hashable(field.flatchoices)) # force_str() to coerce lazy strings. 
return force_str(choices_dict.get(make_hashable(value), value), strings_only=True) def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs): if not self.pk: raise ValueError("get_next/get_previous cannot be used on unsaved objects.") op = 'gt' if is_next else 'lt' order = '' if is_next else '-' param = getattr(self, field.attname) q = Q(**{'%s__%s' % (field.name, op): param}) q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk}) qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by( '%s%s' % (order, field.name), '%spk' % order ) try: return qs[0] except IndexError: raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name) def _get_next_or_previous_in_order(self, is_next): cachename = "__%s_order_cache" % is_next if not hasattr(self, cachename): op = 'gt' if is_next else 'lt' order = '_order' if is_next else '-_order' order_field = self._meta.order_with_respect_to filter_args = order_field.get_filter_kwargs_for_object(self) obj = self.__class__._default_manager.filter(**filter_args).filter(**{ '_order__%s' % op: self.__class__._default_manager.values('_order').filter(**{ self._meta.pk.name: self.pk }) }).order_by(order)[:1].get() setattr(self, cachename, obj) return getattr(self, cachename) def prepare_database_save(self, field): if self.pk is None: raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self) return getattr(self, field.remote_field.get_related_field().attname) def clean(self): """ Hook for doing any extra model-wide validation after clean() has been called on every field by self.clean_fields. Any ValidationError raised by this method will not be associated with a particular field; it will have a special-case association with the field defined by NON_FIELD_ERRORS. """ pass def validate_unique(self, exclude=None): """ Check unique constraints on the model and raise ValidationError if any failed. """ unique_checks, date_checks = self._get_unique_checks(exclude=exclude) errors = self._perform_unique_checks(unique_checks) date_errors = self._perform_date_checks(date_checks) for k, v in date_errors.items(): errors.setdefault(k, []).extend(v) if errors: raise ValidationError(errors) def _get_unique_checks(self, exclude=None): """ Return a list of checks to perform. Since validate_unique() could be called from a ModelForm, some fields may have been excluded; we can't perform a unique check on a model that is missing fields involved in that check. Fields that did not validate should also be excluded, but they need to be passed in via the exclude argument. """ if exclude is None: exclude = [] unique_checks = [] unique_togethers = [(self.__class__, self._meta.unique_together)] constraints = [(self.__class__, self._meta.total_unique_constraints)] for parent_class in self._meta.get_parent_list(): if parent_class._meta.unique_together: unique_togethers.append((parent_class, parent_class._meta.unique_together)) if parent_class._meta.total_unique_constraints: constraints.append( (parent_class, parent_class._meta.total_unique_constraints) ) for model_class, unique_together in unique_togethers: for check in unique_together: if not any(name in exclude for name in check): # Add the check if the field isn't excluded. 
unique_checks.append((model_class, tuple(check))) for model_class, model_constraints in constraints: for constraint in model_constraints: if not any(name in exclude for name in constraint.fields): unique_checks.append((model_class, constraint.fields)) # These are checks for the unique_for_<date/year/month>. date_checks = [] # Gather a list of checks for fields declared as unique and add them to # the list of checks. fields_with_class = [(self.__class__, self._meta.local_fields)] for parent_class in self._meta.get_parent_list(): fields_with_class.append((parent_class, parent_class._meta.local_fields)) for model_class, fields in fields_with_class: for f in fields: name = f.name if name in exclude: continue if f.unique: unique_checks.append((model_class, (name,))) if f.unique_for_date and f.unique_for_date not in exclude: date_checks.append((model_class, 'date', name, f.unique_for_date)) if f.unique_for_year and f.unique_for_year not in exclude: date_checks.append((model_class, 'year', name, f.unique_for_year)) if f.unique_for_month and f.unique_for_month not in exclude: date_checks.append((model_class, 'month', name, f.unique_for_month)) return unique_checks, date_checks def _perform_unique_checks(self, unique_checks): errors = {} for model_class, unique_check in unique_checks: # Try to look up an existing object with the same values as this # object's values for all the unique field. lookup_kwargs = {} for field_name in unique_check: f = self._meta.get_field(field_name) lookup_value = getattr(self, f.attname) # TODO: Handle multiple backends with different feature flags. if (lookup_value is None or (lookup_value == '' and connection.features.interprets_empty_strings_as_nulls)): # no value, skip the lookup continue if f.primary_key and not self._state.adding: # no need to check for unique primary key when editing continue lookup_kwargs[str(field_name)] = lookup_value # some fields were skipped, no reason to do the check if len(unique_check) != len(lookup_kwargs): continue qs = model_class._default_manager.filter(**lookup_kwargs) # Exclude the current object from the query if we are editing an # instance (as opposed to creating a new one) # Note that we need to use the pk as defined by model_class, not # self.pk. These can be different fields because model inheritance # allows single model to have effectively multiple primary keys. # Refs #17615. 
model_class_pk = self._get_pk_val(model_class._meta) if not self._state.adding and model_class_pk is not None: qs = qs.exclude(pk=model_class_pk) if qs.exists(): if len(unique_check) == 1: key = unique_check[0] else: key = NON_FIELD_ERRORS errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check)) return errors def _perform_date_checks(self, date_checks): errors = {} for model_class, lookup_type, field, unique_for in date_checks: lookup_kwargs = {} # there's a ticket to add a date lookup, we can remove this special # case if that makes it's way in date = getattr(self, unique_for) if date is None: continue if lookup_type == 'date': lookup_kwargs['%s__day' % unique_for] = date.day lookup_kwargs['%s__month' % unique_for] = date.month lookup_kwargs['%s__year' % unique_for] = date.year else: lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type) lookup_kwargs[field] = getattr(self, field) qs = model_class._default_manager.filter(**lookup_kwargs) # Exclude the current object from the query if we are editing an # instance (as opposed to creating a new one) if not self._state.adding and self.pk is not None: qs = qs.exclude(pk=self.pk) if qs.exists(): errors.setdefault(field, []).append( self.date_error_message(lookup_type, field, unique_for) ) return errors def date_error_message(self, lookup_type, field_name, unique_for): opts = self._meta field = opts.get_field(field_name) return ValidationError( message=field.error_messages['unique_for_date'], code='unique_for_date', params={ 'model': self, 'model_name': capfirst(opts.verbose_name), 'lookup_type': lookup_type, 'field': field_name, 'field_label': capfirst(field.verbose_name), 'date_field': unique_for, 'date_field_label': capfirst(opts.get_field(unique_for).verbose_name), } ) def unique_error_message(self, model_class, unique_check): opts = model_class._meta params = { 'model': self, 'model_class': model_class, 'model_name': capfirst(opts.verbose_name), 'unique_check': unique_check, } # A unique field if len(unique_check) == 1: field = opts.get_field(unique_check[0]) params['field_label'] = capfirst(field.verbose_name) return ValidationError( message=field.error_messages['unique'], code='unique', params=params, ) # unique_together else: field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check] params['field_labels'] = get_text_list(field_labels, _('and')) return ValidationError( message=_("%(model_name)s with this %(field_labels)s already exists."), code='unique_together', params=params, ) def full_clean(self, exclude=None, validate_unique=True): """ Call clean_fields(), clean(), and validate_unique() on the model. Raise a ValidationError for any errors that occur. """ errors = {} if exclude is None: exclude = [] else: exclude = list(exclude) try: self.clean_fields(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) # Form.clean() is run even if other validation fails, so do the # same with Model.clean() for consistency. try: self.clean() except ValidationError as e: errors = e.update_error_dict(errors) # Run unique checks, but only for fields that passed validation. 
if validate_unique: for name in errors: if name != NON_FIELD_ERRORS and name not in exclude: exclude.append(name) try: self.validate_unique(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) if errors: raise ValidationError(errors) def clean_fields(self, exclude=None): """ Clean all fields and raise a ValidationError containing a dict of all validation errors if any occur. """ if exclude is None: exclude = [] errors = {} for f in self._meta.fields: if f.name in exclude: continue # Skip validation for empty fields with blank=True. The developer # is responsible for making sure they have a valid value. raw_value = getattr(self, f.attname) if f.blank and raw_value in f.empty_values: continue try: setattr(self, f.attname, f.clean(raw_value, self)) except ValidationError as e: errors[f.name] = e.error_list if errors: raise ValidationError(errors) @classmethod def check(cls, **kwargs): errors = [*cls._check_swappable(), *cls._check_model(), *cls._check_managers(**kwargs)] if not cls._meta.swapped: databases = kwargs.get('databases') or [] errors += [ *cls._check_fields(**kwargs), *cls._check_m2m_through_same_relationship(), *cls._check_long_column_names(databases), ] clash_errors = ( *cls._check_id_field(), *cls._check_field_name_clashes(), *cls._check_model_name_db_lookup_clashes(), *cls._check_property_name_related_field_accessor_clashes(), *cls._check_single_primary_key(), ) errors.extend(clash_errors) # If there are field name clashes, hide consequent column name # clashes. if not clash_errors: errors.extend(cls._check_column_name_clashes()) errors += [ *cls._check_index_together(), *cls._check_unique_together(), *cls._check_indexes(databases), *cls._check_ordering(), *cls._check_constraints(databases), ] return errors @classmethod def _check_swappable(cls): """Check if the swapped model exists.""" errors = [] if cls._meta.swapped: try: apps.get_model(cls._meta.swapped) except ValueError: errors.append( checks.Error( "'%s' is not of the form 'app_label.app_name'." % cls._meta.swappable, id='models.E001', ) ) except LookupError: app_label, model_name = cls._meta.swapped.split('.') errors.append( checks.Error( "'%s' references '%s.%s', which has not been " "installed, or is abstract." % ( cls._meta.swappable, app_label, model_name ), id='models.E002', ) ) return errors @classmethod def _check_model(cls): errors = [] if cls._meta.proxy: if cls._meta.local_fields or cls._meta.local_many_to_many: errors.append( checks.Error( "Proxy model '%s' contains model fields." % cls.__name__, id='models.E017', ) ) return errors @classmethod def _check_managers(cls, **kwargs): """Perform all manager checks.""" errors = [] for manager in cls._meta.managers: errors.extend(manager.check(**kwargs)) return errors @classmethod def _check_fields(cls, **kwargs): """Perform all field checks.""" errors = [] for field in cls._meta.local_fields: errors.extend(field.check(**kwargs)) for field in cls._meta.local_many_to_many: errors.extend(field.check(from_model=cls, **kwargs)) return errors @classmethod def _check_m2m_through_same_relationship(cls): """ Check if no relationship model is used by more than one m2m field. """ errors = [] seen_intermediary_signatures = [] fields = cls._meta.local_many_to_many # Skip when the target model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase)) # Skip when the relationship model wasn't found. 
fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase)) for f in fields: signature = (f.remote_field.model, cls, f.remote_field.through, f.remote_field.through_fields) if signature in seen_intermediary_signatures: errors.append( checks.Error( "The model has two identical many-to-many relations " "through the intermediate model '%s'." % f.remote_field.through._meta.label, obj=cls, id='models.E003', ) ) else: seen_intermediary_signatures.append(signature) return errors @classmethod def _check_id_field(cls): """Check if `id` field is a primary key.""" fields = [f for f in cls._meta.local_fields if f.name == 'id' and f != cls._meta.pk] # fields is empty or consists of the invalid "id" field if fields and not fields[0].primary_key and cls._meta.pk.name == 'id': return [ checks.Error( "'id' can only be used as a field name if the field also " "sets 'primary_key=True'.", obj=cls, id='models.E004', ) ] else: return [] @classmethod def _check_field_name_clashes(cls): """Forbid field shadowing in multi-table inheritance.""" errors = [] used_fields = {} # name or attname -> field # Check that multi-inheritance doesn't cause field name shadowing. for parent in cls._meta.get_parent_list(): for f in parent._meta.local_fields: clash = used_fields.get(f.name) or used_fields.get(f.attname) or None if clash: errors.append( checks.Error( "The field '%s' from parent model " "'%s' clashes with the field '%s' " "from parent model '%s'." % ( clash.name, clash.model._meta, f.name, f.model._meta ), obj=cls, id='models.E005', ) ) used_fields[f.name] = f used_fields[f.attname] = f # Check that fields defined in the model don't clash with fields from # parents, including auto-generated fields like multi-table inheritance # child accessors. for parent in cls._meta.get_parent_list(): for f in parent._meta.get_fields(): if f not in used_fields: used_fields[f.name] = f for f in cls._meta.local_fields: clash = used_fields.get(f.name) or used_fields.get(f.attname) or None # Note that we may detect clash between user-defined non-unique # field "id" and automatically added unique field "id", both # defined at the same model. This special case is considered in # _check_id_field and here we ignore it. id_conflict = f.name == "id" and clash and clash.name == "id" and clash.model == cls if clash and not id_conflict: errors.append( checks.Error( "The field '%s' clashes with the field '%s' " "from model '%s'." % ( f.name, clash.name, clash.model._meta ), obj=f, id='models.E006', ) ) used_fields[f.name] = f used_fields[f.attname] = f return errors @classmethod def _check_column_name_clashes(cls): # Store a list of column names which have already been used by other fields. used_column_names = [] errors = [] for f in cls._meta.local_fields: _, column_name = f.get_attname_column() # Ensure the column name is not already in use. if column_name and column_name in used_column_names: errors.append( checks.Error( "Field '%s' has column name '%s' that is used by " "another field." % (f.name, column_name), hint="Specify a 'db_column' for the field.", obj=cls, id='models.E007' ) ) else: used_column_names.append(column_name) return errors @classmethod def _check_model_name_db_lookup_clashes(cls): errors = [] model_name = cls.__name__ if model_name.startswith('_') or model_name.endswith('_'): errors.append( checks.Error( "The model name '%s' cannot start or end with an underscore " "as it collides with the query lookup syntax." 
% model_name, obj=cls, id='models.E023' ) ) elif LOOKUP_SEP in model_name: errors.append( checks.Error( "The model name '%s' cannot contain double underscores as " "it collides with the query lookup syntax." % model_name, obj=cls, id='models.E024' ) ) return errors @classmethod def _check_property_name_related_field_accessor_clashes(cls): errors = [] property_names = cls._meta._property_names related_field_accessors = ( f.get_attname() for f in cls._meta._get_fields(reverse=False) if f.is_relation and f.related_model is not None ) for accessor in related_field_accessors: if accessor in property_names: errors.append( checks.Error( "The property '%s' clashes with a related field " "accessor." % accessor, obj=cls, id='models.E025', ) ) return errors @classmethod def _check_single_primary_key(cls): errors = [] if sum(1 for f in cls._meta.local_fields if f.primary_key) > 1: errors.append( checks.Error( "The model cannot have more than one field with " "'primary_key=True'.", obj=cls, id='models.E026', ) ) return errors @classmethod def _check_index_together(cls): """Check the value of "index_together" option.""" if not isinstance(cls._meta.index_together, (tuple, list)): return [ checks.Error( "'index_together' must be a list or tuple.", obj=cls, id='models.E008', ) ] elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.index_together): return [ checks.Error( "All 'index_together' elements must be lists or tuples.", obj=cls, id='models.E009', ) ] else: errors = [] for fields in cls._meta.index_together: errors.extend(cls._check_local_fields(fields, "index_together")) return errors @classmethod def _check_unique_together(cls): """Check the value of "unique_together" option.""" if not isinstance(cls._meta.unique_together, (tuple, list)): return [ checks.Error( "'unique_together' must be a list or tuple.", obj=cls, id='models.E010', ) ] elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.unique_together): return [ checks.Error( "All 'unique_together' elements must be lists or tuples.", obj=cls, id='models.E011', ) ] else: errors = [] for fields in cls._meta.unique_together: errors.extend(cls._check_local_fields(fields, "unique_together")) return errors @classmethod def _check_indexes(cls, databases): """Check fields, names, and conditions of indexes.""" errors = [] for index in cls._meta.indexes: # Index name can't start with an underscore or a number, restricted # for cross-database compatibility with Oracle. if index.name[0] == '_' or index.name[0].isdigit(): errors.append( checks.Error( "The index name '%s' cannot start with an underscore " "or a number." % index.name, obj=cls, id='models.E033', ), ) if len(index.name) > index.max_name_length: errors.append( checks.Error( "The index name '%s' cannot be longer than %d " "characters." % (index.name, index.max_name_length), obj=cls, id='models.E034', ), ) for db in databases: if not router.allow_migrate_model(db, cls): continue connection = connections[db] if not ( connection.features.supports_partial_indexes or 'supports_partial_indexes' in cls._meta.required_db_features ) and any(index.condition is not None for index in cls._meta.indexes): errors.append( checks.Warning( '%s does not support indexes with conditions.' % connection.display_name, hint=( "Conditions will be ignored. Silence this warning " "if you don't care about it." 
), obj=cls, id='models.W037', ) ) if not ( connection.features.supports_covering_indexes or 'supports_covering_indexes' in cls._meta.required_db_features ) and any(index.include for index in cls._meta.indexes): errors.append( checks.Warning( '%s does not support indexes with non-key columns.' % connection.display_name, hint=( "Non-key columns will be ignored. Silence this " "warning if you don't care about it." ), obj=cls, id='models.W040', ) ) fields = [field for index in cls._meta.indexes for field, _ in index.fields_orders] fields += [include for index in cls._meta.indexes for include in index.include] errors.extend(cls._check_local_fields(fields, 'indexes')) return errors @classmethod def _check_local_fields(cls, fields, option): from django.db import models # In order to avoid hitting the relation tree prematurely, we use our # own fields_map instead of using get_field() forward_fields_map = {} for field in cls._meta._get_fields(reverse=False): forward_fields_map[field.name] = field if hasattr(field, 'attname'): forward_fields_map[field.attname] = field errors = [] for field_name in fields: try: field = forward_fields_map[field_name] except KeyError: errors.append( checks.Error( "'%s' refers to the nonexistent field '%s'." % ( option, field_name, ), obj=cls, id='models.E012', ) ) else: if isinstance(field.remote_field, models.ManyToManyRel): errors.append( checks.Error( "'%s' refers to a ManyToManyField '%s', but " "ManyToManyFields are not permitted in '%s'." % ( option, field_name, option, ), obj=cls, id='models.E013', ) ) elif field not in cls._meta.local_fields: errors.append( checks.Error( "'%s' refers to field '%s' which is not local to model '%s'." % (option, field_name, cls._meta.object_name), hint="This issue may be caused by multi-table inheritance.", obj=cls, id='models.E016', ) ) return errors @classmethod def _check_ordering(cls): """ Check "ordering" option -- is it a list of strings and do all fields exist? """ if cls._meta._ordering_clash: return [ checks.Error( "'ordering' and 'order_with_respect_to' cannot be used together.", obj=cls, id='models.E021', ), ] if cls._meta.order_with_respect_to or not cls._meta.ordering: return [] if not isinstance(cls._meta.ordering, (list, tuple)): return [ checks.Error( "'ordering' must be a tuple or list (even if you want to order by only one field).", obj=cls, id='models.E014', ) ] errors = [] fields = cls._meta.ordering # Skip expressions and '?' fields. fields = (f for f in fields if isinstance(f, str) and f != '?') # Convert "-field" to "field". fields = ((f[1:] if f.startswith('-') else f) for f in fields) # Separate related fields and non-related fields. _fields = [] related_fields = [] for f in fields: if LOOKUP_SEP in f: related_fields.append(f) else: _fields.append(f) fields = _fields # Check related fields. for field in related_fields: _cls = cls fld = None for part in field.split(LOOKUP_SEP): try: # pk is an alias that won't be found by opts.get_field. if part == 'pk': fld = _cls._meta.pk else: fld = _cls._meta.get_field(part) if fld.is_relation: _cls = fld.get_path_info()[-1].to_opts.model else: _cls = None except (FieldDoesNotExist, AttributeError): if fld is None or ( fld.get_transform(part) is None and fld.get_lookup(part) is None ): errors.append( checks.Error( "'ordering' refers to the nonexistent field, " "related field, or lookup '%s'." % field, obj=cls, id='models.E015', ) ) # Skip ordering on pk. This is always a valid order_by field # but is an alias and therefore won't be found by opts.get_field. 
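        # The remaining plain names are checked in bulk below against the
        # model's own fields and related query names (models.E015).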
fields = {f for f in fields if f != 'pk'} # Check for invalid or nonexistent fields in ordering. invalid_fields = [] # Any field name that is not present in field_names does not exist. # Also, ordering by m2m fields is not allowed. opts = cls._meta valid_fields = set(chain.from_iterable( (f.name, f.attname) if not (f.auto_created and not f.concrete) else (f.field.related_query_name(),) for f in chain(opts.fields, opts.related_objects) )) invalid_fields.extend(fields - valid_fields) for invalid_field in invalid_fields: errors.append( checks.Error( "'ordering' refers to the nonexistent field, related " "field, or lookup '%s'." % invalid_field, obj=cls, id='models.E015', ) ) return errors @classmethod def _check_long_column_names(cls, databases): """ Check that any auto-generated column names are shorter than the limits for each database in which the model will be created. """ if not databases: return [] errors = [] allowed_len = None db_alias = None # Find the minimum max allowed length among all specified db_aliases. for db in databases: # skip databases where the model won't be created if not router.allow_migrate_model(db, cls): continue connection = connections[db] max_name_length = connection.ops.max_name_length() if max_name_length is None or connection.features.truncates_names: continue else: if allowed_len is None: allowed_len = max_name_length db_alias = db elif max_name_length < allowed_len: allowed_len = max_name_length db_alias = db if allowed_len is None: return errors for f in cls._meta.local_fields: _, column_name = f.get_attname_column() # Check if auto-generated name for the field is too long # for the database. if f.db_column is None and column_name is not None and len(column_name) > allowed_len: errors.append( checks.Error( 'Autogenerated column name too long for field "%s". ' 'Maximum length is "%s" for database "%s".' % (column_name, allowed_len, db_alias), hint="Set the column name manually using 'db_column'.", obj=cls, id='models.E018', ) ) for f in cls._meta.local_many_to_many: # Skip nonexistent models. if isinstance(f.remote_field.through, str): continue # Check if auto-generated name for the M2M field is too long # for the database. for m2m in f.remote_field.through._meta.local_fields: _, rel_name = m2m.get_attname_column() if m2m.db_column is None and rel_name is not None and len(rel_name) > allowed_len: errors.append( checks.Error( 'Autogenerated column name too long for M2M field ' '"%s". Maximum length is "%s" for database "%s".' % (rel_name, allowed_len, db_alias), hint=( "Use 'through' to create a separate model for " "M2M and then set column_name using 'db_column'." 
), obj=cls, id='models.E019', ) ) return errors @classmethod def _get_expr_references(cls, expr): if isinstance(expr, Q): for child in expr.children: if isinstance(child, tuple): lookup, value = child yield tuple(lookup.split(LOOKUP_SEP)) yield from cls._get_expr_references(value) else: yield from cls._get_expr_references(child) elif isinstance(expr, F): yield tuple(expr.name.split(LOOKUP_SEP)) elif hasattr(expr, 'get_source_expressions'): for src_expr in expr.get_source_expressions(): yield from cls._get_expr_references(src_expr) @classmethod def _check_constraints(cls, databases): errors = [] for db in databases: if not router.allow_migrate_model(db, cls): continue connection = connections[db] if not ( connection.features.supports_table_check_constraints or 'supports_table_check_constraints' in cls._meta.required_db_features ) and any( isinstance(constraint, CheckConstraint) for constraint in cls._meta.constraints ): errors.append( checks.Warning( '%s does not support check constraints.' % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id='models.W027', ) ) if not ( connection.features.supports_partial_indexes or 'supports_partial_indexes' in cls._meta.required_db_features ) and any( isinstance(constraint, UniqueConstraint) and constraint.condition is not None for constraint in cls._meta.constraints ): errors.append( checks.Warning( '%s does not support unique constraints with ' 'conditions.' % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id='models.W036', ) ) if not ( connection.features.supports_deferrable_unique_constraints or 'supports_deferrable_unique_constraints' in cls._meta.required_db_features ) and any( isinstance(constraint, UniqueConstraint) and constraint.deferrable is not None for constraint in cls._meta.constraints ): errors.append( checks.Warning( '%s does not support deferrable unique constraints.' % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id='models.W038', ) ) if not ( connection.features.supports_covering_indexes or 'supports_covering_indexes' in cls._meta.required_db_features ) and any( isinstance(constraint, UniqueConstraint) and constraint.include for constraint in cls._meta.constraints ): errors.append( checks.Warning( '%s does not support unique constraints with non-key ' 'columns.' % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id='models.W039', ) ) fields = set(chain.from_iterable( (*constraint.fields, *constraint.include) for constraint in cls._meta.constraints if isinstance(constraint, UniqueConstraint) )) references = set() for constraint in cls._meta.constraints: if isinstance(constraint, UniqueConstraint): if ( connection.features.supports_partial_indexes or 'supports_partial_indexes' not in cls._meta.required_db_features ) and isinstance(constraint.condition, Q): references.update(cls._get_expr_references(constraint.condition)) elif isinstance(constraint, CheckConstraint): if ( connection.features.supports_table_check_constraints or 'supports_table_check_constraints' not in cls._meta.required_db_features ) and isinstance(constraint.check, Q): references.update(cls._get_expr_references(constraint.check)) for field_name, *lookups in references: # pk is an alias that won't be found by opts.get_field. 
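                # Plain names are collected into `fields` for the
                # _check_local_fields() pass below; references that would
                # require a JOIN are reported as models.E041.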
if field_name != 'pk': fields.add(field_name) if not lookups: # If it has no lookups it cannot result in a JOIN. continue try: if field_name == 'pk': field = cls._meta.pk else: field = cls._meta.get_field(field_name) if not field.is_relation or field.many_to_many or field.one_to_many: continue except FieldDoesNotExist: continue # JOIN must happen at the first lookup. first_lookup = lookups[0] if ( field.get_transform(first_lookup) is None and field.get_lookup(first_lookup) is None ): errors.append( checks.Error( "'constraints' refers to the joined field '%s'." % LOOKUP_SEP.join([field_name] + lookups), obj=cls, id='models.E041', ) ) errors.extend(cls._check_local_fields(fields, 'constraints')) return errors ############################################ # HELPER FUNCTIONS (CURRIED MODEL METHODS) # ############################################ # ORDERING METHODS ######################### def method_set_order(self, ordered_obj, id_list, using=None): if using is None: using = DEFAULT_DB_ALIAS order_wrt = ordered_obj._meta.order_with_respect_to filter_args = order_wrt.get_forward_related_filter(self) ordered_obj.objects.db_manager(using).filter(**filter_args).bulk_update([ ordered_obj(pk=pk, _order=order) for order, pk in enumerate(id_list) ], ['_order']) def method_get_order(self, ordered_obj): order_wrt = ordered_obj._meta.order_with_respect_to filter_args = order_wrt.get_forward_related_filter(self) pk_name = ordered_obj._meta.pk.name return ordered_obj.objects.filter(**filter_args).values_list(pk_name, flat=True) def make_foreign_order_accessors(model, related_model): setattr( related_model, 'get_%s_order' % model.__name__.lower(), partialmethod(method_get_order, model) ) setattr( related_model, 'set_%s_order' % model.__name__.lower(), partialmethod(method_set_order, model) ) ######## # MISC # ######## def model_unpickle(model_id): """Used to unpickle Model subclasses with deferred fields.""" if isinstance(model_id, tuple): model = apps.get_model(*model_id) else: # Backwards compat - the model was cached directly in earlier versions. model = model_id return model.__new__(model) model_unpickle.__safe_for_unpickle__ = True
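

# ---------------------------------------------------------------------------
# Editor's illustrative sketch -- not part of Django itself. The _check_*()
# hooks above run through Model.check(), typically via ``manage.py check``.
# Assuming a configured project (the model name and app label below are
# hypothetical), a bad Meta.ordering surfaces models.E015:
#
#     from django.db import models
#
#     class Book(models.Model):
#         title = models.CharField(max_length=100)
#
#         class Meta:
#             app_label = 'library'          # hypothetical app label
#             ordering = ['missing_field']   # no such field
#
#     assert any(e.id == 'models.E015' for e in Book.check())
# ---------------------------------------------------------------------------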
import copy import datetime import inspect from decimal import Decimal from django.core.exceptions import EmptyResultSet, FieldError from django.db import NotSupportedError, connection from django.db.models import fields from django.db.models.constants import LOOKUP_SEP from django.db.models.query_utils import Q from django.utils.deconstruct import deconstructible from django.utils.functional import cached_property from django.utils.hashable import make_hashable class SQLiteNumericMixin: """ Some expressions with output_field=DecimalField() must be cast to numeric to be properly filtered. """ def as_sqlite(self, compiler, connection, **extra_context): sql, params = self.as_sql(compiler, connection, **extra_context) try: if self.output_field.get_internal_type() == 'DecimalField': sql = 'CAST(%s AS NUMERIC)' % sql except FieldError: pass return sql, params class Combinable: """ Provide the ability to combine one or two objects with some connector. For example F('foo') + F('bar'). """ # Arithmetic connectors ADD = '+' SUB = '-' MUL = '*' DIV = '/' POW = '^' # The following is a quoted % operator - it is quoted because it can be # used in strings that also have parameter substitution. MOD = '%%' # Bitwise operators - note that these are generated by .bitand() # and .bitor(), the '&' and '|' are reserved for boolean operator # usage. BITAND = '&' BITOR = '|' BITLEFTSHIFT = '<<' BITRIGHTSHIFT = '>>' BITXOR = '#' def _combine(self, other, connector, reversed): if not hasattr(other, 'resolve_expression'): # everything must be resolvable to an expression output_field = ( fields.DurationField() if isinstance(other, datetime.timedelta) else None ) other = Value(other, output_field=output_field) if reversed: return CombinedExpression(other, connector, self) return CombinedExpression(self, connector, other) ############# # OPERATORS # ############# def __neg__(self): return self._combine(-1, self.MUL, False) def __add__(self, other): return self._combine(other, self.ADD, False) def __sub__(self, other): return self._combine(other, self.SUB, False) def __mul__(self, other): return self._combine(other, self.MUL, False) def __truediv__(self, other): return self._combine(other, self.DIV, False) def __mod__(self, other): return self._combine(other, self.MOD, False) def __pow__(self, other): return self._combine(other, self.POW, False) def __and__(self, other): if getattr(self, 'conditional', False) and getattr(other, 'conditional', False): return Q(self) & Q(other) raise NotImplementedError( "Use .bitand() and .bitor() for bitwise logical operations." ) def bitand(self, other): return self._combine(other, self.BITAND, False) def bitleftshift(self, other): return self._combine(other, self.BITLEFTSHIFT, False) def bitrightshift(self, other): return self._combine(other, self.BITRIGHTSHIFT, False) def bitxor(self, other): return self._combine(other, self.BITXOR, False) def __or__(self, other): if getattr(self, 'conditional', False) and getattr(other, 'conditional', False): return Q(self) | Q(other) raise NotImplementedError( "Use .bitand() and .bitor() for bitwise logical operations." 
) def bitor(self, other): return self._combine(other, self.BITOR, False) def __radd__(self, other): return self._combine(other, self.ADD, True) def __rsub__(self, other): return self._combine(other, self.SUB, True) def __rmul__(self, other): return self._combine(other, self.MUL, True) def __rtruediv__(self, other): return self._combine(other, self.DIV, True) def __rmod__(self, other): return self._combine(other, self.MOD, True) def __rpow__(self, other): return self._combine(other, self.POW, True) def __rand__(self, other): raise NotImplementedError( "Use .bitand() and .bitor() for bitwise logical operations." ) def __ror__(self, other): raise NotImplementedError( "Use .bitand() and .bitor() for bitwise logical operations." ) @deconstructible class BaseExpression: """Base class for all query expressions.""" # aggregate specific fields is_summary = False _output_field_resolved_to_none = False # Can the expression be used in a WHERE clause? filterable = True # Can the expression can be used as a source expression in Window? window_compatible = False def __init__(self, output_field=None): if output_field is not None: self.output_field = output_field def __getstate__(self): state = self.__dict__.copy() state.pop('convert_value', None) return state def get_db_converters(self, connection): return ( [] if self.convert_value is self._convert_value_noop else [self.convert_value] ) + self.output_field.get_db_converters(connection) def get_source_expressions(self): return [] def set_source_expressions(self, exprs): assert not exprs def _parse_expressions(self, *expressions): return [ arg if hasattr(arg, 'resolve_expression') else ( F(arg) if isinstance(arg, str) else Value(arg) ) for arg in expressions ] def as_sql(self, compiler, connection): """ Responsible for returning a (sql, [params]) tuple to be included in the current query. Different backends can provide their own implementation, by providing an `as_{vendor}` method and patching the Expression: ``` def override_as_sql(self, compiler, connection): # custom logic return super().as_sql(compiler, connection) setattr(Expression, 'as_' + connection.vendor, override_as_sql) ``` Arguments: * compiler: the query compiler responsible for generating the query. Must have a compile method, returning a (sql, [params]) tuple. Calling compiler(value) will return a quoted `value`. * connection: the database connection used for the current query. Return: (sql, params) Where `sql` is a string containing ordered sql parameters to be replaced with the elements of the list `params`. """ raise NotImplementedError("Subclasses must implement as_sql()") @cached_property def contains_aggregate(self): return any(expr and expr.contains_aggregate for expr in self.get_source_expressions()) @cached_property def contains_over_clause(self): return any(expr and expr.contains_over_clause for expr in self.get_source_expressions()) @cached_property def contains_column_references(self): return any(expr and expr.contains_column_references for expr in self.get_source_expressions()) def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): """ Provide the chance to do any preprocessing or validation before being added to the query. 
Arguments: * query: the backend query implementation * allow_joins: boolean allowing or denying use of joins in this query * reuse: a set of reusable joins for multijoins * summarize: a terminal aggregate clause * for_save: whether this expression about to be used in a save or update Return: an Expression to be added to the query. """ c = self.copy() c.is_summary = summarize c.set_source_expressions([ expr.resolve_expression(query, allow_joins, reuse, summarize) if expr else None for expr in c.get_source_expressions() ]) return c @property def conditional(self): return isinstance(self.output_field, fields.BooleanField) @property def field(self): return self.output_field @cached_property def output_field(self): """Return the output type of this expressions.""" output_field = self._resolve_output_field() if output_field is None: self._output_field_resolved_to_none = True raise FieldError('Cannot resolve expression type, unknown output_field') return output_field @cached_property def _output_field_or_none(self): """ Return the output field of this expression, or None if _resolve_output_field() didn't return an output type. """ try: return self.output_field except FieldError: if not self._output_field_resolved_to_none: raise def _resolve_output_field(self): """ Attempt to infer the output type of the expression. If the output fields of all source fields match then, simply infer the same type here. This isn't always correct, but it makes sense most of the time. Consider the difference between `2 + 2` and `2 / 3`. Inferring the type here is a convenience for the common case. The user should supply their own output_field with more complex computations. If a source's output field resolves to None, exclude it from this check. If all sources are None, then an error is raised higher up the stack in the output_field property. """ sources_iter = (source for source in self.get_source_fields() if source is not None) for output_field in sources_iter: for source in sources_iter: if not isinstance(output_field, source.__class__): raise FieldError( 'Expression contains mixed types: %s, %s. You must ' 'set output_field.' % ( output_field.__class__.__name__, source.__class__.__name__, ) ) return output_field @staticmethod def _convert_value_noop(value, expression, connection): return value @cached_property def convert_value(self): """ Expressions provide their own converters because users have the option of manually specifying the output_field which may be a different type from the one the database returns. 
""" field = self.output_field internal_type = field.get_internal_type() if internal_type == 'FloatField': return lambda value, expression, connection: None if value is None else float(value) elif internal_type.endswith('IntegerField'): return lambda value, expression, connection: None if value is None else int(value) elif internal_type == 'DecimalField': return lambda value, expression, connection: None if value is None else Decimal(value) return self._convert_value_noop def get_lookup(self, lookup): return self.output_field.get_lookup(lookup) def get_transform(self, name): return self.output_field.get_transform(name) def relabeled_clone(self, change_map): clone = self.copy() clone.set_source_expressions([ e.relabeled_clone(change_map) if e is not None else None for e in self.get_source_expressions() ]) return clone def copy(self): return copy.copy(self) def get_group_by_cols(self, alias=None): if not self.contains_aggregate: return [self] cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols def get_source_fields(self): """Return the underlying field types used by this aggregate.""" return [e._output_field_or_none for e in self.get_source_expressions()] def asc(self, **kwargs): return OrderBy(self, **kwargs) def desc(self, **kwargs): return OrderBy(self, descending=True, **kwargs) def reverse_ordering(self): return self def flatten(self): """ Recursively yield this expression and all subexpressions, in depth-first order. """ yield self for expr in self.get_source_expressions(): if expr: yield from expr.flatten() def select_format(self, compiler, sql, params): """ Custom format for select clauses. For example, EXISTS expressions need to be wrapped in CASE WHEN on Oracle. """ if hasattr(self.output_field, 'select_format'): return self.output_field.select_format(compiler, sql, params) return sql, params @cached_property def identity(self): constructor_signature = inspect.signature(self.__init__) args, kwargs = self._constructor_args signature = constructor_signature.bind_partial(*args, **kwargs) signature.apply_defaults() arguments = signature.arguments.items() identity = [self.__class__] for arg, value in arguments: if isinstance(value, fields.Field): if value.name and value.model: value = (value.model._meta.label, value.name) else: value = type(value) else: value = make_hashable(value) identity.append((arg, value)) return tuple(identity) def __eq__(self, other): if not isinstance(other, BaseExpression): return NotImplemented return other.identity == self.identity def __hash__(self): return hash(self.identity) class Expression(BaseExpression, Combinable): """An expression that can be combined with other expressions.""" pass class CombinedExpression(SQLiteNumericMixin, Expression): def __init__(self, lhs, connector, rhs, output_field=None): super().__init__(output_field=output_field) self.connector = connector self.lhs = lhs self.rhs = rhs def __repr__(self): return "<{}: {}>".format(self.__class__.__name__, self) def __str__(self): return "{} {} {}".format(self.lhs, self.connector, self.rhs) def get_source_expressions(self): return [self.lhs, self.rhs] def set_source_expressions(self, exprs): self.lhs, self.rhs = exprs def as_sql(self, compiler, connection): expressions = [] expression_params = [] sql, params = compiler.compile(self.lhs) expressions.append(sql) expression_params.extend(params) sql, params = compiler.compile(self.rhs) expressions.append(sql) expression_params.extend(params) # order of precedence expression_wrapper = '(%s)' 
sql = connection.ops.combine_expression(self.connector, expressions) return expression_wrapper % sql, expression_params def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): lhs = self.lhs.resolve_expression(query, allow_joins, reuse, summarize, for_save) rhs = self.rhs.resolve_expression(query, allow_joins, reuse, summarize, for_save) if not isinstance(self, (DurationExpression, TemporalSubtraction)): try: lhs_type = lhs.output_field.get_internal_type() except (AttributeError, FieldError): lhs_type = None try: rhs_type = rhs.output_field.get_internal_type() except (AttributeError, FieldError): rhs_type = None if 'DurationField' in {lhs_type, rhs_type} and lhs_type != rhs_type: return DurationExpression(self.lhs, self.connector, self.rhs).resolve_expression( query, allow_joins, reuse, summarize, for_save, ) datetime_fields = {'DateField', 'DateTimeField', 'TimeField'} if self.connector == self.SUB and lhs_type in datetime_fields and lhs_type == rhs_type: return TemporalSubtraction(self.lhs, self.rhs).resolve_expression( query, allow_joins, reuse, summarize, for_save, ) c = self.copy() c.is_summary = summarize c.lhs = lhs c.rhs = rhs return c class DurationExpression(CombinedExpression): def compile(self, side, compiler, connection): try: output = side.output_field except FieldError: pass else: if output.get_internal_type() == 'DurationField': sql, params = compiler.compile(side) return connection.ops.format_for_duration_arithmetic(sql), params return compiler.compile(side) def as_sql(self, compiler, connection): if connection.features.has_native_duration_field: return super().as_sql(compiler, connection) connection.ops.check_expression_support(self) expressions = [] expression_params = [] sql, params = self.compile(self.lhs, compiler, connection) expressions.append(sql) expression_params.extend(params) sql, params = self.compile(self.rhs, compiler, connection) expressions.append(sql) expression_params.extend(params) # order of precedence expression_wrapper = '(%s)' sql = connection.ops.combine_duration_expression(self.connector, expressions) return expression_wrapper % sql, expression_params class TemporalSubtraction(CombinedExpression): output_field = fields.DurationField() def __init__(self, lhs, rhs): super().__init__(lhs, self.SUB, rhs) def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) lhs = compiler.compile(self.lhs) rhs = compiler.compile(self.rhs) return connection.ops.subtract_temporals(self.lhs.output_field.get_internal_type(), lhs, rhs) @deconstructible class F(Combinable): """An object capable of resolving references to existing query objects.""" def __init__(self, name): """ Arguments: * name: the name of the field this expression references """ self.name = name def __repr__(self): return "{}({})".format(self.__class__.__name__, self.name) def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): return query.resolve_ref(self.name, allow_joins, reuse, summarize) def asc(self, **kwargs): return OrderBy(self, **kwargs) def desc(self, **kwargs): return OrderBy(self, descending=True, **kwargs) def __eq__(self, other): return self.__class__ == other.__class__ and self.name == other.name def __hash__(self): return hash(self.name) class ResolvedOuterRef(F): """ An object that contains a reference to an outer query. In this case, the reference to the outer query has been resolved because the inner query has been used as a subquery. 
""" contains_aggregate = False def as_sql(self, *args, **kwargs): raise ValueError( 'This queryset contains a reference to an outer query and may ' 'only be used in a subquery.' ) def resolve_expression(self, *args, **kwargs): col = super().resolve_expression(*args, **kwargs) # FIXME: Rename possibly_multivalued to multivalued and fix detection # for non-multivalued JOINs (e.g. foreign key fields). This should take # into account only many-to-many and one-to-many relationships. col.possibly_multivalued = LOOKUP_SEP in self.name return col def relabeled_clone(self, relabels): return self def get_group_by_cols(self, alias=None): return [] class OuterRef(F): contains_aggregate = False def resolve_expression(self, *args, **kwargs): if isinstance(self.name, self.__class__): return self.name return ResolvedOuterRef(self.name) def relabeled_clone(self, relabels): return self class Func(SQLiteNumericMixin, Expression): """An SQL function call.""" function = None template = '%(function)s(%(expressions)s)' arg_joiner = ', ' arity = None # The number of arguments the function accepts. def __init__(self, *expressions, output_field=None, **extra): if self.arity is not None and len(expressions) != self.arity: raise TypeError( "'%s' takes exactly %s %s (%s given)" % ( self.__class__.__name__, self.arity, "argument" if self.arity == 1 else "arguments", len(expressions), ) ) super().__init__(output_field=output_field) self.source_expressions = self._parse_expressions(*expressions) self.extra = extra def __repr__(self): args = self.arg_joiner.join(str(arg) for arg in self.source_expressions) extra = {**self.extra, **self._get_repr_options()} if extra: extra = ', '.join(str(key) + '=' + str(val) for key, val in sorted(extra.items())) return "{}({}, {})".format(self.__class__.__name__, args, extra) return "{}({})".format(self.__class__.__name__, args) def _get_repr_options(self): """Return a dict of extra __init__() options to include in the repr.""" return {} def get_source_expressions(self): return self.source_expressions def set_source_expressions(self, exprs): self.source_expressions = exprs def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = self.copy() c.is_summary = summarize for pos, arg in enumerate(c.source_expressions): c.source_expressions[pos] = arg.resolve_expression(query, allow_joins, reuse, summarize, for_save) return c def as_sql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context): connection.ops.check_expression_support(self) sql_parts = [] params = [] for arg in self.source_expressions: arg_sql, arg_params = compiler.compile(arg) sql_parts.append(arg_sql) params.extend(arg_params) data = {**self.extra, **extra_context} # Use the first supplied value in this order: the parameter to this # method, a value supplied in __init__()'s **extra (the value in # `data`), or the value defined on the class. 
if function is not None: data['function'] = function else: data.setdefault('function', self.function) template = template or data.get('template', self.template) arg_joiner = arg_joiner or data.get('arg_joiner', self.arg_joiner) data['expressions'] = data['field'] = arg_joiner.join(sql_parts) return template % data, params def copy(self): copy = super().copy() copy.source_expressions = self.source_expressions[:] copy.extra = self.extra.copy() return copy class Value(Expression): """Represent a wrapped value as a node within an expression.""" def __init__(self, value, output_field=None): """ Arguments: * value: the value this expression represents. The value will be added into the sql parameter list and properly quoted. * output_field: an instance of the model field type that this expression will return, such as IntegerField() or CharField(). """ super().__init__(output_field=output_field) self.value = value def __repr__(self): return "{}({})".format(self.__class__.__name__, self.value) def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) val = self.value output_field = self._output_field_or_none if output_field is not None: if self.for_save: val = output_field.get_db_prep_save(val, connection=connection) else: val = output_field.get_db_prep_value(val, connection=connection) if hasattr(output_field, 'get_placeholder'): return output_field.get_placeholder(val, compiler, connection), [val] if val is None: # cx_Oracle does not always convert None to the appropriate # NULL type (like in case expressions using numbers), so we # use a literal SQL NULL return 'NULL', [] return '%s', [val] def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save) c.for_save = for_save return c def get_group_by_cols(self, alias=None): return [] class RawSQL(Expression): def __init__(self, sql, params, output_field=None): if output_field is None: output_field = fields.Field() self.sql, self.params = sql, params super().__init__(output_field=output_field) def __repr__(self): return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params) def as_sql(self, compiler, connection): return '(%s)' % self.sql, self.params def get_group_by_cols(self, alias=None): return [self] def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): # Resolve parents fields used in raw SQL. 
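        # A simple substring scan: if a parent field's column appears in the
        # raw SQL, resolve it so the inheritance join is set up on `query`.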
for parent in query.model._meta.get_parent_list(): for parent_field in parent._meta.local_fields: _, column_name = parent_field.get_attname_column() if column_name.lower() in self.sql.lower(): query.resolve_ref(parent_field.name, allow_joins, reuse, summarize) break return super().resolve_expression(query, allow_joins, reuse, summarize, for_save) class Star(Expression): def __repr__(self): return "'*'" def as_sql(self, compiler, connection): return '*', [] class Random(Expression): output_field = fields.FloatField() def __repr__(self): return "Random()" def as_sql(self, compiler, connection): return connection.ops.random_function_sql(), [] class Col(Expression): contains_column_references = True possibly_multivalued = False def __init__(self, alias, target, output_field=None): if output_field is None: output_field = target super().__init__(output_field=output_field) self.alias, self.target = alias, target def __repr__(self): alias, target = self.alias, self.target identifiers = (alias, str(target)) if alias else (str(target),) return '{}({})'.format(self.__class__.__name__, ', '.join(identifiers)) def as_sql(self, compiler, connection): alias, column = self.alias, self.target.column identifiers = (alias, column) if alias else (column,) sql = '.'.join(map(compiler.quote_name_unless_alias, identifiers)) return sql, [] def relabeled_clone(self, relabels): if self.alias is None: return self return self.__class__(relabels.get(self.alias, self.alias), self.target, self.output_field) def get_group_by_cols(self, alias=None): return [self] def get_db_converters(self, connection): if self.target == self.output_field: return self.output_field.get_db_converters(connection) return (self.output_field.get_db_converters(connection) + self.target.get_db_converters(connection)) class Ref(Expression): """ Reference to column alias of the query. For example, Ref('sum_cost') in qs.annotate(sum_cost=Sum('cost')) query. """ def __init__(self, refs, source): super().__init__() self.refs, self.source = refs, source def __repr__(self): return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source) def get_source_expressions(self): return [self.source] def set_source_expressions(self, exprs): self.source, = exprs def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): # The sub-expression `source` has already been resolved, as this is # just a reference to the name of `source`. return self def relabeled_clone(self, relabels): return self def as_sql(self, compiler, connection): return connection.ops.quote_name(self.refs), [] def get_group_by_cols(self, alias=None): return [self] class ExpressionList(Func): """ An expression containing multiple expressions. Can be used to provide a list of expressions as an argument to another expression, like an ordering clause. """ template = '%(expressions)s' def __init__(self, *expressions, **extra): if not expressions: raise ValueError('%s requires at least one expression.' % self.__class__.__name__) super().__init__(*expressions, **extra) def __str__(self): return self.arg_joiner.join(str(arg) for arg in self.source_expressions) class ExpressionWrapper(Expression): """ An expression that can wrap another expression so that it can provide extra context to the inner expression, such as the output_field. 
""" def __init__(self, expression, output_field): super().__init__(output_field=output_field) if getattr(expression, '_output_field_or_none', True) is None: expression = expression.copy() expression.output_field = output_field self.expression = expression def set_source_expressions(self, exprs): self.expression = exprs[0] def get_source_expressions(self): return [self.expression] def get_group_by_cols(self, alias=None): return self.expression.get_group_by_cols(alias=alias) def as_sql(self, compiler, connection): return self.expression.as_sql(compiler, connection) def __repr__(self): return "{}({})".format(self.__class__.__name__, self.expression) class When(Expression): template = 'WHEN %(condition)s THEN %(result)s' # This isn't a complete conditional expression, must be used in Case(). conditional = False def __init__(self, condition=None, then=None, **lookups): if lookups: if condition is None: condition, lookups = Q(**lookups), None elif getattr(condition, 'conditional', False): condition, lookups = Q(condition, **lookups), None if condition is None or not getattr(condition, 'conditional', False) or lookups: raise TypeError( 'When() supports a Q object, a boolean expression, or lookups ' 'as a condition.' ) if isinstance(condition, Q) and not condition: raise ValueError("An empty Q() can't be used as a When() condition.") super().__init__(output_field=None) self.condition = condition self.result = self._parse_expressions(then)[0] def __str__(self): return "WHEN %r THEN %r" % (self.condition, self.result) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_source_expressions(self): return [self.condition, self.result] def set_source_expressions(self, exprs): self.condition, self.result = exprs def get_source_fields(self): # We're only interested in the fields of the result expressions. return [self.result._output_field_or_none] def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = self.copy() c.is_summary = summarize if hasattr(c.condition, 'resolve_expression'): c.condition = c.condition.resolve_expression(query, allow_joins, reuse, summarize, False) c.result = c.result.resolve_expression(query, allow_joins, reuse, summarize, for_save) return c def as_sql(self, compiler, connection, template=None, **extra_context): connection.ops.check_expression_support(self) template_params = extra_context sql_params = [] condition_sql, condition_params = compiler.compile(self.condition) template_params['condition'] = condition_sql sql_params.extend(condition_params) result_sql, result_params = compiler.compile(self.result) template_params['result'] = result_sql sql_params.extend(result_params) template = template or self.template return template % template_params, sql_params def get_group_by_cols(self, alias=None): # This is not a complete expression and cannot be used in GROUP BY. 
cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols class Case(Expression): """ An SQL searched CASE expression: CASE WHEN n > 0 THEN 'positive' WHEN n < 0 THEN 'negative' ELSE 'zero' END """ template = 'CASE %(cases)s ELSE %(default)s END' case_joiner = ' ' def __init__(self, *cases, default=None, output_field=None, **extra): if not all(isinstance(case, When) for case in cases): raise TypeError("Positional arguments must all be When objects.") super().__init__(output_field) self.cases = list(cases) self.default = self._parse_expressions(default)[0] self.extra = extra def __str__(self): return "CASE %s, ELSE %r" % (', '.join(str(c) for c in self.cases), self.default) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_source_expressions(self): return self.cases + [self.default] def set_source_expressions(self, exprs): *self.cases, self.default = exprs def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = self.copy() c.is_summary = summarize for pos, case in enumerate(c.cases): c.cases[pos] = case.resolve_expression(query, allow_joins, reuse, summarize, for_save) c.default = c.default.resolve_expression(query, allow_joins, reuse, summarize, for_save) return c def copy(self): c = super().copy() c.cases = c.cases[:] return c def as_sql(self, compiler, connection, template=None, case_joiner=None, **extra_context): connection.ops.check_expression_support(self) if not self.cases: return compiler.compile(self.default) template_params = {**self.extra, **extra_context} case_parts = [] sql_params = [] for case in self.cases: try: case_sql, case_params = compiler.compile(case) except EmptyResultSet: continue case_parts.append(case_sql) sql_params.extend(case_params) default_sql, default_params = compiler.compile(self.default) if not case_parts: return default_sql, default_params case_joiner = case_joiner or self.case_joiner template_params['cases'] = case_joiner.join(case_parts) template_params['default'] = default_sql sql_params.extend(default_params) template = template or template_params.get('template', self.template) sql = template % template_params if self._output_field_or_none is not None: sql = connection.ops.unification_cast_sql(self.output_field) % sql return sql, sql_params class Subquery(Expression): """ An explicit subquery. It may contain OuterRef() references to the outer query which will be resolved when it is applied to that query. """ template = '(%(subquery)s)' contains_aggregate = False def __init__(self, queryset, output_field=None, **extra): self.query = queryset.query self.extra = extra # Prevent the QuerySet from being evaluated. 
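        # An empty _result_cache makes the stored queryset look already
        # evaluated, so repr() and pickling won't run the query.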
self.queryset = queryset._chain(_result_cache=[], prefetch_done=True) super().__init__(output_field) def __getstate__(self): state = super().__getstate__() args, kwargs = state['_constructor_args'] if args: args = (self.queryset, *args[1:]) else: kwargs['queryset'] = self.queryset state['_constructor_args'] = args, kwargs return state def get_source_expressions(self): return [self.query] def set_source_expressions(self, exprs): self.query = exprs[0] def _resolve_output_field(self): return self.query.output_field def copy(self): clone = super().copy() clone.query = clone.query.clone() return clone @property def external_aliases(self): return self.query.external_aliases def as_sql(self, compiler, connection, template=None, **extra_context): connection.ops.check_expression_support(self) template_params = {**self.extra, **extra_context} subquery_sql, sql_params = self.query.as_sql(compiler, connection) template_params['subquery'] = subquery_sql[1:-1] template = template or template_params.get('template', self.template) sql = template % template_params return sql, sql_params def get_group_by_cols(self, alias=None): if alias: return [Ref(alias, self)] external_cols = self.query.get_external_cols() if any(col.possibly_multivalued for col in external_cols): return [self] return external_cols class Exists(Subquery): template = 'EXISTS(%(subquery)s)' output_field = fields.BooleanField() def __init__(self, queryset, negated=False, **kwargs): # As a performance optimization, remove ordering since EXISTS doesn't # care about it, just whether or not a row matches. queryset = queryset.order_by() self.negated = negated super().__init__(queryset, **kwargs) def __invert__(self): clone = self.copy() clone.negated = not self.negated return clone def as_sql(self, compiler, connection, template=None, **extra_context): sql, params = super().as_sql(compiler, connection, template, **extra_context) if self.negated: sql = 'NOT {}'.format(sql) return sql, params def select_format(self, compiler, sql, params): # Wrap EXISTS() with a CASE WHEN expression if a database backend # (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP # BY list. 
if not compiler.connection.features.supports_boolean_expr_in_select_clause: sql = 'CASE WHEN {} THEN 1 ELSE 0 END'.format(sql) return sql, params class OrderBy(BaseExpression): template = '%(expression)s %(ordering)s' conditional = False def __init__(self, expression, descending=False, nulls_first=False, nulls_last=False): if nulls_first and nulls_last: raise ValueError('nulls_first and nulls_last are mutually exclusive') self.nulls_first = nulls_first self.nulls_last = nulls_last self.descending = descending if not hasattr(expression, 'resolve_expression'): raise ValueError('expression must be an expression type') self.expression = expression def __repr__(self): return "{}({}, descending={})".format( self.__class__.__name__, self.expression, self.descending) def set_source_expressions(self, exprs): self.expression = exprs[0] def get_source_expressions(self): return [self.expression] def as_sql(self, compiler, connection, template=None, **extra_context): template = template or self.template if connection.features.supports_order_by_nulls_modifier: if self.nulls_last: template = '%s NULLS LAST' % template elif self.nulls_first: template = '%s NULLS FIRST' % template else: if self.nulls_last and not ( self.descending and connection.features.order_by_nulls_first ): template = '%%(expression)s IS NULL, %s' % template elif self.nulls_first and not ( not self.descending and connection.features.order_by_nulls_first ): template = '%%(expression)s IS NOT NULL, %s' % template connection.ops.check_expression_support(self) expression_sql, params = compiler.compile(self.expression) placeholders = { 'expression': expression_sql, 'ordering': 'DESC' if self.descending else 'ASC', **extra_context, } template = template or self.template params *= template.count('%(expression)s') return (template % placeholders).rstrip(), params def as_oracle(self, compiler, connection): # Oracle doesn't allow ORDER BY EXISTS() unless it's wrapped in # a CASE WHEN. if isinstance(self.expression, Exists): copy = self.copy() copy.expression = Case( When(self.expression, then=True), default=False, output_field=fields.BooleanField(), ) return copy.as_sql(compiler, connection) return self.as_sql(compiler, connection) def get_group_by_cols(self, alias=None): cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols def reverse_ordering(self): self.descending = not self.descending if self.nulls_first or self.nulls_last: self.nulls_first = not self.nulls_first self.nulls_last = not self.nulls_last return self def asc(self): self.descending = False def desc(self): self.descending = True class Window(Expression): template = '%(expression)s OVER (%(window)s)' # Although the main expression may either be an aggregate or an # expression with an aggregate function, the GROUP BY that will # be introduced in the query as a result is not desired. contains_aggregate = False contains_over_clause = True filterable = False def __init__(self, expression, partition_by=None, order_by=None, frame=None, output_field=None): self.partition_by = partition_by self.order_by = order_by self.frame = frame if not getattr(expression, 'window_compatible', False): raise ValueError( "Expression '%s' isn't compatible with OVER clauses." 
% expression.__class__.__name__ ) if self.partition_by is not None: if not isinstance(self.partition_by, (tuple, list)): self.partition_by = (self.partition_by,) self.partition_by = ExpressionList(*self.partition_by) if self.order_by is not None: if isinstance(self.order_by, (list, tuple)): self.order_by = ExpressionList(*self.order_by) elif not isinstance(self.order_by, BaseExpression): raise ValueError( 'order_by must be either an Expression or a sequence of ' 'expressions.' ) super().__init__(output_field=output_field) self.source_expression = self._parse_expressions(expression)[0] def _resolve_output_field(self): return self.source_expression.output_field def get_source_expressions(self): return [self.source_expression, self.partition_by, self.order_by, self.frame] def set_source_expressions(self, exprs): self.source_expression, self.partition_by, self.order_by, self.frame = exprs def as_sql(self, compiler, connection, template=None): connection.ops.check_expression_support(self) if not connection.features.supports_over_clause: raise NotSupportedError('This backend does not support window expressions.') expr_sql, params = compiler.compile(self.source_expression) window_sql, window_params = [], [] if self.partition_by is not None: sql_expr, sql_params = self.partition_by.as_sql( compiler=compiler, connection=connection, template='PARTITION BY %(expressions)s', ) window_sql.extend(sql_expr) window_params.extend(sql_params) if self.order_by is not None: window_sql.append(' ORDER BY ') order_sql, order_params = compiler.compile(self.order_by) window_sql.extend(order_sql) window_params.extend(order_params) if self.frame: frame_sql, frame_params = compiler.compile(self.frame) window_sql.append(' ' + frame_sql) window_params.extend(frame_params) params.extend(window_params) template = template or self.template return template % { 'expression': expr_sql, 'window': ''.join(window_sql).strip() }, params def __str__(self): return '{} OVER ({}{}{})'.format( str(self.source_expression), 'PARTITION BY ' + str(self.partition_by) if self.partition_by else '', 'ORDER BY ' + str(self.order_by) if self.order_by else '', str(self.frame or ''), ) def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self) def get_group_by_cols(self, alias=None): return [] class WindowFrame(Expression): """ Model the frame clause in window expressions. There are two types of frame clauses which are subclasses, however, all processing and validation (by no means intended to be complete) is done here. Thus, providing an end for a frame is optional (the default is UNBOUNDED FOLLOWING, which is the last row in the frame). 
""" template = '%(frame_type)s BETWEEN %(start)s AND %(end)s' def __init__(self, start=None, end=None): self.start = Value(start) self.end = Value(end) def set_source_expressions(self, exprs): self.start, self.end = exprs def get_source_expressions(self): return [self.start, self.end] def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) start, end = self.window_frame_start_end(connection, self.start.value, self.end.value) return self.template % { 'frame_type': self.frame_type, 'start': start, 'end': end, }, [] def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self) def get_group_by_cols(self, alias=None): return [] def __str__(self): if self.start.value is not None and self.start.value < 0: start = '%d %s' % (abs(self.start.value), connection.ops.PRECEDING) elif self.start.value is not None and self.start.value == 0: start = connection.ops.CURRENT_ROW else: start = connection.ops.UNBOUNDED_PRECEDING if self.end.value is not None and self.end.value > 0: end = '%d %s' % (self.end.value, connection.ops.FOLLOWING) elif self.end.value is not None and self.end.value == 0: end = connection.ops.CURRENT_ROW else: end = connection.ops.UNBOUNDED_FOLLOWING return self.template % { 'frame_type': self.frame_type, 'start': start, 'end': end, } def window_frame_start_end(self, connection, start, end): raise NotImplementedError('Subclasses must implement window_frame_start_end().') class RowRange(WindowFrame): frame_type = 'ROWS' def window_frame_start_end(self, connection, start, end): return connection.ops.window_frame_rows_start_end(start, end) class ValueRange(WindowFrame): frame_type = 'RANGE' def window_frame_start_end(self, connection, start, end): return connection.ops.window_frame_range_start_end(start, end)
from django.db import NotSupportedError from django.db.models.expressions import Func, Value from django.db.models.fields import CharField, IntegerField from django.db.models.functions import Coalesce from django.db.models.lookups import Transform class MySQLSHA2Mixin: def as_mysql(self, compiler, connection, **extra_content): return super().as_sql( compiler, connection, template='SHA2(%%(expressions)s, %s)' % self.function[3:], **extra_content, ) class OracleHashMixin: def as_oracle(self, compiler, connection, **extra_context): return super().as_sql( compiler, connection, template=( "LOWER(RAWTOHEX(STANDARD_HASH(UTL_I18N.STRING_TO_RAW(" "%(expressions)s, 'AL32UTF8'), '%(function)s')))" ), **extra_context, ) class PostgreSQLSHAMixin: def as_postgresql(self, compiler, connection, **extra_content): return super().as_sql( compiler, connection, template="ENCODE(DIGEST(%(expressions)s, '%(function)s'), 'hex')", function=self.function.lower(), **extra_content, ) class Chr(Transform): function = 'CHR' lookup_name = 'chr' def as_mysql(self, compiler, connection, **extra_context): return super().as_sql( compiler, connection, function='CHAR', template='%(function)s(%(expressions)s USING utf16)', **extra_context ) def as_oracle(self, compiler, connection, **extra_context): return super().as_sql( compiler, connection, template='%(function)s(%(expressions)s USING NCHAR_CS)', **extra_context ) def as_sqlite(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function='CHAR', **extra_context) class ConcatPair(Func): """ Concatenate two arguments together. This is used by `Concat` because not all backend databases support more than two arguments. """ function = 'CONCAT' def as_sqlite(self, compiler, connection, **extra_context): coalesced = self.coalesce() return super(ConcatPair, coalesced).as_sql( compiler, connection, template='%(expressions)s', arg_joiner=' || ', **extra_context ) def as_mysql(self, compiler, connection, **extra_context): # Use CONCAT_WS with an empty separator so that NULLs are ignored. return super().as_sql( compiler, connection, function='CONCAT_WS', template="%(function)s('', %(expressions)s)", **extra_context ) def coalesce(self): # null on either side results in null for expression, wrap with coalesce c = self.copy() c.set_source_expressions([ Coalesce(expression, Value('')) for expression in c.get_source_expressions() ]) return c class Concat(Func): """ Concatenate text fields together. Backends that result in an entire null expression when any arguments are null will wrap each argument in coalesce functions to ensure a non-null result. 
""" function = None template = "%(expressions)s" def __init__(self, *expressions, **extra): if len(expressions) < 2: raise ValueError('Concat must take at least two expressions') paired = self._paired(expressions) super().__init__(paired, **extra) def _paired(self, expressions): # wrap pairs of expressions in successive concat functions # exp = [a, b, c, d] # -> ConcatPair(a, ConcatPair(b, ConcatPair(c, d)))) if len(expressions) == 2: return ConcatPair(*expressions) return ConcatPair(expressions[0], self._paired(expressions[1:])) class Left(Func): function = 'LEFT' arity = 2 output_field = CharField() def __init__(self, expression, length, **extra): """ expression: the name of a field, or an expression returning a string length: the number of characters to return from the start of the string """ if not hasattr(length, 'resolve_expression'): if length < 1: raise ValueError("'length' must be greater than 0.") super().__init__(expression, length, **extra) def get_substr(self): return Substr(self.source_expressions[0], Value(1), self.source_expressions[1]) def as_oracle(self, compiler, connection, **extra_context): return self.get_substr().as_oracle(compiler, connection, **extra_context) def as_sqlite(self, compiler, connection, **extra_context): return self.get_substr().as_sqlite(compiler, connection, **extra_context) class Length(Transform): """Return the number of characters in the expression.""" function = 'LENGTH' lookup_name = 'length' output_field = IntegerField() def as_mysql(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function='CHAR_LENGTH', **extra_context) class Lower(Transform): function = 'LOWER' lookup_name = 'lower' class LPad(Func): function = 'LPAD' output_field = CharField() def __init__(self, expression, length, fill_text=Value(' '), **extra): if not hasattr(length, 'resolve_expression') and length is not None and length < 0: raise ValueError("'length' must be greater or equal to 0.") super().__init__(expression, length, fill_text, **extra) class LTrim(Transform): function = 'LTRIM' lookup_name = 'ltrim' class MD5(OracleHashMixin, Transform): function = 'MD5' lookup_name = 'md5' class Ord(Transform): function = 'ASCII' lookup_name = 'ord' output_field = IntegerField() def as_mysql(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function='ORD', **extra_context) def as_sqlite(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function='UNICODE', **extra_context) class Repeat(Func): function = 'REPEAT' output_field = CharField() def __init__(self, expression, number, **extra): if not hasattr(number, 'resolve_expression') and number is not None and number < 0: raise ValueError("'number' must be greater or equal to 0.") super().__init__(expression, number, **extra) def as_oracle(self, compiler, connection, **extra_context): expression, number = self.source_expressions length = None if number is None else Length(expression) * number rpad = RPad(expression, length, expression) return rpad.as_sql(compiler, connection, **extra_context) class Replace(Func): function = 'REPLACE' def __init__(self, expression, text, replacement=Value(''), **extra): super().__init__(expression, text, replacement, **extra) class Reverse(Transform): function = 'REVERSE' lookup_name = 'reverse' def as_oracle(self, compiler, connection, **extra_context): # REVERSE in Oracle is undocumented and doesn't support multi-byte # strings. Use a special subquery instead. 
return super().as_sql( compiler, connection, template=( '(SELECT LISTAGG(s) WITHIN GROUP (ORDER BY n DESC) FROM ' '(SELECT LEVEL n, SUBSTR(%(expressions)s, LEVEL, 1) s ' 'FROM DUAL CONNECT BY LEVEL <= LENGTH(%(expressions)s)) ' 'GROUP BY %(expressions)s)' ), **extra_context ) class Right(Left): function = 'RIGHT' def get_substr(self): return Substr(self.source_expressions[0], self.source_expressions[1] * Value(-1)) class RPad(LPad): function = 'RPAD' class RTrim(Transform): function = 'RTRIM' lookup_name = 'rtrim' class SHA1(OracleHashMixin, PostgreSQLSHAMixin, Transform): function = 'SHA1' lookup_name = 'sha1' class SHA224(MySQLSHA2Mixin, PostgreSQLSHAMixin, Transform): function = 'SHA224' lookup_name = 'sha224' def as_oracle(self, compiler, connection, **extra_context): raise NotSupportedError('SHA224 is not supported on Oracle.') class SHA256(MySQLSHA2Mixin, OracleHashMixin, PostgreSQLSHAMixin, Transform): function = 'SHA256' lookup_name = 'sha256' class SHA384(MySQLSHA2Mixin, OracleHashMixin, PostgreSQLSHAMixin, Transform): function = 'SHA384' lookup_name = 'sha384' class SHA512(MySQLSHA2Mixin, OracleHashMixin, PostgreSQLSHAMixin, Transform): function = 'SHA512' lookup_name = 'sha512' class StrIndex(Func): """ Return a positive integer corresponding to the 1-indexed position of the first occurrence of a substring inside another string, or 0 if the substring is not found. """ function = 'INSTR' arity = 2 output_field = IntegerField() def as_postgresql(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function='STRPOS', **extra_context) class Substr(Func): function = 'SUBSTRING' output_field = CharField() def __init__(self, expression, pos, length=None, **extra): """ expression: the name of a field, or an expression returning a string pos: an integer > 0, or an expression returning an integer length: an optional number of characters to return """ if not hasattr(pos, 'resolve_expression'): if pos < 1: raise ValueError("'pos' must be greater than 0") expressions = [expression, pos] if length is not None: expressions.append(length) super().__init__(*expressions, **extra) def as_sqlite(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function='SUBSTR', **extra_context) def as_oracle(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function='SUBSTR', **extra_context) class Trim(Transform): function = 'TRIM' lookup_name = 'trim' class Upper(Transform): function = 'UPPER' lookup_name = 'upper'
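

# A minimal usage sketch; the Author model and its fields are hypothetical
# and only illustrate how the functions above combine in a QuerySet. Note
# how Concat('first_name', Value(' '), 'last_name') is compiled through
# _paired() into ConcatPair(first_name, ConcatPair(' ', last_name)):
#
#   from django.db.models import Value
#   from django.db.models.functions import Concat, Length, Lower
#
#   Author.objects.annotate(
#       full_name=Concat('first_name', Value(' '), 'last_name'),
#       email_lower=Lower('email'),
#       name_length=Length('first_name'),
#   )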
f0dd780ae7020fdeffb4b2be417e49da39bd0f8d12b0007cfc9457db0800facb
""" Create SQL statements for QuerySets. The code in here encapsulates all of the SQL construction so that QuerySets themselves do not have to (and could be backed by things other than SQL databases). The abstraction barrier only works one way: this module has to know all about the internals of models in order to get the information it needs. """ import copy import difflib import functools import inspect import sys import warnings from collections import Counter, namedtuple from collections.abc import Iterator, Mapping from itertools import chain, count, product from string import ascii_uppercase from django.core.exceptions import ( EmptyResultSet, FieldDoesNotExist, FieldError, ) from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections from django.db.models.aggregates import Count from django.db.models.constants import LOOKUP_SEP from django.db.models.expressions import BaseExpression, Col, F, OuterRef, Ref from django.db.models.fields import Field from django.db.models.fields.related_lookups import MultiColSource from django.db.models.lookups import Lookup from django.db.models.query_utils import ( Q, check_rel_lookup_compatibility, refs_expression, ) from django.db.models.sql.constants import INNER, LOUTER, ORDER_DIR, SINGLE from django.db.models.sql.datastructures import ( BaseTable, Empty, Join, MultiJoin, ) from django.db.models.sql.where import ( AND, OR, ExtraWhere, NothingNode, WhereNode, ) from django.utils.deprecation import RemovedInDjango40Warning from django.utils.functional import cached_property from django.utils.tree import Node __all__ = ['Query', 'RawQuery'] def get_field_names_from_opts(opts): return set(chain.from_iterable( (f.name, f.attname) if f.concrete else (f.name,) for f in opts.get_fields() )) def get_children_from_q(q): for child in q.children: if isinstance(child, Node): yield from get_children_from_q(child) else: yield child JoinInfo = namedtuple( 'JoinInfo', ('final_field', 'targets', 'opts', 'joins', 'path', 'transform_function') ) class RawQuery: """A single raw SQL query.""" def __init__(self, sql, using, params=None): self.params = params or () self.sql = sql self.using = using self.cursor = None # Mirror some properties of a normal query so that # the compiler can be used to process results. self.low_mark, self.high_mark = 0, None # Used for offset/limit self.extra_select = {} self.annotation_select = {} def chain(self, using): return self.clone(using) def clone(self, using): return RawQuery(self.sql, using, params=self.params) def get_columns(self): if self.cursor is None: self._execute_query() converter = connections[self.using].introspection.identifier_converter return [converter(column_meta[0]) for column_meta in self.cursor.description] def __iter__(self): # Always execute a new query for a new iterator. # This could be optimized with a cache at the expense of RAM. self._execute_query() if not connections[self.using].features.can_use_chunked_reads: # If the database can't use chunked reads we need to make sure we # evaluate the entire query up front. result = list(self.cursor) else: result = self.cursor return iter(result) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) @property def params_type(self): return dict if isinstance(self.params, Mapping) else tuple def __str__(self): return self.sql % self.params_type(self.params) def _execute_query(self): connection = connections[self.using] # Adapt parameters to the database, as much as possible considering # that the target type isn't known. See #17755. 
        params_type = self.params_type
        adapter = connection.ops.adapt_unknown_value
        if params_type is tuple:
            params = tuple(adapter(val) for val in self.params)
        elif params_type is dict:
            params = {key: adapter(val) for key, val in self.params.items()}
        else:
            raise RuntimeError("Unexpected params type: %s" % params_type)

        self.cursor = connection.cursor()
        self.cursor.execute(self.sql, params)


class Query(BaseExpression):
    """A single SQL query."""

    alias_prefix = 'T'
    subq_aliases = frozenset([alias_prefix])

    compiler = 'SQLCompiler'

    def __init__(self, model, where=WhereNode, alias_cols=True):
        self.model = model
        self.alias_refcount = {}
        # alias_map is the most important data structure regarding joins.
        # It's used for recording which joins exist in the query and what
        # types they are. The key is the alias of the joined table (possibly
        # the table name) and the value is a Join-like object (see
        # sql.datastructures.Join for more information).
        self.alias_map = {}
        # Whether to provide alias to columns during reference resolving.
        self.alias_cols = alias_cols
        # Sometimes the query contains references to aliases in outer queries (as
        # a result of split_exclude). Correct alias quoting needs to know these
        # aliases too.
        # Map external tables to whether they are aliased.
        self.external_aliases = {}
        self.table_map = {}     # Maps table names to list of aliases.
        self.default_cols = True
        self.default_ordering = True
        self.standard_ordering = True
        self.used_aliases = set()
        self.filter_is_sticky = False
        self.subquery = False

        # SQL-related attributes
        # Select and related select clauses are expressions to use in the
        # SELECT clause of the query.
        # The select is used for cases where we want to set up the select
        # clause to contain other than default fields (values(), subqueries...)
        # Note that annotations go to annotations dictionary.
        self.select = ()
        self.where = where()
        self.where_class = where
        # The group_by attribute can have one of the following forms:
        #  - None: no group by at all in the query
        #  - A tuple of expressions: group by (at least) those expressions.
        #    String refs are also allowed for now.
        #  - True: group by all select fields of the model
        # See compiler.get_group_by() for details.
        self.group_by = None
        self.order_by = ()
        self.low_mark, self.high_mark = 0, None  # Used for offset/limit
        self.distinct = False
        self.distinct_fields = ()
        self.select_for_update = False
        self.select_for_update_nowait = False
        self.select_for_update_skip_locked = False
        self.select_for_update_of = ()
        self.select_for_no_key_update = False

        self.select_related = False
        # Arbitrary limit for select_related to prevent infinite recursion.
        self.max_depth = 5

        # Holds the selects defined by a call to values() or values_list()
        # excluding annotation_select and extra_select.
        self.values_select = ()

        # SQL annotation-related attributes
        self.annotations = {}  # Maps alias -> Annotation Expression
        self.annotation_select_mask = None
        self._annotation_select_cache = None

        # Set combination attributes
        self.combinator = None
        self.combinator_all = False
        self.combined_queries = ()

        # These are for extensions. The contents are more or less appended
        # verbatim to the appropriate clause.
        self.extra = {}  # Maps col_alias -> (col_sql, params).
        self.extra_select_mask = None
        self._extra_select_cache = None

        self.extra_tables = ()
        self.extra_order_by = ()

        # A tuple that is a set of model field names and either True, if these
        # are the fields to defer, or False if these are the only fields to
        # load.
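        # Illustrative examples: qs.defer('name') yields
        # (frozenset({'name'}), True), while qs.only('name') yields
        # (frozenset({'name'}), False).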
        self.deferred_loading = (frozenset(), True)

        self._filtered_relations = {}

        self.explain_query = False
        self.explain_format = None
        self.explain_options = {}

    @property
    def output_field(self):
        if len(self.select) == 1:
            select = self.select[0]
            return getattr(select, 'target', None) or select.field
        elif len(self.annotation_select) == 1:
            return next(iter(self.annotation_select.values())).output_field

    @property
    def has_select_fields(self):
        return bool(self.select or self.annotation_select_mask or self.extra_select_mask)

    @cached_property
    def base_table(self):
        for alias in self.alias_map:
            return alias

    def __str__(self):
        """
        Return the query as a string of SQL with the parameter values
        substituted in (use sql_with_params() to see the unsubstituted string).

        Parameter values won't necessarily be quoted correctly, since that is
        done by the database interface at execution time.
        """
        sql, params = self.sql_with_params()
        return sql % params

    def sql_with_params(self):
        """
        Return the query as an SQL string and the parameters that will be
        substituted into the query.
        """
        return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()

    def __deepcopy__(self, memo):
        """Limit the amount of work when a Query is deepcopied."""
        result = self.clone()
        memo[id(self)] = result
        return result

    def get_compiler(self, using=None, connection=None):
        if using is None and connection is None:
            raise ValueError("Need either using or connection")
        if using:
            connection = connections[using]
        return connection.ops.compiler(self.compiler)(self, connection, using)

    def get_meta(self):
        """
        Return the Options instance (the model._meta) from which to start
        processing. Normally, this is self.model._meta, but it can be changed
        by subclasses.
        """
        return self.model._meta

    def clone(self):
        """
        Return a copy of the current Query. A lightweight alternative to
        deepcopy().
        """
        obj = Empty()
        obj.__class__ = self.__class__
        # Copy references to everything.
        obj.__dict__ = self.__dict__.copy()
        # Clone attributes that can't use shallow copy.
        obj.alias_refcount = self.alias_refcount.copy()
        obj.alias_map = self.alias_map.copy()
        obj.external_aliases = self.external_aliases.copy()
        obj.table_map = self.table_map.copy()
        obj.where = self.where.clone()
        obj.annotations = self.annotations.copy()
        if self.annotation_select_mask is None:
            obj.annotation_select_mask = None
        else:
            obj.annotation_select_mask = self.annotation_select_mask.copy()
        # _annotation_select_cache cannot be copied, as doing so breaks the
        # (necessary) state in which both annotations and
        # _annotation_select_cache point to the same underlying objects.
        # It will get re-populated in the cloned queryset the next time it's
        # used.
        obj._annotation_select_cache = None
        obj.extra = self.extra.copy()
        if self.extra_select_mask is None:
            obj.extra_select_mask = None
        else:
            obj.extra_select_mask = self.extra_select_mask.copy()
        if self._extra_select_cache is None:
            obj._extra_select_cache = None
        else:
            obj._extra_select_cache = self._extra_select_cache.copy()
        if self.select_related is not False:
            # Use deepcopy because select_related stores fields in nested
            # dicts.
            obj.select_related = copy.deepcopy(obj.select_related)
        if 'subq_aliases' in self.__dict__:
            obj.subq_aliases = self.subq_aliases.copy()
        obj.used_aliases = self.used_aliases.copy()
        obj._filtered_relations = self._filtered_relations.copy()
        # Clear the cached_property
        try:
            del obj.base_table
        except AttributeError:
            pass
        return obj

    def chain(self, klass=None):
        """
        Return a copy of the current Query that's ready for another operation.
        The klass argument changes the type of the Query, e.g. UpdateQuery.
        """
        obj = self.clone()
        if klass and obj.__class__ != klass:
            obj.__class__ = klass
        if not obj.filter_is_sticky:
            obj.used_aliases = set()
        obj.filter_is_sticky = False
        if hasattr(obj, '_setup_query'):
            obj._setup_query()
        return obj

    def relabeled_clone(self, change_map):
        clone = self.clone()
        clone.change_aliases(change_map)
        return clone

    def _get_col(self, target, field, alias):
        if not self.alias_cols:
            alias = None
        return target.get_col(alias, field)

    def rewrite_cols(self, annotation, col_cnt):
        # We must make sure the inner query has the referred columns in it.
        # If we are aggregating over an annotation, then Django uses Ref()
        # instances to note this. However, if we are annotating over a column
        # of a related model, then it might be that column isn't part of the
        # SELECT clause of the inner query, and we must manually make sure
        # the column is selected. An example case is:
        #    .aggregate(Sum('author__awards'))
        # Resolving this expression results in a join to author, but there
        # is no guarantee the awards column of author is in the select clause
        # of the query. Thus we must manually add the column to the inner
        # query.
        orig_exprs = annotation.get_source_expressions()
        new_exprs = []
        for expr in orig_exprs:
            # FIXME: These conditions are fairly arbitrary. Identify a better
            # method of having expressions decide which code path they should
            # take.
            if isinstance(expr, Ref):
                # It's already a Ref to subquery (see resolve_ref() for
                # details)
                new_exprs.append(expr)
            elif isinstance(expr, (WhereNode, Lookup)):
                # Decompose the subexpressions further. The code here is
                # copied from the else clause, but this condition must appear
                # before the contains_aggregate/is_summary condition below.
                new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
                new_exprs.append(new_expr)
            else:
                # Reuse aliases of expressions already selected in subquery.
                for col_alias, selected_annotation in self.annotation_select.items():
                    if selected_annotation is expr:
                        new_expr = Ref(col_alias, expr)
                        break
                else:
                    # An expression that is not selected in the subquery.
                    if isinstance(expr, Col) or (expr.contains_aggregate and not expr.is_summary):
                        # Reference column or another aggregate. Select it
                        # under a non-conflicting alias.
                        col_cnt += 1
                        col_alias = '__col%d' % col_cnt
                        self.annotations[col_alias] = expr
                        self.append_annotation_mask([col_alias])
                        new_expr = Ref(col_alias, expr)
                    else:
                        # Some other expression not referencing database values
                        # directly. Its subexpression might contain Cols.
                        new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
                new_exprs.append(new_expr)
        annotation.set_source_expressions(new_exprs)
        return annotation, col_cnt

    def get_aggregation(self, using, added_aggregate_names):
        """
        Return the dictionary with the values of the existing aggregations.
        """
        if not self.annotation_select:
            return {}
        existing_annotations = [
            annotation for alias, annotation
            in self.annotations.items()
            if alias not in added_aggregate_names
        ]
        # Decide if we need to use a subquery.
        #
        # Existing annotations would cause incorrect results as get_aggregation()
        # must produce just one result and thus must not use GROUP BY. But we
        # aren't smart enough to remove the existing annotations from the
        # query, so those would force us to use GROUP BY.
        #
        # If the query has limit or distinct, or uses set operations, then
        # those operations must be done in a subquery so that the query
        # aggregates on the limit and/or distinct results instead of applying
        # the distinct and limit after the aggregation.
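        # Illustrative example: Model.objects.all()[:5].count() must count
        # the five sliced rows, so the slice runs in a subquery and the
        # aggregate is applied to that subquery's results.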
        if (isinstance(self.group_by, tuple) or self.is_sliced or existing_annotations or
                self.distinct or self.combinator):
            from django.db.models.sql.subqueries import AggregateQuery
            outer_query = AggregateQuery(self.model)
            inner_query = self.clone()
            inner_query.select_for_update = False
            inner_query.select_related = False
            inner_query.set_annotation_mask(self.annotation_select)
            if not self.is_sliced and not self.distinct_fields:
                # Queries with distinct_fields need ordering and when a limit
                # is applied we must take the slice from the ordered query.
                # Otherwise no need for ordering.
                inner_query.clear_ordering(True)
            if not inner_query.distinct:
                # If the inner query uses default select and it has some
                # aggregate annotations, then we must make sure the inner
                # query is grouped by the main model's primary key. However,
                # clearing the select clause can alter results if distinct is
                # used.
                has_existing_aggregate_annotations = any(
                    annotation for annotation in existing_annotations
                    if getattr(annotation, 'contains_aggregate', True)
                )
                if inner_query.default_cols and has_existing_aggregate_annotations:
                    inner_query.group_by = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
                inner_query.default_cols = False

            relabels = {t: 'subquery' for t in inner_query.alias_map}
            relabels[None] = 'subquery'
            # Remove any aggregates marked for reduction from the subquery
            # and move them to the outer AggregateQuery.
            col_cnt = 0
            for alias, expression in list(inner_query.annotation_select.items()):
                annotation_select_mask = inner_query.annotation_select_mask
                if expression.is_summary:
                    expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt)
                    outer_query.annotations[alias] = expression.relabeled_clone(relabels)
                    del inner_query.annotations[alias]
                    annotation_select_mask.remove(alias)
                # Make sure the annotation_select won't use cached results.
                inner_query.set_annotation_mask(inner_query.annotation_select_mask)
            if inner_query.select == () and not inner_query.default_cols and not inner_query.annotation_select_mask:
                # In case of Model.objects[0:3].count(), there would be no
                # field selected in the inner query, yet we must use a subquery.
                # So, make sure at least one field is selected.
                inner_query.select = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
            try:
                outer_query.add_subquery(inner_query, using)
            except EmptyResultSet:
                return {
                    alias: None
                    for alias in outer_query.annotation_select
                }
        else:
            outer_query = self
            self.select = ()
            self.default_cols = False
            self.extra = {}

        outer_query.clear_ordering(True)
        outer_query.clear_limits()
        outer_query.select_for_update = False
        outer_query.select_related = False
        compiler = outer_query.get_compiler(using)
        result = compiler.execute_sql(SINGLE)
        if result is None:
            result = [None] * len(outer_query.annotation_select)

        converters = compiler.get_converters(outer_query.annotation_select.values())
        result = next(compiler.apply_converters((result,), converters))

        return dict(zip(outer_query.annotation_select, result))

    def get_count(self, using):
        """
        Perform a COUNT() query using the current filter constraints.
""" obj = self.clone() obj.add_annotation(Count('*'), alias='__count', is_summary=True) number = obj.get_aggregation(using, ['__count'])['__count'] if number is None: number = 0 return number def has_filters(self): return self.where def has_results(self, using): q = self.clone() if not q.distinct: if q.group_by is True: q.add_fields((f.attname for f in self.model._meta.concrete_fields), False) # Disable GROUP BY aliases to avoid orphaning references to the # SELECT clause which is about to be cleared. q.set_group_by(allow_aliases=False) q.clear_select_clause() q.clear_ordering(True) q.set_limits(high=1) compiler = q.get_compiler(using=using) return compiler.has_results() def explain(self, using, format=None, **options): q = self.clone() q.explain_query = True q.explain_format = format q.explain_options = options compiler = q.get_compiler(using=using) return '\n'.join(compiler.explain_query()) def combine(self, rhs, connector): """ Merge the 'rhs' query into the current one (with any 'rhs' effects being applied *after* (that is, "to the right of") anything in the current query. 'rhs' is not modified during a call to this function. The 'connector' parameter describes how to connect filters from the 'rhs' query. """ assert self.model == rhs.model, \ "Cannot combine queries on two different base models." assert not self.is_sliced, \ "Cannot combine queries once a slice has been taken." assert self.distinct == rhs.distinct, \ "Cannot combine a unique query with a non-unique query." assert self.distinct_fields == rhs.distinct_fields, \ "Cannot combine queries with different distinct fields." # Work out how to relabel the rhs aliases, if necessary. change_map = {} conjunction = (connector == AND) # Determine which existing joins can be reused. When combining the # query with AND we must recreate all joins for m2m filters. When # combining with OR we can reuse joins. The reason is that in AND # case a single row can't fulfill a condition like: # revrel__col=1 & revrel__col=2 # But, there might be two different related rows matching this # condition. In OR case a single True is enough, so single row is # enough, too. # # Note that we will be creating duplicate joins for non-m2m joins in # the AND case. The results will be correct but this creates too many # joins. This is something that could be fixed later on. reuse = set() if conjunction else set(self.alias_map) # Base table must be present in the query - this is the same # table on both sides. self.get_initial_alias() joinpromoter = JoinPromoter(connector, 2, False) joinpromoter.add_votes( j for j in self.alias_map if self.alias_map[j].join_type == INNER) rhs_votes = set() # Now, add the joins from rhs query into the new query (skipping base # table). rhs_tables = list(rhs.alias_map)[1:] for alias in rhs_tables: join = rhs.alias_map[alias] # If the left side of the join was already relabeled, use the # updated alias. join = join.relabeled_clone(change_map) new_alias = self.join(join, reuse=reuse) if join.join_type == INNER: rhs_votes.add(new_alias) # We can't reuse the same join again in the query. If we have two # distinct joins for the same connection in rhs query, then the # combined query must have two joins, too. reuse.discard(new_alias) if alias != new_alias: change_map[alias] = new_alias if not rhs.alias_refcount[alias]: # The alias was unused in the rhs query. Unref it so that it # will be unused in the new query, too. We have to add and # unref the alias so that join promotion has information of # the join type for the unused alias. 
self.unref_alias(new_alias) joinpromoter.add_votes(rhs_votes) joinpromoter.update_join_types(self) # Now relabel a copy of the rhs where-clause and add it to the current # one. w = rhs.where.clone() w.relabel_aliases(change_map) self.where.add(w, connector) # Selection columns and extra extensions are those provided by 'rhs'. if rhs.select: self.set_select([col.relabeled_clone(change_map) for col in rhs.select]) else: self.select = () if connector == OR: # It would be nice to be able to handle this, but the queries don't # really make sense (or return consistent value sets). Not worth # the extra complexity when you can write a real query instead. if self.extra and rhs.extra: raise ValueError("When merging querysets using 'or', you cannot have extra(select=...) on both sides.") self.extra.update(rhs.extra) extra_select_mask = set() if self.extra_select_mask is not None: extra_select_mask.update(self.extra_select_mask) if rhs.extra_select_mask is not None: extra_select_mask.update(rhs.extra_select_mask) if extra_select_mask: self.set_extra_mask(extra_select_mask) self.extra_tables += rhs.extra_tables # Ordering uses the 'rhs' ordering, unless it has none, in which case # the current ordering is used. self.order_by = rhs.order_by or self.order_by self.extra_order_by = rhs.extra_order_by or self.extra_order_by def deferred_to_data(self, target, callback): """ Convert the self.deferred_loading data structure to an alternate data structure, describing the field that *will* be loaded. This is used to compute the columns to select from the database and also by the QuerySet class to work out which fields are being initialized on each model. Models that have all their fields included aren't mentioned in the result, only those that have field restrictions in place. The "target" parameter is the instance that is populated (in place). The "callback" is a function that is called whenever a (model, field) pair need to be added to "target". It accepts three parameters: "target", and the model and list of fields being added for that model. """ field_names, defer = self.deferred_loading if not field_names: return orig_opts = self.get_meta() seen = {} must_include = {orig_opts.concrete_model: {orig_opts.pk}} for field_name in field_names: parts = field_name.split(LOOKUP_SEP) cur_model = self.model._meta.concrete_model opts = orig_opts for name in parts[:-1]: old_model = cur_model if name in self._filtered_relations: name = self._filtered_relations[name].relation_name source = opts.get_field(name) if is_reverse_o2o(source): cur_model = source.related_model else: cur_model = source.remote_field.model opts = cur_model._meta # Even if we're "just passing through" this model, we must add # both the current model's pk and the related reference field # (if it's not a reverse relation) to the things we select. if not is_reverse_o2o(source): must_include[old_model].add(source) add_to_dict(must_include, cur_model, opts.pk) field = opts.get_field(parts[-1]) is_reverse_object = field.auto_created and not field.concrete model = field.related_model if is_reverse_object else field.model model = model._meta.concrete_model if model == opts.model: model = cur_model if not is_reverse_o2o(field): add_to_dict(seen, model, field) if defer: # We need to load all fields for each model, except those that # appear in "seen" (for all models that appear in "seen"). The only # slight complexity here is handling fields that exist on parent # models. 
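            # Illustrative example: if a child model defers a field inherited
            # from a concrete parent, the field's model resolves to the
            # parent, so the parent's remaining fields are recorded against
            # the parent model in the workset below.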
            workset = {}
            for model, values in seen.items():
                for field in model._meta.local_fields:
                    if field not in values:
                        m = field.model._meta.concrete_model
                        add_to_dict(workset, m, field)
            for model, values in must_include.items():
                # If we haven't included a model in workset, we don't add the
                # corresponding must_include fields for that model, since an
                # empty set means "include all fields". That's why there's no
                # "else" branch here.
                if model in workset:
                    workset[model].update(values)
            for model, values in workset.items():
                callback(target, model, values)
        else:
            for model, values in must_include.items():
                if model in seen:
                    seen[model].update(values)
                else:
                    # As we've passed through this model, but not explicitly
                    # included any fields, we have to make sure it's mentioned
                    # so that only the "must include" fields are pulled in.
                    seen[model] = values
            # Now ensure that every model in the inheritance chain is mentioned
            # in the parent list. Again, it must be mentioned to ensure that
            # only "must include" fields are pulled in.
            for model in orig_opts.get_parent_list():
                seen.setdefault(model, set())
            for model, values in seen.items():
                callback(target, model, values)

    def table_alias(self, table_name, create=False, filtered_relation=None):
        """
        Return a table alias for the given table_name and whether this is a
        new alias or not.

        If 'create' is true, a new alias is always created. Otherwise, the
        most recently created alias for the table (if one exists) is reused.
        """
        alias_list = self.table_map.get(table_name)
        if not create and alias_list:
            alias = alias_list[0]
            self.alias_refcount[alias] += 1
            return alias, False

        # Create a new alias for this table.
        if alias_list:
            alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
            alias_list.append(alias)
        else:
            # The first occurrence of a table uses the table name directly.
            alias = filtered_relation.alias if filtered_relation is not None else table_name
            self.table_map[table_name] = [alias]
        self.alias_refcount[alias] = 1
        return alias, True

    def ref_alias(self, alias):
        """Increases the reference count for this alias."""
        self.alias_refcount[alias] += 1

    def unref_alias(self, alias, amount=1):
        """Decreases the reference count for this alias."""
        self.alias_refcount[alias] -= amount

    def promote_joins(self, aliases):
        """
        Promote recursively the join type of given aliases and their children
        to an outer join. A join is only promoted if it is nullable or the
        parent join is an outer join.

        The children promotion is done to avoid join chains that contain
        a LOUTER b INNER c. So, if we have currently a INNER b INNER c and
        a->b is promoted, then we must also promote b->c automatically, or
        otherwise the promotion of a->b doesn't actually change anything in
        the query results.
        """
        aliases = list(aliases)
        while aliases:
            alias = aliases.pop(0)
            if self.alias_map[alias].join_type is None:
                # This is the base table (first FROM entry) - this table
                # isn't really joined at all in the query, so we should not
                # alter its join type.
                continue
            # Only the first alias (skipped above) should have None join_type
            assert self.alias_map[alias].join_type is not None
            parent_alias = self.alias_map[alias].parent_alias
            parent_louter = parent_alias and self.alias_map[parent_alias].join_type == LOUTER
            already_louter = self.alias_map[alias].join_type == LOUTER
            if ((self.alias_map[alias].nullable or parent_louter) and
                    not already_louter):
                self.alias_map[alias] = self.alias_map[alias].promote()
                # Join type of 'alias' changed, so re-examine all aliases that
                # refer to this one.
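                # Illustrative example: promoting a->b in a INNER b INNER c
                # re-queues b's children here, so b->c is promoted too and
                # the invalid chain a LOUTER b INNER c is never produced.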
                aliases.extend(
                    join for join in self.alias_map
                    if self.alias_map[join].parent_alias == alias and join not in aliases
                )

    def demote_joins(self, aliases):
        """
        Change join type from LOUTER to INNER for all joins in aliases.

        Similarly to promote_joins(), this method must ensure no join chains
        containing first an outer, then an inner join are generated. If we
        are demoting the b->c join in chain a LOUTER b LOUTER c then we must
        demote a->b automatically, or otherwise the demotion of b->c doesn't
        actually change anything in the query results.
        """
        aliases = list(aliases)
        while aliases:
            alias = aliases.pop(0)
            if self.alias_map[alias].join_type == LOUTER:
                self.alias_map[alias] = self.alias_map[alias].demote()
                parent_alias = self.alias_map[alias].parent_alias
                if self.alias_map[parent_alias].join_type == INNER:
                    aliases.append(parent_alias)

    def reset_refcounts(self, to_counts):
        """
        Reset reference counts for aliases so that they match the value passed
        in `to_counts`.
        """
        for alias, cur_refcount in self.alias_refcount.copy().items():
            unref_amount = cur_refcount - to_counts.get(alias, 0)
            self.unref_alias(alias, unref_amount)

    def change_aliases(self, change_map):
        """
        Change the aliases in change_map (which maps old-alias -> new-alias),
        relabelling any references to them in select columns and the where
        clause.
        """
        assert set(change_map).isdisjoint(change_map.values())

        # 1. Update references in "select" (normal columns plus aliases),
        # "group by" and "where".
        self.where.relabel_aliases(change_map)
        if isinstance(self.group_by, tuple):
            self.group_by = tuple([col.relabeled_clone(change_map) for col in self.group_by])
        self.select = tuple([col.relabeled_clone(change_map) for col in self.select])
        self.annotations = self.annotations and {
            key: col.relabeled_clone(change_map) for key, col in self.annotations.items()
        }

        # 2. Rename the alias in the internal table/alias datastructures.
        for old_alias, new_alias in change_map.items():
            if old_alias not in self.alias_map:
                continue
            alias_data = self.alias_map[old_alias].relabeled_clone(change_map)
            self.alias_map[new_alias] = alias_data
            self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
            del self.alias_refcount[old_alias]
            del self.alias_map[old_alias]

            table_aliases = self.table_map[alias_data.table_name]
            for pos, alias in enumerate(table_aliases):
                if alias == old_alias:
                    table_aliases[pos] = new_alias
                    break
        self.external_aliases = {
            # Table is aliased or it's being changed and thus is aliased.
            change_map.get(alias, alias): (aliased or alias in change_map)
            for alias, aliased in self.external_aliases.items()
        }

    def bump_prefix(self, outer_query):
        """
        Change the alias prefix to the next letter in the alphabet in a way
        that the outer query's aliases and this query's aliases will not
        conflict. Even tables that previously had no alias will get an alias
        after this call.
        """
        def prefix_gen():
            """
            Generate a sequence of characters in alphabetical order:
                -> 'A', 'B', 'C', ...

            When the alphabet is finished, the sequence will continue with the
            Cartesian product:
                -> 'AA', 'AB', 'AC', ...
            """
            alphabet = ascii_uppercase
            prefix = chr(ord(self.alias_prefix) + 1)
            yield prefix
            for n in count(1):
                seq = alphabet[alphabet.index(prefix):] if prefix else alphabet
                for s in product(seq, repeat=n):
                    yield ''.join(s)
                prefix = None

        if self.alias_prefix != outer_query.alias_prefix:
            # No clashes between self and outer query should be possible.
            return

        # Explicitly avoid infinite loop. The constant divider is based on how
        # much depth recursive subquery references add to the stack. This value
        # might need to be adjusted when adding or removing function calls from
        # the code path in charge of performing these operations.
        local_recursion_limit = sys.getrecursionlimit() // 16
        for pos, prefix in enumerate(prefix_gen()):
            if prefix not in self.subq_aliases:
                self.alias_prefix = prefix
                break
            if pos > local_recursion_limit:
                raise RecursionError(
                    'Maximum recursion depth exceeded: too many subqueries.'
                )
        self.subq_aliases = self.subq_aliases.union([self.alias_prefix])
        outer_query.subq_aliases = outer_query.subq_aliases.union(self.subq_aliases)
        self.change_aliases({
            alias: '%s%d' % (self.alias_prefix, pos)
            for pos, alias in enumerate(self.alias_map)
        })

    def get_initial_alias(self):
        """
        Return the first alias for this query, after increasing its reference
        count.
        """
        if self.alias_map:
            alias = self.base_table
            self.ref_alias(alias)
        else:
            alias = self.join(BaseTable(self.get_meta().db_table, None))
        return alias

    def count_active_tables(self):
        """
        Return the number of tables in this query with a non-zero reference
        count. After execution, the reference counts are zeroed, so tables
        added in compiler will not be seen by this method.
        """
        return len([1 for count in self.alias_refcount.values() if count])

    def join(self, join, reuse=None, reuse_with_filtered_relation=False):
        """
        Return an alias for the 'join', either reusing an existing alias for
        that join or creating a new one. 'join' is either a
        sql.datastructures.BaseTable or Join.

        The 'reuse' parameter can be either None which means all joins are
        reusable, or it can be a set containing the aliases that can be reused.

        The 'reuse_with_filtered_relation' parameter is used when computing
        FilteredRelation instances.

        A join is always created as LOUTER if the lhs alias is LOUTER to make
        sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new
        joins are created as LOUTER if the join is nullable.
        """
        if reuse_with_filtered_relation and reuse:
            reuse_aliases = [
                a for a, j in self.alias_map.items()
                if a in reuse and j.equals(join, with_filtered_relation=False)
            ]
        else:
            reuse_aliases = [
                a for a, j in self.alias_map.items()
                if (reuse is None or a in reuse) and j == join
            ]
        if reuse_aliases:
            if join.table_alias in reuse_aliases:
                reuse_alias = join.table_alias
            else:
                # Reuse the most recent alias of the joined table
                # (a many-to-many relation may be joined multiple times).
                reuse_alias = reuse_aliases[-1]
            self.ref_alias(reuse_alias)
            return reuse_alias

        # No reuse is possible, so we need a new alias.
        alias, _ = self.table_alias(join.table_name, create=True, filtered_relation=join.filtered_relation)
        if join.join_type:
            if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:
                join_type = LOUTER
            else:
                join_type = INNER
            join.join_type = join_type
        join.table_alias = alias
        self.alias_map[alias] = join
        return alias

    def join_parent_model(self, opts, model, alias, seen):
        """
        Make sure the given 'model' is joined in the query. If 'model' isn't
        a parent of 'opts' or if it is None this method is a no-op.

        The 'alias' is the root alias for starting the join, 'seen' is a dict
        of model -> alias of existing joins. It must also contain a mapping of
        None -> some alias. This will be returned in the no-op case.
""" if model in seen: return seen[model] chain = opts.get_base_chain(model) if not chain: return alias curr_opts = opts for int_model in chain: if int_model in seen: curr_opts = int_model._meta alias = seen[int_model] continue # Proxy model have elements in base chain # with no parents, assign the new options # object and skip to the next base in that # case if not curr_opts.parents[int_model]: curr_opts = int_model._meta continue link_field = curr_opts.get_ancestor_link(int_model) join_info = self.setup_joins([link_field.name], curr_opts, alias) curr_opts = int_model._meta alias = seen[int_model] = join_info.joins[-1] return alias or seen[None] def add_annotation(self, annotation, alias, is_summary=False): """Add a single annotation expression to the Query.""" annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None, summarize=is_summary) self.append_annotation_mask([alias]) self.annotations[alias] = annotation def resolve_expression(self, query, *args, **kwargs): clone = self.clone() # Subqueries need to use a different set of aliases than the outer query. clone.bump_prefix(query) clone.subquery = True # It's safe to drop ordering if the queryset isn't using slicing, # distinct(*fields) or select_for_update(). if (self.low_mark == 0 and self.high_mark is None and not self.distinct_fields and not self.select_for_update): clone.clear_ordering(True) clone.where.resolve_expression(query, *args, **kwargs) for key, value in clone.annotations.items(): resolved = value.resolve_expression(query, *args, **kwargs) if hasattr(resolved, 'external_aliases'): resolved.external_aliases.update(clone.external_aliases) clone.annotations[key] = resolved # Outer query's aliases are considered external. for alias, table in query.alias_map.items(): clone.external_aliases[alias] = ( (isinstance(table, Join) and table.join_field.related_model._meta.db_table != alias) or (isinstance(table, BaseTable) and table.table_name != table.table_alias) ) return clone def get_external_cols(self): exprs = chain(self.annotations.values(), self.where.children) return [ col for col in self._gen_cols(exprs) if col.alias in self.external_aliases ] def as_sql(self, compiler, connection): sql, params = self.get_compiler(connection=connection).as_sql() if self.subquery: sql = '(%s)' % sql return sql, params def resolve_lookup_value(self, value, can_reuse, allow_joins): if hasattr(value, 'resolve_expression'): value = value.resolve_expression( self, reuse=can_reuse, allow_joins=allow_joins, ) elif isinstance(value, (list, tuple)): # The items of the iterable may be expressions and therefore need # to be resolved independently. return type(value)( self.resolve_lookup_value(sub_value, can_reuse, allow_joins) for sub_value in value ) return value def solve_lookup_type(self, lookup): """ Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains'). """ lookup_splitted = lookup.split(LOOKUP_SEP) if self.annotations: expression, expression_lookups = refs_expression(lookup_splitted, self.annotations) if expression: return expression_lookups, (), expression _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta()) field_parts = lookup_splitted[0:len(lookup_splitted) - len(lookup_parts)] if len(lookup_parts) > 1 and not field_parts: raise FieldError( 'Invalid lookup "%s" for model %s".' 
% (lookup, self.get_meta().model.__name__) ) return lookup_parts, field_parts, False def check_query_object_type(self, value, opts, field): """ Check whether the object passed while querying is of the correct type. If not, raise a ValueError specifying the wrong object. """ if hasattr(value, '_meta'): if not check_rel_lookup_compatibility(value._meta.model, opts, field): raise ValueError( 'Cannot query "%s": Must be "%s" instance.' % (value, opts.object_name)) def check_related_objects(self, field, value, opts): """Check the type of object passed to query relations.""" if field.is_relation: # Check that the field and the queryset use the same model in a # query like .filter(author=Author.objects.all()). For example, the # opts would be Author's (from the author field) and value.model # would be Author.objects.all() queryset's .model (Author also). # The field is the related field on the lhs side. if (isinstance(value, Query) and not value.has_select_fields and not check_rel_lookup_compatibility(value.model, opts, field)): raise ValueError( 'Cannot use QuerySet for "%s": Use a QuerySet for "%s".' % (value.model._meta.object_name, opts.object_name) ) elif hasattr(value, '_meta'): self.check_query_object_type(value, opts, field) elif hasattr(value, '__iter__'): for v in value: self.check_query_object_type(v, opts, field) def check_filterable(self, expression): """Raise an error if expression cannot be used in a WHERE clause.""" if ( hasattr(expression, 'resolve_expression') and not getattr(expression, 'filterable', True) ): raise NotSupportedError( expression.__class__.__name__ + ' is disallowed in the filter ' 'clause.' ) if hasattr(expression, 'get_source_expressions'): for expr in expression.get_source_expressions(): self.check_filterable(expr) def build_lookup(self, lookups, lhs, rhs): """ Try to extract transforms and lookup from given lhs. The lhs value is something that works like SQLExpression. The rhs value is what the lookup is going to compare against. The lookups is a list of names to extract using get_lookup() and get_transform(). """ # __exact is the default lookup if one isn't given. *transforms, lookup_name = lookups or ['exact'] for name in transforms: lhs = self.try_transform(lhs, name) # First try get_lookup() so that the lookup takes precedence if the lhs # supports both transform and lookup for the name. lookup_class = lhs.get_lookup(lookup_name) if not lookup_class: if lhs.field.is_relation: raise FieldError('Related Field got invalid lookup: {}'.format(lookup_name)) # A lookup wasn't found. Try to interpret the name as a transform # and do an Exact lookup against it. lhs = self.try_transform(lhs, lookup_name) lookup_name = 'exact' lookup_class = lhs.get_lookup(lookup_name) if not lookup_class: return lookup = lookup_class(lhs, rhs) # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all # uses of None as a query value unless the lookup supports it. if lookup.rhs is None and not lookup.can_use_none_as_rhs: if lookup_name not in ('exact', 'iexact'): raise ValueError("Cannot use None as a query value") return lhs.get_lookup('isnull')(lhs, True) # For Oracle '' is equivalent to null. The check must be done at this # stage because join promotion can't be done in the compiler. Using # DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here. # A similar thing is done in is_nullable(), too. 
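        # Illustrative example: on Oracle, name__exact='' is rewritten below
        # to an isnull lookup, since Oracle treats the empty string as NULL.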
if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and lookup_name == 'exact' and lookup.rhs == ''): return lhs.get_lookup('isnull')(lhs, True) return lookup def try_transform(self, lhs, name): """ Helper method for build_lookup(). Try to fetch and initialize a transform for name parameter from lhs. """ transform_class = lhs.get_transform(name) if transform_class: return transform_class(lhs) else: output_field = lhs.output_field.__class__ suggested_lookups = difflib.get_close_matches(name, output_field.get_lookups()) if suggested_lookups: suggestion = ', perhaps you meant %s?' % ' or '.join(suggested_lookups) else: suggestion = '.' raise FieldError( "Unsupported lookup '%s' for %s or join on the field not " "permitted%s" % (name, output_field.__name__, suggestion) ) def build_filter(self, filter_expr, branch_negated=False, current_negated=False, can_reuse=None, allow_joins=True, split_subq=True, reuse_with_filtered_relation=False, check_filterable=True): """ Build a WhereNode for a single filter clause but don't add it to this Query. Query.add_q() will then add this filter to the where Node. The 'branch_negated' tells us if the current branch contains any negations. This will be used to determine if subqueries are needed. The 'current_negated' is used to determine if the current filter is negated or not and this will be used to determine if IS NULL filtering is needed. The difference between current_negated and branch_negated is that branch_negated is set on first negation, but current_negated is flipped for each negation. Note that add_filter will not do any negating itself, that is done upper in the code by add_q(). The 'can_reuse' is a set of reusable joins for multijoins. If 'reuse_with_filtered_relation' is True, then only joins in can_reuse will be reused. The method will create a filter clause that can be added to the current query. However, if the filter isn't added to the query then the caller is responsible for unreffing the joins used. 
""" if isinstance(filter_expr, dict): raise FieldError("Cannot parse keyword query as dict") if isinstance(filter_expr, Q): return self._add_q( filter_expr, branch_negated=branch_negated, current_negated=current_negated, used_aliases=can_reuse, allow_joins=allow_joins, split_subq=split_subq, check_filterable=check_filterable, ) if hasattr(filter_expr, 'resolve_expression'): if not getattr(filter_expr, 'conditional', False): raise TypeError('Cannot filter against a non-conditional expression.') condition = self.build_lookup( ['exact'], filter_expr.resolve_expression(self, allow_joins=allow_joins), True ) clause = self.where_class() clause.add(condition, AND) return clause, [] arg, value = filter_expr if not arg: raise FieldError("Cannot parse keyword query %r" % arg) lookups, parts, reffed_expression = self.solve_lookup_type(arg) if check_filterable: self.check_filterable(reffed_expression) if not allow_joins and len(parts) > 1: raise FieldError("Joined field references are not permitted in this query") pre_joins = self.alias_refcount.copy() value = self.resolve_lookup_value(value, can_reuse, allow_joins) used_joins = {k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)} if check_filterable: self.check_filterable(value) clause = self.where_class() if reffed_expression: condition = self.build_lookup(lookups, reffed_expression, value) clause.add(condition, AND) return clause, [] opts = self.get_meta() alias = self.get_initial_alias() allow_many = not branch_negated or not split_subq try: join_info = self.setup_joins( parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many, reuse_with_filtered_relation=reuse_with_filtered_relation, ) # Prevent iterator from being consumed by check_related_objects() if isinstance(value, Iterator): value = list(value) self.check_related_objects(join_info.final_field, value, join_info.opts) # split_exclude() needs to know which joins were generated for the # lookup parts self._lookup_joins = join_info.joins except MultiJoin as e: return self.split_exclude(filter_expr, can_reuse, e.names_with_path) # Update used_joins before trimming since they are reused to determine # which joins could be later promoted to INNER. used_joins.update(join_info.joins) targets, alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path) if can_reuse is not None: can_reuse.update(join_list) if join_info.final_field.is_relation: # No support for transforms for relational fields num_lookups = len(lookups) if num_lookups > 1: raise FieldError('Related Field got invalid lookup: {}'.format(lookups[0])) if len(targets) == 1: col = self._get_col(targets[0], join_info.final_field, alias) else: col = MultiColSource(alias, targets, join_info.targets, join_info.final_field) else: col = self._get_col(targets[0], join_info.final_field, alias) condition = self.build_lookup(lookups, col, value) lookup_type = condition.lookup_name clause.add(condition, AND) require_outer = lookup_type == 'isnull' and condition.rhs is True and not current_negated if current_negated and (lookup_type != 'isnull' or condition.rhs is False) and condition.rhs is not None: require_outer = True if lookup_type != 'isnull': # The condition added here will be SQL like this: # NOT (col IS NOT NULL), where the first NOT is added in # upper layers of code. The reason for addition is that if col # is null, then col != someval will result in SQL "unknown" # which isn't the same as in Python. 
The Python None handling # is wanted, and it can be gotten by # (col IS NULL OR col != someval) # <=> # NOT (col IS NOT NULL AND col = someval). if ( self.is_nullable(targets[0]) or self.alias_map[join_list[-1]].join_type == LOUTER ): lookup_class = targets[0].get_lookup('isnull') col = self._get_col(targets[0], join_info.targets[0], alias) clause.add(lookup_class(col, False), AND) # If someval is a nullable column, someval IS NOT NULL is # added. if isinstance(value, Col) and self.is_nullable(value.target): lookup_class = value.target.get_lookup('isnull') clause.add(lookup_class(value, False), AND) return clause, used_joins if not require_outer else () def add_filter(self, filter_clause): self.add_q(Q(**{filter_clause[0]: filter_clause[1]})) def add_q(self, q_object): """ A preprocessor for the internal _add_q(). Responsible for doing final join promotion. """ # For join promotion this case is doing an AND for the added q_object # and existing conditions. So, any existing inner join forces the join # type to remain inner. Existing outer joins can however be demoted. # (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if # rel_a doesn't produce any rows, then the whole condition must fail. # So, demotion is OK. existing_inner = {a for a in self.alias_map if self.alias_map[a].join_type == INNER} clause, _ = self._add_q(q_object, self.used_aliases) if clause: self.where.add(clause, AND) self.demote_joins(existing_inner) def build_where(self, filter_expr): return self.build_filter(filter_expr, allow_joins=False)[0] def _add_q(self, q_object, used_aliases, branch_negated=False, current_negated=False, allow_joins=True, split_subq=True, check_filterable=True): """Add a Q-object to the current filter.""" connector = q_object.connector current_negated = current_negated ^ q_object.negated branch_negated = branch_negated or q_object.negated target_clause = self.where_class(connector=connector, negated=q_object.negated) joinpromoter = JoinPromoter(q_object.connector, len(q_object.children), current_negated) for child in q_object.children: child_clause, needed_inner = self.build_filter( child, can_reuse=used_aliases, branch_negated=branch_negated, current_negated=current_negated, allow_joins=allow_joins, split_subq=split_subq, check_filterable=check_filterable, ) joinpromoter.add_votes(needed_inner) if child_clause: target_clause.add(child_clause, connector) needed_inner = joinpromoter.update_join_types(self) return target_clause, needed_inner def build_filtered_relation_q(self, q_object, reuse, branch_negated=False, current_negated=False): """Add a FilteredRelation object to the current filter.""" connector = q_object.connector current_negated ^= q_object.negated branch_negated = branch_negated or q_object.negated target_clause = self.where_class(connector=connector, negated=q_object.negated) for child in q_object.children: if isinstance(child, Node): child_clause = self.build_filtered_relation_q( child, reuse=reuse, branch_negated=branch_negated, current_negated=current_negated, ) else: child_clause, _ = self.build_filter( child, can_reuse=reuse, branch_negated=branch_negated, current_negated=current_negated, allow_joins=True, split_subq=False, reuse_with_filtered_relation=True, ) target_clause.add(child_clause, connector) return target_clause def add_filtered_relation(self, filtered_relation, alias): filtered_relation.alias = alias lookups = dict(get_children_from_q(filtered_relation.condition)) for lookup in chain((filtered_relation.relation_name,), lookups): lookup_parts, 
field_parts, _ = self.solve_lookup_type(lookup) shift = 2 if not lookup_parts else 1 if len(field_parts) > (shift + len(lookup_parts)): raise ValueError( "FilteredRelation's condition doesn't support nested " "relations (got %r)." % lookup ) self._filtered_relations[filtered_relation.alias] = filtered_relation def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False): """ Walk the list of names and turns them into PathInfo tuples. A single name in 'names' can generate multiple PathInfos (m2m, for example). 'names' is the path of names to travel, 'opts' is the model Options we start the name resolving from, 'allow_many' is as for setup_joins(). If fail_on_missing is set to True, then a name that can't be resolved will generate a FieldError. Return a list of PathInfo tuples. In addition return the final field (the last used join field) and target (which is a field guaranteed to contain the same value as the final field). Finally, return those names that weren't found (which are likely transforms and the final lookup). """ path, names_with_path = [], [] for pos, name in enumerate(names): cur_names_with_path = (name, []) if name == 'pk': name = opts.pk.name field = None filtered_relation = None try: field = opts.get_field(name) except FieldDoesNotExist: if name in self.annotation_select: field = self.annotation_select[name].output_field elif name in self._filtered_relations and pos == 0: filtered_relation = self._filtered_relations[name] field = opts.get_field(filtered_relation.relation_name) if field is not None: # Fields that contain one-to-many relations with a generic # model (like a GenericForeignKey) cannot generate reverse # relations and therefore cannot be used for reverse querying. if field.is_relation and not field.related_model: raise FieldError( "Field %r does not generate an automatic reverse " "relation and therefore cannot be used for reverse " "querying. If it is a GenericForeignKey, consider " "adding a GenericRelation." % name ) try: model = field.model._meta.concrete_model except AttributeError: # QuerySet.annotate() may introduce fields that aren't # attached to a model. model = None else: # We didn't find the current field, so move position back # one step. pos -= 1 if pos == -1 or fail_on_missing: available = sorted([ *get_field_names_from_opts(opts), *self.annotation_select, *self._filtered_relations, ]) raise FieldError("Cannot resolve keyword '%s' into field. " "Choices are: %s" % (name, ", ".join(available))) break # Check if we need any joins for concrete inheritance cases (the # field lives in parent, but we are currently in one of its # children) if model is not opts.model: path_to_parent = opts.get_path_to_parent(model) if path_to_parent: path.extend(path_to_parent) cur_names_with_path[1].extend(path_to_parent) opts = path_to_parent[-1].to_opts if hasattr(field, 'get_path_info'): pathinfos = field.get_path_info(filtered_relation) if not allow_many: for inner_pos, p in enumerate(pathinfos): if p.m2m: cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1]) names_with_path.append(cur_names_with_path) raise MultiJoin(pos + 1, names_with_path) last = pathinfos[-1] path.extend(pathinfos) final_field = last.join_field opts = last.to_opts targets = last.target_fields cur_names_with_path[1].extend(pathinfos) names_with_path.append(cur_names_with_path) else: # Local non-relational field. final_field = field targets = (field,) if fail_on_missing and pos + 1 != len(names): raise FieldError( "Cannot resolve keyword %r into field. 
Join on '%s'" " not permitted." % (names[pos + 1], name)) break return path, final_field, targets, names[pos + 1:] def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True, reuse_with_filtered_relation=False): """ Compute the necessary table joins for the passage through the fields given in 'names'. 'opts' is the Options class for the current model (which gives the table we are starting from), 'alias' is the alias for the table to start the joining from. The 'can_reuse' defines the reverse foreign key joins we can reuse. It can be None in which case all joins are reusable or a set of aliases that can be reused. Note that non-reverse foreign keys are always reusable when using setup_joins(). The 'reuse_with_filtered_relation' can be used to force 'can_reuse' parameter and force the relation on the given connections. If 'allow_many' is False, then any reverse foreign key seen will generate a MultiJoin exception. Return the final field involved in the joins, the target field (used for any 'where' constraint), the final 'opts' value, the joins, the field path traveled to generate the joins, and a transform function that takes a field and alias and is equivalent to `field.get_col(alias)` in the simple case but wraps field transforms if they were included in names. The target field is the field containing the concrete value. Final field can be something different, for example foreign key pointing to that value. Final field is needed for example in some value conversions (convert 'obj' in fk__id=obj to pk val using the foreign key field for example). """ joins = [alias] # The transform can't be applied yet, as joins must be trimmed later. # To avoid making every caller of this method look up transforms # directly, compute transforms here and create a partial that converts # fields to the appropriate wrapped version. def final_transformer(field, alias): return field.get_col(alias) # Try resolving all the names as fields first. If there's an error, # treat trailing names as lookups until a field can be resolved. last_field_exception = None for pivot in range(len(names), 0, -1): try: path, final_field, targets, rest = self.names_to_path( names[:pivot], opts, allow_many, fail_on_missing=True, ) except FieldError as exc: if pivot == 1: # The first item cannot be a lookup, so it's safe # to raise the field error here. raise else: last_field_exception = exc else: # The transforms are the remaining items that couldn't be # resolved into fields. transforms = names[pivot:] break for name in transforms: def transform(field, alias, *, name, previous): try: wrapped = previous(field, alias) return self.try_transform(wrapped, name) except FieldError: # FieldError is raised if the transform doesn't exist. if isinstance(final_field, Field) and last_field_exception: raise last_field_exception else: raise final_transformer = functools.partial(transform, name=name, previous=final_transformer) # Then, add the path to the query's joins. Note that we can't trim # joins at this stage - we will need the information about join type # of the trimmed joins. 
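        # Illustrative example (hypothetical models): for names
        # ['author', 'name'] starting from a Book model, 'path' holds the
        # single Book -> Author join; the loop below creates (or reuses) an
        # alias for it and appends that alias to 'joins'.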
for join in path: if join.filtered_relation: filtered_relation = join.filtered_relation.clone() table_alias = filtered_relation.alias else: filtered_relation = None table_alias = None opts = join.to_opts if join.direct: nullable = self.is_nullable(join.join_field) else: nullable = True connection = Join( opts.db_table, alias, table_alias, INNER, join.join_field, nullable, filtered_relation=filtered_relation, ) reuse = can_reuse if join.m2m or reuse_with_filtered_relation else None alias = self.join( connection, reuse=reuse, reuse_with_filtered_relation=reuse_with_filtered_relation, ) joins.append(alias) if filtered_relation: filtered_relation.path = joins[:] return JoinInfo(final_field, targets, opts, joins, path, final_transformer) def trim_joins(self, targets, joins, path): """ The 'target' parameter is the final field being joined to, 'joins' is the full list of join aliases. The 'path' contain the PathInfos used to create the joins. Return the final target field and table alias and the new active joins. Always trim any direct join if the target column is already in the previous table. Can't trim reverse joins as it's unknown if there's anything on the other side of the join. """ joins = joins[:] for pos, info in enumerate(reversed(path)): if len(joins) == 1 or not info.direct: break if info.filtered_relation: break join_targets = {t.column for t in info.join_field.foreign_related_fields} cur_targets = {t.column for t in targets} if not cur_targets.issubset(join_targets): break targets_dict = {r[1].column: r[0] for r in info.join_field.related_fields if r[1].column in cur_targets} targets = tuple(targets_dict[t.column] for t in targets) self.unref_alias(joins.pop()) return targets, joins[-1], joins @classmethod def _gen_cols(cls, exprs): for expr in exprs: if isinstance(expr, Col): yield expr else: yield from cls._gen_cols(expr.get_source_expressions()) @classmethod def _gen_col_aliases(cls, exprs): yield from (expr.alias for expr in cls._gen_cols(exprs)) def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False): if not allow_joins and LOOKUP_SEP in name: raise FieldError("Joined field references are not permitted in this query") annotation = self.annotations.get(name) if annotation is not None: if not allow_joins: for alias in self._gen_col_aliases([annotation]): if isinstance(self.alias_map[alias], Join): raise FieldError( 'Joined field references are not permitted in ' 'this query' ) if summarize: # Summarize currently means we are doing an aggregate() query # which is executed as a wrapped subquery if any of the # aggregate() elements reference an existing annotation. In # that case we need to return a Ref to the subquery's annotation. return Ref(name, self.annotation_select[name]) else: return annotation else: field_list = name.split(LOOKUP_SEP) join_info = self.setup_joins(field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse) targets, final_alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path) if not allow_joins and len(join_list) > 1: raise FieldError('Joined field references are not permitted in this query') if len(targets) > 1: raise FieldError("Referencing multicolumn fields with F() objects " "isn't supported") # Verify that the last lookup in name is a field or a transform: # transform_function() raises FieldError if not. 
join_info.transform_function(targets[0], final_alias) if reuse is not None: reuse.update(join_list) return self._get_col(targets[0], join_info.targets[0], join_list[-1]) def split_exclude(self, filter_expr, can_reuse, names_with_path): """ When doing an exclude against any kind of N-to-many relation, we need to use a subquery. This method constructs the nested query, given the original exclude filter (filter_expr) and the portion up to the first N-to-many relation field. For example, if the origin filter is ~Q(child__name='foo'), filter_expr is ('child__name', 'foo') and can_reuse is a set of joins usable for filters in the original query. We will turn this into equivalent of: WHERE NOT (pk IN (SELECT parent_id FROM thetable WHERE name = 'foo' AND parent_id IS NOT NULL)) It might be worth it to consider using WHERE NOT EXISTS as that has saner null handling, and is easier for the backend's optimizer to handle. """ filter_lhs, filter_rhs = filter_expr if isinstance(filter_rhs, OuterRef): filter_expr = (filter_lhs, OuterRef(filter_rhs)) elif isinstance(filter_rhs, F): filter_expr = (filter_lhs, OuterRef(filter_rhs.name)) # Generate the inner query. query = Query(self.model) query._filtered_relations = self._filtered_relations query.add_filter(filter_expr) query.clear_ordering(True) # Try to have as simple as possible subquery -> trim leading joins from # the subquery. trimmed_prefix, contains_louter = query.trim_start(names_with_path) # Add extra check to make sure the selected field will not be null # since we are adding an IN <subquery> clause. This prevents the # database from tripping over IN (...,NULL,...) selects and returning # nothing col = query.select[0] select_field = col.target alias = col.alias if self.is_nullable(select_field): lookup_class = select_field.get_lookup('isnull') lookup = lookup_class(select_field.get_col(alias), False) query.where.add(lookup, AND) if alias in can_reuse: pk = select_field.model._meta.pk # Need to add a restriction so that outer query's filters are in effect for # the subquery, too. query.bump_prefix(self) lookup_class = select_field.get_lookup('exact') # Note that the query.select[0].alias is different from alias # due to bump_prefix above. lookup = lookup_class(pk.get_col(query.select[0].alias), pk.get_col(alias)) query.where.add(lookup, AND) query.external_aliases[alias] = True condition, needed_inner = self.build_filter( ('%s__in' % trimmed_prefix, query), current_negated=True, branch_negated=True, can_reuse=can_reuse) if contains_louter: or_null_condition, _ = self.build_filter( ('%s__isnull' % trimmed_prefix, True), current_negated=True, branch_negated=True, can_reuse=can_reuse) condition.add(or_null_condition, OR) # Note that the end result will be: # (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL. # This might look crazy but due to how IN works, this seems to be # correct. If the IS NOT NULL check is removed then outercol NOT # IN will return UNKNOWN. If the IS NULL check is removed, then if # outercol IS NULL we will not match the row. return condition, needed_inner def set_empty(self): self.where.add(NothingNode(), AND) def is_empty(self): return any(isinstance(c, NothingNode) for c in self.where.children) def set_limits(self, low=None, high=None): """ Adjust the limits on the rows retrieved. Use low/high to set these, as it makes it more Pythonic to read and write. When the SQL query is created, convert them to the appropriate offset and limit values. Apply any limits passed in here to the existing constraints. 
Add low to the current low value and clamp both to any existing high value. """ if high is not None: if self.high_mark is not None: self.high_mark = min(self.high_mark, self.low_mark + high) else: self.high_mark = self.low_mark + high if low is not None: if self.high_mark is not None: self.low_mark = min(self.high_mark, self.low_mark + low) else: self.low_mark = self.low_mark + low if self.low_mark == self.high_mark: self.set_empty() def clear_limits(self): """Clear any existing limits.""" self.low_mark, self.high_mark = 0, None @property def is_sliced(self): return self.low_mark != 0 or self.high_mark is not None def has_limit_one(self): return self.high_mark is not None and (self.high_mark - self.low_mark) == 1 def can_filter(self): """ Return True if adding filters to this instance is still possible. Typically, this means no limits or offsets have been put on the results. """ return not self.is_sliced def clear_select_clause(self): """Remove all fields from SELECT clause.""" self.select = () self.default_cols = False self.select_related = False self.set_extra_mask(()) self.set_annotation_mask(()) def clear_select_fields(self): """ Clear the list of fields to select (but not extra_select columns). Some queryset types completely replace any existing list of select columns. """ self.select = () self.values_select = () def add_select_col(self, col): self.select += col, self.values_select += col.output_field.name, def set_select(self, cols): self.default_cols = False self.select = tuple(cols) def add_distinct_fields(self, *field_names): """ Add and resolve the given fields to the query's "distinct on" clause. """ self.distinct_fields = field_names self.distinct = True def add_fields(self, field_names, allow_m2m=True): """ Add the given (model) fields to the select set. Add the field names in the order specified. """ alias = self.get_initial_alias() opts = self.get_meta() try: cols = [] for name in field_names: # Join promotion note - we must not remove any rows here, so # if there is no existing joins, use outer join. join_info = self.setup_joins(name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m) targets, final_alias, joins = self.trim_joins( join_info.targets, join_info.joins, join_info.path, ) for target in targets: cols.append(join_info.transform_function(target, final_alias)) if cols: self.set_select(cols) except MultiJoin: raise FieldError("Invalid field name: '%s'" % name) except FieldError: if LOOKUP_SEP in name: # For lookups spanning over relationships, show the error # from the model on which the lookup failed. raise else: names = sorted([ *get_field_names_from_opts(opts), *self.extra, *self.annotation_select, *self._filtered_relations ]) raise FieldError("Cannot resolve keyword %r into field. " "Choices are: %s" % (name, ", ".join(names))) def add_ordering(self, *ordering): """ Add items from the 'ordering' sequence to the query's "order by" clause. These items are either field names (not column names) -- possibly with a direction prefix ('-' or '?') -- or OrderBy expressions. If 'ordering' is empty, clear all ordering from the query. """ errors = [] for item in ordering: if isinstance(item, str): if '.' in item: warnings.warn( 'Passing column raw column aliases to order_by() is ' 'deprecated. Wrap %r in a RawSQL expression before ' 'passing it to order_by().' 
% item, category=RemovedInDjango40Warning, stacklevel=3, ) continue if item == '?': continue if item.startswith('-'): item = item[1:] if item in self.annotations: continue if self.extra and item in self.extra: continue # names_to_path() validates the lookup. A descriptive # FieldError will be raise if it's not. self.names_to_path(item.split(LOOKUP_SEP), self.model._meta) elif not hasattr(item, 'resolve_expression'): errors.append(item) if getattr(item, 'contains_aggregate', False): raise FieldError( 'Using an aggregate in order_by() without also including ' 'it in annotate() is not allowed: %s' % item ) if errors: raise FieldError('Invalid order_by arguments: %s' % errors) if ordering: self.order_by += ordering else: self.default_ordering = False def clear_ordering(self, force_empty): """ Remove any ordering settings. If 'force_empty' is True, there will be no ordering in the resulting query (not even the model's default). """ self.order_by = () self.extra_order_by = () if force_empty: self.default_ordering = False def set_group_by(self, allow_aliases=True): """ Expand the GROUP BY clause required by the query. This will usually be the set of all non-aggregate fields in the return data. If the database backend supports grouping by the primary key, and the query would be equivalent, the optimization will be made automatically. """ # Column names from JOINs to check collisions with aliases. if allow_aliases: column_names = set() seen_models = set() for join in list(self.alias_map.values())[1:]: # Skip base table. model = join.join_field.related_model if model not in seen_models: column_names.update({ field.column for field in model._meta.local_concrete_fields }) seen_models.add(model) group_by = list(self.select) if self.annotation_select: for alias, annotation in self.annotation_select.items(): signature = inspect.signature(annotation.get_group_by_cols) if 'alias' not in signature.parameters: annotation_class = annotation.__class__ msg = ( '`alias=None` must be added to the signature of ' '%s.%s.get_group_by_cols().' ) % (annotation_class.__module__, annotation_class.__qualname__) warnings.warn(msg, category=RemovedInDjango40Warning) group_by_cols = annotation.get_group_by_cols() else: if not allow_aliases or alias in column_names: alias = None group_by_cols = annotation.get_group_by_cols(alias=alias) group_by.extend(group_by_cols) self.group_by = tuple(group_by) def add_select_related(self, fields): """ Set up the select_related data structure so that we only select certain related models (as opposed to all models, when self.select_related=True). """ if isinstance(self.select_related, bool): field_dict = {} else: field_dict = self.select_related for field in fields: d = field_dict for part in field.split(LOOKUP_SEP): d = d.setdefault(part, {}) self.select_related = field_dict def add_extra(self, select, select_params, where, params, tables, order_by): """ Add data to the various extra_* attributes for user-created additions to the query. """ if select: # We need to pair any placeholder markers in the 'select' # dictionary with their parameters in 'select_params' so that # subsequent updates to the select dictionary also adjust the # parameters appropriately. 
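            # --- Illustrative sketch (not part of Django): the pairing
            # implemented just below, in miniature. One parameter is taken per
            # unescaped "%s"; "%%s" is an escaped literal and consumes nothing.
            # _pair_placeholders is a hypothetical name for demonstration.
            def _pair_placeholders(entry, params):
                params = iter(params)
                taken = []
                pos = entry.find('%s')
                while pos != -1:
                    if pos == 0 or entry[pos - 1] != '%':
                        taken.append(next(params))
                    pos = entry.find('%s', pos + 2)
                return entry, taken

            assert _pair_placeholders('age > %s AND tag != %%s', [30]) == (
                'age > %s AND tag != %%s', [30],
            )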
select_pairs = {} if select_params: param_iter = iter(select_params) else: param_iter = iter([]) for name, entry in select.items(): entry = str(entry) entry_params = [] pos = entry.find("%s") while pos != -1: if pos == 0 or entry[pos - 1] != '%': entry_params.append(next(param_iter)) pos = entry.find("%s", pos + 2) select_pairs[name] = (entry, entry_params) self.extra.update(select_pairs) if where or params: self.where.add(ExtraWhere(where, params), AND) if tables: self.extra_tables += tuple(tables) if order_by: self.extra_order_by = order_by def clear_deferred_loading(self): """Remove any fields from the deferred loading set.""" self.deferred_loading = (frozenset(), True) def add_deferred_loading(self, field_names): """ Add the given list of model field names to the set of fields to exclude from loading from the database when automatic column selection is done. Add the new field names to any existing field names that are deferred (or removed from any existing field names that are marked as the only ones for immediate loading). """ # Fields on related models are stored in the literal double-underscore # format, so that we can use a set datastructure. We do the foo__bar # splitting and handling when computing the SQL column names (as part of # get_columns()). existing, defer = self.deferred_loading if defer: # Add to existing deferred names. self.deferred_loading = existing.union(field_names), True else: # Remove names from the set of any existing "immediate load" names. self.deferred_loading = existing.difference(field_names), False def add_immediate_loading(self, field_names): """ Add the given list of model field names to the set of fields to retrieve when the SQL is executed ("immediate loading" fields). The field names replace any existing immediate loading field names. If there are field names already specified for deferred loading, remove those names from the new field_names before storing the new names for immediate loading. (That is, immediate loading overrides any existing immediate values, but respects existing deferrals.) """ existing, defer = self.deferred_loading field_names = set(field_names) if 'pk' in field_names: field_names.remove('pk') field_names.add(self.get_meta().pk.name) if defer: # Remove any existing deferred names from the current set before # setting the new names. self.deferred_loading = field_names.difference(existing), False else: # Replace any existing "immediate load" field names. self.deferred_loading = frozenset(field_names), False def get_loaded_field_names(self): """ If any fields are marked to be deferred, return a dictionary mapping models to a set of names in those fields that will be loaded. If a model is not in the returned dictionary, none of its fields are deferred. If no fields are marked for deferral, return an empty dictionary. 
""" # We cache this because we call this function multiple times # (compiler.fill_related_selections, query.iterator) try: return self._loaded_field_names_cache except AttributeError: collection = {} self.deferred_to_data(collection, self.get_loaded_field_names_cb) self._loaded_field_names_cache = collection return collection def get_loaded_field_names_cb(self, target, model, fields): """Callback used by get_deferred_field_names().""" target[model] = {f.attname for f in fields} def set_annotation_mask(self, names): """Set the mask of annotations that will be returned by the SELECT.""" if names is None: self.annotation_select_mask = None else: self.annotation_select_mask = set(names) self._annotation_select_cache = None def append_annotation_mask(self, names): if self.annotation_select_mask is not None: self.set_annotation_mask(self.annotation_select_mask.union(names)) def set_extra_mask(self, names): """ Set the mask of extra select items that will be returned by SELECT. Don't remove them from the Query since they might be used later. """ if names is None: self.extra_select_mask = None else: self.extra_select_mask = set(names) self._extra_select_cache = None def set_values(self, fields): self.select_related = False self.clear_deferred_loading() self.clear_select_fields() if fields: field_names = [] extra_names = [] annotation_names = [] if not self.extra and not self.annotations: # Shortcut - if there are no extra or annotations, then # the values() clause must be just field names. field_names = list(fields) else: self.default_cols = False for f in fields: if f in self.extra_select: extra_names.append(f) elif f in self.annotation_select: annotation_names.append(f) else: field_names.append(f) self.set_extra_mask(extra_names) self.set_annotation_mask(annotation_names) else: field_names = [f.attname for f in self.model._meta.concrete_fields] # Selected annotations must be known before setting the GROUP BY # clause. if self.group_by is True: self.add_fields((f.attname for f in self.model._meta.concrete_fields), False) # Disable GROUP BY aliases to avoid orphaning references to the # SELECT clause which is about to be cleared. self.set_group_by(allow_aliases=False) self.clear_select_fields() elif self.group_by: # Resolve GROUP BY annotation references if they are not part of # the selected fields anymore. group_by = [] for expr in self.group_by: if isinstance(expr, Ref) and expr.refs not in field_names: expr = self.annotations[expr.refs] group_by.append(expr) self.group_by = tuple(group_by) self.values_select = tuple(field_names) self.add_fields(field_names, True) @property def annotation_select(self): """ Return the dictionary of aggregate columns that are not masked and should be used in the SELECT clause. Cache this result for performance. 
""" if self._annotation_select_cache is not None: return self._annotation_select_cache elif not self.annotations: return {} elif self.annotation_select_mask is not None: self._annotation_select_cache = { k: v for k, v in self.annotations.items() if k in self.annotation_select_mask } return self._annotation_select_cache else: return self.annotations @property def extra_select(self): if self._extra_select_cache is not None: return self._extra_select_cache if not self.extra: return {} elif self.extra_select_mask is not None: self._extra_select_cache = { k: v for k, v in self.extra.items() if k in self.extra_select_mask } return self._extra_select_cache else: return self.extra def trim_start(self, names_with_path): """ Trim joins from the start of the join path. The candidates for trim are the PathInfos in names_with_path structure that are m2m joins. Also set the select column so the start matches the join. This method is meant to be used for generating the subquery joins & cols in split_exclude(). Return a lookup usable for doing outerq.filter(lookup=self) and a boolean indicating if the joins in the prefix contain a LEFT OUTER join. _""" all_paths = [] for _, paths in names_with_path: all_paths.extend(paths) contains_louter = False # Trim and operate only on tables that were generated for # the lookup part of the query. That is, avoid trimming # joins generated for F() expressions. lookup_tables = [ t for t in self.alias_map if t in self._lookup_joins or t == self.base_table ] for trimmed_paths, path in enumerate(all_paths): if path.m2m: break if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER: contains_louter = True alias = lookup_tables[trimmed_paths] self.unref_alias(alias) # The path.join_field is a Rel, lets get the other side's field join_field = path.join_field.field # Build the filter prefix. paths_in_prefix = trimmed_paths trimmed_prefix = [] for name, path in names_with_path: if paths_in_prefix - len(path) < 0: break trimmed_prefix.append(name) paths_in_prefix -= len(path) trimmed_prefix.append( join_field.foreign_related_fields[0].name) trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix) # Lets still see if we can trim the first join from the inner query # (that is, self). We can't do this for: # - LEFT JOINs because we would miss those rows that have nothing on # the outer side, # - INNER JOINs from filtered relations because we would miss their # filters. first_join = self.alias_map[lookup_tables[trimmed_paths + 1]] if first_join.join_type != LOUTER and not first_join.filtered_relation: select_fields = [r[0] for r in join_field.related_fields] select_alias = lookup_tables[trimmed_paths + 1] self.unref_alias(lookup_tables[trimmed_paths]) extra_restriction = join_field.get_extra_restriction( self.where_class, None, lookup_tables[trimmed_paths + 1]) if extra_restriction: self.where.add(extra_restriction, AND) else: # TODO: It might be possible to trim more joins from the start of the # inner query if it happens to have a longer join chain containing the # values in select_fields. Lets punt this one for now. select_fields = [r[1] for r in join_field.related_fields] select_alias = lookup_tables[trimmed_paths] # The found starting point is likely a Join instead of a BaseTable reference. # But the first entry in the query's FROM clause must not be a JOIN. 
for table in self.alias_map: if self.alias_refcount[table] > 0: self.alias_map[table] = BaseTable(self.alias_map[table].table_name, table) break self.set_select([f.get_col(select_alias) for f in select_fields]) return trimmed_prefix, contains_louter def is_nullable(self, field): """ Check if the given field should be treated as nullable. Some backends treat '' as null and Django treats such fields as nullable for those backends. In such situations field.null can be False even if we should treat the field as nullable. """ # We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have # (nor should it have) knowledge of which connection is going to be # used. The proper fix would be to defer all decisions where # is_nullable() is needed to the compiler stage, but that is not easy # to do currently. return ( connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and field.empty_strings_allowed ) or field.null def get_order_dir(field, default='ASC'): """ Return the field name and direction for an order specification. For example, '-foo' is returned as ('foo', 'DESC'). The 'default' param is used to indicate which way no prefix (or a '+' prefix) should sort. The '-' prefix always sorts the opposite way. """ dirn = ORDER_DIR[default] if field[0] == '-': return field[1:], dirn[1] return field, dirn[0] def add_to_dict(data, key, value): """ Add "value" to the set of values for "key", whether or not "key" already exists. """ if key in data: data[key].add(value) else: data[key] = {value} def is_reverse_o2o(field): """ Check if the given field is reverse-o2o. The field is expected to be some sort of relation field or related object. """ return field.is_relation and field.one_to_one and not field.concrete class JoinPromoter: """ A class to abstract away join promotion problems for complex filter conditions. """ def __init__(self, connector, num_children, negated): self.connector = connector self.negated = negated if self.negated: if connector == AND: self.effective_connector = OR else: self.effective_connector = AND else: self.effective_connector = self.connector self.num_children = num_children # Maps of table alias to how many times it is seen as required for # inner and/or outer joins. self.votes = Counter() def add_votes(self, votes): """ Add single vote per item to self.votes. Parameter can be any iterable. """ self.votes.update(votes) def update_join_types(self, query): """ Change join types so that the generated query is as efficient as possible, but still correct. So, change as many joins as possible to INNER, but don't make OUTER joins INNER if that could remove results from the query. """ to_promote = set() to_demote = set() # The effective_connector is used so that NOT (a AND b) is treated # similarly to (a OR b) for join promotion. for table, votes in self.votes.items(): # We must use outer joins in OR case when the join isn't contained # in all of the joins. Otherwise the INNER JOIN itself could remove # valid results. Consider the case where a model with rel_a and # rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now, # if rel_a join doesn't produce any results is null (for example # reverse foreign key or null value in direct foreign key), and # there is a matching row in rel_b with col=2, then an INNER join # to rel_a would remove a valid match from the query. So, we need # to promote any existing INNER to LOUTER (it is possible this # promotion in turn will be demoted later on). 
if self.effective_connector == 'OR' and votes < self.num_children: to_promote.add(table) # If connector is AND and there is a filter that can match only # when there is a joinable row, then use INNER. For example, in # rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL # as join output, then the col=1 or col=2 can't match (as # NULL=anything is always false). # For the OR case, if all children voted for a join to be inner, # then we can use INNER for the join. For example: # (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell) # then if rel_a doesn't produce any rows, the whole condition # can't match. Hence we can safely use INNER join. if self.effective_connector == 'AND' or ( self.effective_connector == 'OR' and votes == self.num_children): to_demote.add(table) # Finally, what happens in cases where we have: # (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0 # Now, we first generate the OR clause, and promote joins for it # in the first if branch above. Both rel_a and rel_b are promoted # to LOUTER joins. After that we do the AND case. The OR case # voted no inner joins but the rel_a__col__gte=0 votes inner join # for rel_a. We demote it back to INNER join (in AND case a single # vote is enough). The demotion is OK, if rel_a doesn't produce # rows, then the rel_a__col__gte=0 clause can't be true, and thus # the whole clause must be false. So, it is safe to use INNER # join. # Note that in this example we could just as well have the __gte # clause and the OR clause swapped. Or we could replace the __gte # clause with an OR clause containing rel_a__col=1|rel_a__col=2, # and again we could safely demote to INNER. query.promote_joins(to_promote) query.demote_joins(to_demote) return to_demote
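# --- Illustrative sketch (not part of Django): the promotion rule above in
# miniature. Under OR, a table joined in only some children must become LEFT
# OUTER, while a table every child voted for may stay INNER. Names below are
# stand-ins for the real JoinPromoter state.
from collections import Counter as _Counter

def _decide(connector, num_children, votes):
    promote, demote = set(), set()
    for table, count in votes.items():
        if connector == 'OR' and count < num_children:
            promote.add(table)
        if connector == 'AND' or (connector == 'OR' and count == num_children):
            demote.add(table)
    return promote, demote

# rel_a__col=1 | rel_b__col=2: each child votes only for its own join, so
# both joins are promoted to LOUTER and nothing is demoted.
_votes = _Counter(['rel_a', 'rel_b'])
assert _decide('OR', 2, _votes) == ({'rel_a', 'rel_b'}, set())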
from django.db import ProgrammingError from django.utils.functional import cached_property class BaseDatabaseFeatures: gis_enabled = False allows_group_by_pk = False allows_group_by_selected_pks = False empty_fetchmany_value = [] update_can_self_select = True # Does the backend distinguish between '' and None? interprets_empty_strings_as_nulls = False # Does the backend allow inserting duplicate NULL rows in a nullable # unique field? All core backends implement this correctly, but other # databases such as SQL Server do not. supports_nullable_unique_constraints = True # Does the backend allow inserting duplicate rows when a unique_together # constraint exists and some fields are nullable but not all of them? supports_partially_nullable_unique_constraints = True # Does the backend support initially deferrable unique constraints? supports_deferrable_unique_constraints = False can_use_chunked_reads = True can_return_columns_from_insert = False can_return_rows_from_bulk_insert = False has_bulk_insert = True uses_savepoints = True can_release_savepoints = False # If True, don't use integer foreign keys referring to, e.g., positive # integer primary keys. related_fields_match_type = False allow_sliced_subqueries_with_in = True has_select_for_update = False has_select_for_update_nowait = False has_select_for_update_skip_locked = False has_select_for_update_of = False has_select_for_no_key_update = False # Does the database's SELECT FOR UPDATE OF syntax require a column rather # than a table? select_for_update_of_column = False # Does the default test database allow multiple connections? # Usually an indication that the test database is in-memory test_db_allows_multiple_connections = True # Can an object be saved without an explicit primary key? supports_unspecified_pk = False # Can a fixture contain forward references? i.e., are # FK constraints checked at the end of transaction, or # at the end of each save operation? supports_forward_references = True # Does the backend truncate names properly when they are too long? truncates_names = False # Is there a REAL datatype in addition to floats/doubles? has_real_datatype = False supports_subqueries_in_group_by = True # Is there a true datatype for uuid? has_native_uuid_field = False # Is there a true datatype for timedeltas? has_native_duration_field = False # Does the database driver supports same type temporal data subtraction # by returning the type used to store duration field? supports_temporal_subtraction = False # Does the __regex lookup support backreferencing and grouping? supports_regex_backreferencing = True # Can date/datetime lookups be performed using a string? supports_date_lookup_using_string = True # Can datetimes with timezones be used? supports_timezones = True # Does the database have a copy of the zoneinfo database? has_zoneinfo_database = True # When performing a GROUP BY, is an ORDER BY NULL required # to remove any ordering? requires_explicit_null_ordering_when_grouping = False # Does the backend order NULL values as largest or smallest? nulls_order_largest = False # Does the backend support NULLS FIRST and NULLS LAST in ORDER BY? supports_order_by_nulls_modifier = True # Does the backend orders NULLS FIRST by default? order_by_nulls_first = False # The database's limit on the number of query parameters. max_query_params = None # Can an object have an autoincrement primary key of 0? MySQL says No. 
allows_auto_pk_0 = True # Do we need to NULL a ForeignKey out, or can the constraint check be # deferred can_defer_constraint_checks = False # date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas supports_mixed_date_datetime_comparisons = True # Does the backend support tablespaces? Default to False because it isn't # in the SQL standard. supports_tablespaces = False # Does the backend reset sequences between tests? supports_sequence_reset = True # Can the backend introspect the default value of a column? can_introspect_default = True # Confirm support for introspected foreign keys # Every database can do this reliably, except MySQL, # which can't do it for MyISAM tables can_introspect_foreign_keys = True # Map fields which some backends may not be able to differentiate to the # field it's introspected as. introspected_field_types = { 'AutoField': 'AutoField', 'BigAutoField': 'BigAutoField', 'BigIntegerField': 'BigIntegerField', 'BinaryField': 'BinaryField', 'BooleanField': 'BooleanField', 'CharField': 'CharField', 'DurationField': 'DurationField', 'GenericIPAddressField': 'GenericIPAddressField', 'IntegerField': 'IntegerField', 'PositiveBigIntegerField': 'PositiveBigIntegerField', 'PositiveIntegerField': 'PositiveIntegerField', 'PositiveSmallIntegerField': 'PositiveSmallIntegerField', 'SmallAutoField': 'SmallAutoField', 'SmallIntegerField': 'SmallIntegerField', 'TimeField': 'TimeField', } # Can the backend introspect the column order (ASC/DESC) for indexes? supports_index_column_ordering = True # Does the backend support introspection of materialized views? can_introspect_materialized_views = False # Support for the DISTINCT ON clause can_distinct_on_fields = False # Does the backend prevent running SQL queries in broken transactions? atomic_transactions = True # Can we roll back DDL in a transaction? can_rollback_ddl = False # Does it support operations requiring references rename in a transaction? supports_atomic_references_rename = True # Can we issue more than one ALTER COLUMN clause in an ALTER TABLE? supports_combined_alters = False # Does it support foreign keys? supports_foreign_keys = True # Can it create foreign key constraints inline when adding columns? can_create_inline_fk = True # Does it support CHECK constraints? supports_column_check_constraints = True supports_table_check_constraints = True # Does the backend support introspection of CHECK constraints? can_introspect_check_constraints = True # Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value}) # parameter passing? Note this can be provided by the backend even if not # supported by the Python driver supports_paramstyle_pyformat = True # Does the backend require literal defaults, rather than parameterized ones? requires_literal_defaults = False # Does the backend require a connection reset after each material schema change? connection_persists_old_columns = False # What kind of error does the backend throw when accessing closed cursor? closed_cursor_error_class = ProgrammingError # Does 'a' LIKE 'A' match? has_case_insensitive_like = True # Suffix for backends that don't support "SELECT xxx;" queries. bare_select_suffix = '' # If NULL is implied on columns without needing to be explicitly specified implied_column_null = False # Does the backend support "select for update" queries with limit (and offset)? supports_select_for_update_with_limit = True # Does the backend ignore null expressions in GREATEST and LEAST queries unless # every expression is null? 
greatest_least_ignores_nulls = False # Can the backend clone databases for parallel test execution? # Defaults to False to allow third-party backends to opt-in. can_clone_databases = False # Does the backend consider table names with different casing to # be equal? ignores_table_name_case = False # Place FOR UPDATE right after FROM clause. Used on MSSQL. for_update_after_from = False # Combinatorial flags supports_select_union = True supports_select_intersection = True supports_select_difference = True supports_slicing_ordering_in_compound = False supports_parentheses_in_compound = True # Does the database support SQL 2003 FILTER (WHERE ...) in aggregate # expressions? supports_aggregate_filter_clause = False # Does the backend support indexing a TextField? supports_index_on_text_field = True # Does the backend support window expressions (expression OVER (...))? supports_over_clause = False supports_frame_range_fixed_distance = False only_supports_unbounded_with_preceding_and_following = False # Does the backend support CAST with precision? supports_cast_with_precision = True # How many second decimals does the database return when casting a value to # a type with time? time_cast_precision = 6 # SQL to create a procedure for use by the Django test suite. The # functionality of the procedure isn't important. create_test_procedure_without_params_sql = None create_test_procedure_with_int_param_sql = None # Does the backend support keyword parameters for cursor.callproc()? supports_callproc_kwargs = False # What formats does the backend EXPLAIN syntax support? supported_explain_formats = set() # Does DatabaseOperations.explain_query_prefix() raise ValueError if # unknown kwargs are passed to QuerySet.explain()? validates_explain_options = True # Does the backend support the default parameter in lead() and lag()? supports_default_in_lead_lag = True # Does the backend support ignoring constraint or uniqueness errors during # INSERT? supports_ignore_conflicts = True # Does this backend require casting the results of CASE expressions used # in UPDATE statements to ensure the expression has the correct type? requires_casted_case_in_updates = False # Does the backend support partial indexes (CREATE INDEX ... WHERE ...)? supports_partial_indexes = True supports_functions_in_partial_indexes = True # Does the backend support covering indexes (CREATE INDEX ... INCLUDE ...)? supports_covering_indexes = False # Does the database allow more than one constraint or index on the same # field(s)? allows_multiple_constraints_on_same_fields = True # Does the backend support boolean expressions in SELECT and GROUP BY # clauses? supports_boolean_expr_in_select_clause = True # Does the backend support JSONField? supports_json_field = True # Can the backend introspect a JSONField? can_introspect_json_field = True # Does the backend support primitives in JSONField? supports_primitives_in_json_field = True # Is there a true datatype for JSON? has_native_json_field = False # Does the backend use PostgreSQL-style JSON operators like '->'? 
has_json_operators = False def __init__(self, connection): self.connection = connection @cached_property def supports_explaining_query_execution(self): """Does this backend support explaining query execution?""" return self.connection.ops.explain_prefix is not None @cached_property def supports_transactions(self): """Confirm support for transactions.""" with self.connection.cursor() as cursor: cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)') self.connection.set_autocommit(False) cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)') self.connection.rollback() self.connection.set_autocommit(True) cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST') count, = cursor.fetchone() cursor.execute('DROP TABLE ROLLBACK_TEST') return count == 0 def allows_group_by_selected_pks_on_model(self, model): if not self.allows_group_by_selected_pks: return False return model._meta.managed
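# --- Usage sketch (illustrative, not part of Django): a third-party backend
# advertises its capabilities by overriding flags on a subclass. The flag
# values and _StubConnection below are hypothetical; real backends pass
# their DatabaseWrapper instance.
class _ExampleFeatures(BaseDatabaseFeatures):
    supports_over_clause = True   # hypothetical: backend has window functions
    max_query_params = 999        # hypothetical driver placeholder limit

class _StubConnection:
    pass

_features = _ExampleFeatures(_StubConnection())
assert _features.supports_over_clause and _features.max_query_params == 999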
import operator from django.db.backends.base.features import BaseDatabaseFeatures from django.utils.functional import cached_property class DatabaseFeatures(BaseDatabaseFeatures): empty_fetchmany_value = () allows_group_by_pk = True related_fields_match_type = True # MySQL doesn't support sliced subqueries with IN/ALL/ANY/SOME. allow_sliced_subqueries_with_in = False has_select_for_update = True supports_forward_references = False supports_regex_backreferencing = False supports_date_lookup_using_string = False supports_index_column_ordering = False supports_timezones = False requires_explicit_null_ordering_when_grouping = True allows_auto_pk_0 = False can_release_savepoints = True atomic_transactions = False can_clone_databases = True supports_temporal_subtraction = True supports_select_intersection = False supports_select_difference = False supports_slicing_ordering_in_compound = True supports_index_on_text_field = False has_case_insensitive_like = False create_test_procedure_without_params_sql = """ CREATE PROCEDURE test_procedure () BEGIN DECLARE V_I INTEGER; SET V_I = 1; END; """ create_test_procedure_with_int_param_sql = """ CREATE PROCEDURE test_procedure (P_I INTEGER) BEGIN DECLARE V_I INTEGER; SET V_I = P_I; END; """ # Neither MySQL nor MariaDB support partial indexes. supports_partial_indexes = False supports_order_by_nulls_modifier = False order_by_nulls_first = True @cached_property def _mysql_storage_engine(self): "Internal method used in Django tests. Don't rely on this from your code" return self.connection.mysql_server_data['default_storage_engine'] @cached_property def update_can_self_select(self): return self.connection.mysql_is_mariadb and self.connection.mysql_version >= (10, 3, 2) @cached_property def can_introspect_foreign_keys(self): "Confirm support for introspected foreign keys" return self._mysql_storage_engine != 'MyISAM' @cached_property def introspected_field_types(self): return { **super().introspected_field_types, 'BinaryField': 'TextField', 'BooleanField': 'IntegerField', 'DurationField': 'BigIntegerField', 'GenericIPAddressField': 'CharField', } @cached_property def can_return_columns_from_insert(self): return self.connection.mysql_is_mariadb and self.connection.mysql_version >= (10, 5, 0) can_return_rows_from_bulk_insert = property(operator.attrgetter('can_return_columns_from_insert')) @cached_property def has_zoneinfo_database(self): return self.connection.mysql_server_data['has_zoneinfo_database'] @cached_property def is_sql_auto_is_null_enabled(self): return self.connection.mysql_server_data['sql_auto_is_null'] @cached_property def supports_over_clause(self): if self.connection.mysql_is_mariadb: return True return self.connection.mysql_version >= (8, 0, 2) supports_frame_range_fixed_distance = property(operator.attrgetter('supports_over_clause')) @cached_property def supports_column_check_constraints(self): if self.connection.mysql_is_mariadb: return self.connection.mysql_version >= (10, 2, 1) return self.connection.mysql_version >= (8, 0, 16) supports_table_check_constraints = property(operator.attrgetter('supports_column_check_constraints')) @cached_property def can_introspect_check_constraints(self): if self.connection.mysql_is_mariadb: version = self.connection.mysql_version return (version >= (10, 2, 22) and version < (10, 3)) or version >= (10, 3, 10) return self.connection.mysql_version >= (8, 0, 16) @cached_property def has_select_for_update_skip_locked(self): return not self.connection.mysql_is_mariadb and self.connection.mysql_version >= (8, 0, 
1) @cached_property def has_select_for_update_nowait(self): if self.connection.mysql_is_mariadb: return self.connection.mysql_version >= (10, 3, 0) return self.connection.mysql_version >= (8, 0, 1) @cached_property def supports_explain_analyze(self): return self.connection.mysql_is_mariadb or self.connection.mysql_version >= (8, 0, 18) @cached_property def supported_explain_formats(self): # Alias MySQL's TRADITIONAL to TEXT for consistency with other # backends. formats = {'JSON', 'TEXT', 'TRADITIONAL'} if not self.connection.mysql_is_mariadb and self.connection.mysql_version >= (8, 0, 16): formats.add('TREE') return formats @cached_property def supports_transactions(self): """ All storage engines except MyISAM support transactions. """ return self._mysql_storage_engine != 'MyISAM' @cached_property def ignores_table_name_case(self): return self.connection.mysql_server_data['lower_case_table_names'] @cached_property def supports_default_in_lead_lag(self): # To be added in https://jira.mariadb.org/browse/MDEV-12981. return not self.connection.mysql_is_mariadb @cached_property def supports_json_field(self): if self.connection.mysql_is_mariadb: return self.connection.mysql_version >= (10, 2, 7) return self.connection.mysql_version >= (5, 7, 8) @cached_property def can_introspect_json_field(self): if self.connection.mysql_is_mariadb: return self.supports_json_field and self.can_introspect_check_constraints return self.supports_json_field
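# --- Illustrative check (not part of Django): the version-gated properties
# above only read a couple of connection attributes, so they can be exercised
# with a stub. _StubConnection is a test double, not a real connection.
class _StubConnection:
    mysql_is_mariadb = False
    mysql_version = (8, 0, 18)

_features = DatabaseFeatures(_StubConnection())
assert _features.supports_over_clause                  # MySQL >= 8.0.2
assert 'TREE' in _features.supported_explain_formats   # MySQL >= 8.0.16
assert _features.supports_explain_analyze              # MySQL >= 8.0.18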
""" SQLite backend for the sqlite3 module in the standard library. """ import datetime import decimal import functools import hashlib import json import math import operator import re import statistics import warnings from itertools import chain from sqlite3 import dbapi2 as Database import pytz from django.core.exceptions import ImproperlyConfigured from django.db import IntegrityError from django.db.backends import utils as backend_utils from django.db.backends.base.base import BaseDatabaseWrapper from django.utils import timezone from django.utils.asyncio import async_unsafe from django.utils.dateparse import parse_datetime, parse_time from django.utils.duration import duration_microseconds from django.utils.regex_helper import _lazy_re_compile from django.utils.version import PY38 from .client import DatabaseClient # isort:skip from .creation import DatabaseCreation # isort:skip from .features import DatabaseFeatures # isort:skip from .introspection import DatabaseIntrospection # isort:skip from .operations import DatabaseOperations # isort:skip from .schema import DatabaseSchemaEditor # isort:skip def decoder(conv_func): """ Convert bytestrings from Python's sqlite3 interface to a regular string. """ return lambda s: conv_func(s.decode()) def none_guard(func): """ Decorator that returns None if any of the arguments to the decorated function are None. Many SQL functions return NULL if any of their arguments are NULL. This decorator simplifies the implementation of this for the custom functions registered below. """ @functools.wraps(func) def wrapper(*args, **kwargs): return None if None in args else func(*args, **kwargs) return wrapper def list_aggregate(function): """ Return an aggregate class that accumulates values in a list and applies the provided function to the data. """ return type('ListAggregate', (list,), {'finalize': function, 'step': list.append}) def check_sqlite_version(): if Database.sqlite_version_info < (3, 8, 3): raise ImproperlyConfigured('SQLite 3.8.3 or later is required (found %s).' % Database.sqlite_version) check_sqlite_version() Database.register_converter("bool", b'1'.__eq__) Database.register_converter("time", decoder(parse_time)) Database.register_converter("datetime", decoder(parse_datetime)) Database.register_converter("timestamp", decoder(parse_datetime)) Database.register_adapter(decimal.Decimal, str) class DatabaseWrapper(BaseDatabaseWrapper): vendor = 'sqlite' display_name = 'SQLite' # SQLite doesn't actually support most of these types, but it "does the right # thing" given more verbose field definitions, so leave them as is so that # schema inspection is more useful. 
data_types = { 'AutoField': 'integer', 'BigAutoField': 'integer', 'BinaryField': 'BLOB', 'BooleanField': 'bool', 'CharField': 'varchar(%(max_length)s)', 'DateField': 'date', 'DateTimeField': 'datetime', 'DecimalField': 'decimal', 'DurationField': 'bigint', 'FileField': 'varchar(%(max_length)s)', 'FilePathField': 'varchar(%(max_length)s)', 'FloatField': 'real', 'IntegerField': 'integer', 'BigIntegerField': 'bigint', 'IPAddressField': 'char(15)', 'GenericIPAddressField': 'char(39)', 'JSONField': 'text', 'NullBooleanField': 'bool', 'OneToOneField': 'integer', 'PositiveBigIntegerField': 'bigint unsigned', 'PositiveIntegerField': 'integer unsigned', 'PositiveSmallIntegerField': 'smallint unsigned', 'SlugField': 'varchar(%(max_length)s)', 'SmallAutoField': 'integer', 'SmallIntegerField': 'smallint', 'TextField': 'text', 'TimeField': 'time', 'UUIDField': 'char(32)', } data_type_check_constraints = { 'PositiveBigIntegerField': '"%(column)s" >= 0', 'JSONField': '(JSON_VALID("%(column)s") OR "%(column)s" IS NULL)', 'PositiveIntegerField': '"%(column)s" >= 0', 'PositiveSmallIntegerField': '"%(column)s" >= 0', } data_types_suffix = { 'AutoField': 'AUTOINCREMENT', 'BigAutoField': 'AUTOINCREMENT', 'SmallAutoField': 'AUTOINCREMENT', } # SQLite requires LIKE statements to include an ESCAPE clause if the value # being escaped has a percent or underscore in it. # See https://www.sqlite.org/lang_expr.html for an explanation. operators = { 'exact': '= %s', 'iexact': "LIKE %s ESCAPE '\\'", 'contains': "LIKE %s ESCAPE '\\'", 'icontains': "LIKE %s ESCAPE '\\'", 'regex': 'REGEXP %s', 'iregex': "REGEXP '(?i)' || %s", 'gt': '> %s', 'gte': '>= %s', 'lt': '< %s', 'lte': '<= %s', 'startswith': "LIKE %s ESCAPE '\\'", 'endswith': "LIKE %s ESCAPE '\\'", 'istartswith': "LIKE %s ESCAPE '\\'", 'iendswith': "LIKE %s ESCAPE '\\'", } # The patterns below are used to generate SQL pattern lookup clauses when # the right-hand side of the lookup isn't a raw string (it might be an expression # or the result of a bilateral transformation). # In those cases, special characters for LIKE operators (e.g. \, *, _) should be # escaped on database side. # # Note: we use str.format() here for readability as '%' is used as a wildcard for # the LIKE operator. pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')" pattern_ops = { 'contains': r"LIKE '%%' || {} || '%%' ESCAPE '\'", 'icontains': r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'", 'startswith': r"LIKE {} || '%%' ESCAPE '\'", 'istartswith': r"LIKE UPPER({}) || '%%' ESCAPE '\'", 'endswith': r"LIKE '%%' || {} ESCAPE '\'", 'iendswith': r"LIKE '%%' || UPPER({}) ESCAPE '\'", } Database = Database SchemaEditorClass = DatabaseSchemaEditor # Classes instantiated in __init__(). client_class = DatabaseClient creation_class = DatabaseCreation features_class = DatabaseFeatures introspection_class = DatabaseIntrospection ops_class = DatabaseOperations def get_connection_params(self): settings_dict = self.settings_dict if not settings_dict['NAME']: raise ImproperlyConfigured( "settings.DATABASES is improperly configured. " "Please supply the NAME value.") kwargs = { # TODO: Remove str() when dropping support for PY36. # https://bugs.python.org/issue33496 'database': str(settings_dict['NAME']), 'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES, **settings_dict['OPTIONS'], } # Always allow the underlying SQLite connection to be shareable # between multiple threads. 
The safe-guarding will be handled at a # higher level by the `BaseDatabaseWrapper.allow_thread_sharing` # property. This is necessary as the shareability is disabled by # default in pysqlite and it cannot be changed once a connection is # opened. if 'check_same_thread' in kwargs and kwargs['check_same_thread']: warnings.warn( 'The `check_same_thread` option was provided and set to ' 'True. It will be overridden with False. Use the ' '`DatabaseWrapper.allow_thread_sharing` property instead ' 'for controlling thread shareability.', RuntimeWarning ) kwargs.update({'check_same_thread': False, 'uri': True}) return kwargs @async_unsafe def get_new_connection(self, conn_params): conn = Database.connect(**conn_params) if PY38: create_deterministic_function = functools.partial( conn.create_function, deterministic=True, ) else: create_deterministic_function = conn.create_function create_deterministic_function('django_date_extract', 2, _sqlite_datetime_extract) create_deterministic_function('django_date_trunc', 2, _sqlite_date_trunc) create_deterministic_function('django_datetime_cast_date', 3, _sqlite_datetime_cast_date) create_deterministic_function('django_datetime_cast_time', 3, _sqlite_datetime_cast_time) create_deterministic_function('django_datetime_extract', 4, _sqlite_datetime_extract) create_deterministic_function('django_datetime_trunc', 4, _sqlite_datetime_trunc) create_deterministic_function('django_time_extract', 2, _sqlite_time_extract) create_deterministic_function('django_time_trunc', 2, _sqlite_time_trunc) create_deterministic_function('django_time_diff', 2, _sqlite_time_diff) create_deterministic_function('django_timestamp_diff', 2, _sqlite_timestamp_diff) create_deterministic_function('django_format_dtdelta', 3, _sqlite_format_dtdelta) create_deterministic_function('regexp', 2, _sqlite_regexp) create_deterministic_function('ACOS', 1, none_guard(math.acos)) create_deterministic_function('ASIN', 1, none_guard(math.asin)) create_deterministic_function('ATAN', 1, none_guard(math.atan)) create_deterministic_function('ATAN2', 2, none_guard(math.atan2)) create_deterministic_function('BITXOR', 2, none_guard(operator.xor)) create_deterministic_function('CEILING', 1, none_guard(math.ceil)) create_deterministic_function('COS', 1, none_guard(math.cos)) create_deterministic_function('COT', 1, none_guard(lambda x: 1 / math.tan(x))) create_deterministic_function('DEGREES', 1, none_guard(math.degrees)) create_deterministic_function('EXP', 1, none_guard(math.exp)) create_deterministic_function('FLOOR', 1, none_guard(math.floor)) create_deterministic_function('JSON_CONTAINS', 2, _sqlite_json_contains) create_deterministic_function('LN', 1, none_guard(math.log)) create_deterministic_function('LOG', 2, none_guard(lambda x, y: math.log(y, x))) create_deterministic_function('LPAD', 3, _sqlite_lpad) create_deterministic_function('MD5', 1, none_guard(lambda x: hashlib.md5(x.encode()).hexdigest())) create_deterministic_function('MOD', 2, none_guard(math.fmod)) create_deterministic_function('PI', 0, lambda: math.pi) create_deterministic_function('POWER', 2, none_guard(operator.pow)) create_deterministic_function('RADIANS', 1, none_guard(math.radians)) create_deterministic_function('REPEAT', 2, none_guard(operator.mul)) create_deterministic_function('REVERSE', 1, none_guard(lambda x: x[::-1])) create_deterministic_function('RPAD', 3, _sqlite_rpad) create_deterministic_function('SHA1', 1, none_guard(lambda x: hashlib.sha1(x.encode()).hexdigest())) create_deterministic_function('SHA224', 1, none_guard(lambda 
x: hashlib.sha224(x.encode()).hexdigest())) create_deterministic_function('SHA256', 1, none_guard(lambda x: hashlib.sha256(x.encode()).hexdigest())) create_deterministic_function('SHA384', 1, none_guard(lambda x: hashlib.sha384(x.encode()).hexdigest())) create_deterministic_function('SHA512', 1, none_guard(lambda x: hashlib.sha512(x.encode()).hexdigest())) create_deterministic_function('SIGN', 1, none_guard(lambda x: (x > 0) - (x < 0))) create_deterministic_function('SIN', 1, none_guard(math.sin)) create_deterministic_function('SQRT', 1, none_guard(math.sqrt)) create_deterministic_function('TAN', 1, none_guard(math.tan)) conn.create_aggregate('STDDEV_POP', 1, list_aggregate(statistics.pstdev)) conn.create_aggregate('STDDEV_SAMP', 1, list_aggregate(statistics.stdev)) conn.create_aggregate('VAR_POP', 1, list_aggregate(statistics.pvariance)) conn.create_aggregate('VAR_SAMP', 1, list_aggregate(statistics.variance)) conn.execute('PRAGMA foreign_keys = ON') return conn def init_connection_state(self): pass def create_cursor(self, name=None): return self.connection.cursor(factory=SQLiteCursorWrapper) @async_unsafe def close(self): self.validate_thread_sharing() # If database is in memory, closing the connection destroys the # database. To prevent accidental data loss, ignore close requests on # an in-memory db. if not self.is_in_memory_db(): BaseDatabaseWrapper.close(self) def _savepoint_allowed(self): # When 'isolation_level' is not None, sqlite3 commits before each # savepoint; it's a bug. When it is None, savepoints don't make sense # because autocommit is enabled. The only exception is inside 'atomic' # blocks. To work around that bug, on SQLite, 'atomic' starts a # transaction explicitly rather than simply disable autocommit. return self.in_atomic_block def _set_autocommit(self, autocommit): if autocommit: level = None else: # sqlite3's internal default is ''. It's different from None. # See Modules/_sqlite/connection.c. level = '' # 'isolation_level' is a misleading API. # SQLite always runs at the SERIALIZABLE isolation level. with self.wrap_database_errors: self.connection.isolation_level = level def disable_constraint_checking(self): with self.cursor() as cursor: cursor.execute('PRAGMA foreign_keys = OFF') # Foreign key constraints cannot be turned off while in a multi- # statement transaction. Fetch the current state of the pragma # to determine if constraints are effectively disabled. enabled = cursor.execute('PRAGMA foreign_keys').fetchone()[0] return not bool(enabled) def enable_constraint_checking(self): with self.cursor() as cursor: cursor.execute('PRAGMA foreign_keys = ON') def check_constraints(self, table_names=None): """ Check each table name in `table_names` for rows with invalid foreign key references. This method is intended to be used in conjunction with `disable_constraint_checking()` and `enable_constraint_checking()`, to determine if rows with invalid references were entered while constraint checks were off. 
""" if self.features.supports_pragma_foreign_key_check: with self.cursor() as cursor: if table_names is None: violations = cursor.execute('PRAGMA foreign_key_check').fetchall() else: violations = chain.from_iterable( cursor.execute('PRAGMA foreign_key_check(%s)' % table_name).fetchall() for table_name in table_names ) # See https://www.sqlite.org/pragma.html#pragma_foreign_key_check for table_name, rowid, referenced_table_name, foreign_key_index in violations: foreign_key = cursor.execute( 'PRAGMA foreign_key_list(%s)' % table_name ).fetchall()[foreign_key_index] column_name, referenced_column_name = foreign_key[3:5] primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name) primary_key_value, bad_value = cursor.execute( 'SELECT %s, %s FROM %s WHERE rowid = %%s' % ( primary_key_column_name, column_name, table_name ), (rowid,), ).fetchone() raise IntegrityError( "The row in table '%s' with primary key '%s' has an " "invalid foreign key: %s.%s contains a value '%s' that " "does not have a corresponding value in %s.%s." % ( table_name, primary_key_value, table_name, column_name, bad_value, referenced_table_name, referenced_column_name ) ) else: with self.cursor() as cursor: if table_names is None: table_names = self.introspection.table_names(cursor) for table_name in table_names: primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name) if not primary_key_column_name: continue key_columns = self.introspection.get_key_columns(cursor, table_name) for column_name, referenced_table_name, referenced_column_name in key_columns: cursor.execute( """ SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING LEFT JOIN `%s` as REFERRED ON (REFERRING.`%s` = REFERRED.`%s`) WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL """ % ( primary_key_column_name, column_name, table_name, referenced_table_name, column_name, referenced_column_name, column_name, referenced_column_name, ) ) for bad_row in cursor.fetchall(): raise IntegrityError( "The row in table '%s' with primary key '%s' has an " "invalid foreign key: %s.%s contains a value '%s' that " "does not have a corresponding value in %s.%s." % ( table_name, bad_row[0], table_name, column_name, bad_row[1], referenced_table_name, referenced_column_name, ) ) def is_usable(self): return True def _start_transaction_under_autocommit(self): """ Start a transaction explicitly in autocommit mode. Staying in autocommit mode works around a bug of sqlite3 that breaks savepoints when autocommit is disabled. """ self.cursor().execute("BEGIN") def is_in_memory_db(self): return self.creation.is_in_memory_db(self.settings_dict['NAME']) FORMAT_QMARK_REGEX = _lazy_re_compile(r'(?<!%)%s') class SQLiteCursorWrapper(Database.Cursor): """ Django uses "format" style placeholders, but pysqlite2 uses "qmark" style. This fixes it -- but note that if you want to use a literal "%s" in a query, you'll need to use "%%s". 
""" def execute(self, query, params=None): if params is None: return Database.Cursor.execute(self, query) query = self.convert_query(query) return Database.Cursor.execute(self, query, params) def executemany(self, query, param_list): query = self.convert_query(query) return Database.Cursor.executemany(self, query, param_list) def convert_query(self, query): return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%') def _sqlite_datetime_parse(dt, tzname=None, conn_tzname=None): if dt is None: return None try: dt = backend_utils.typecast_timestamp(dt) except (TypeError, ValueError): return None if conn_tzname: dt = dt.replace(tzinfo=pytz.timezone(conn_tzname)) if tzname is not None and tzname != conn_tzname: sign_index = tzname.find('+') + tzname.find('-') + 1 if sign_index > -1: sign = tzname[sign_index] tzname, offset = tzname.split(sign) if offset: hours, minutes = offset.split(':') offset_delta = datetime.timedelta(hours=int(hours), minutes=int(minutes)) dt += offset_delta if sign == '+' else -offset_delta dt = timezone.localtime(dt, pytz.timezone(tzname)) return dt def _sqlite_date_trunc(lookup_type, dt): dt = _sqlite_datetime_parse(dt) if dt is None: return None if lookup_type == 'year': return "%i-01-01" % dt.year elif lookup_type == 'quarter': month_in_quarter = dt.month - (dt.month - 1) % 3 return '%i-%02i-01' % (dt.year, month_in_quarter) elif lookup_type == 'month': return "%i-%02i-01" % (dt.year, dt.month) elif lookup_type == 'week': dt = dt - datetime.timedelta(days=dt.weekday()) return "%i-%02i-%02i" % (dt.year, dt.month, dt.day) elif lookup_type == 'day': return "%i-%02i-%02i" % (dt.year, dt.month, dt.day) def _sqlite_time_trunc(lookup_type, dt): if dt is None: return None try: dt = backend_utils.typecast_time(dt) except (ValueError, TypeError): return None if lookup_type == 'hour': return "%02i:00:00" % dt.hour elif lookup_type == 'minute': return "%02i:%02i:00" % (dt.hour, dt.minute) elif lookup_type == 'second': return "%02i:%02i:%02i" % (dt.hour, dt.minute, dt.second) def _sqlite_datetime_cast_date(dt, tzname, conn_tzname): dt = _sqlite_datetime_parse(dt, tzname, conn_tzname) if dt is None: return None return dt.date().isoformat() def _sqlite_datetime_cast_time(dt, tzname, conn_tzname): dt = _sqlite_datetime_parse(dt, tzname, conn_tzname) if dt is None: return None return dt.time().isoformat() def _sqlite_datetime_extract(lookup_type, dt, tzname=None, conn_tzname=None): dt = _sqlite_datetime_parse(dt, tzname, conn_tzname) if dt is None: return None if lookup_type == 'week_day': return (dt.isoweekday() % 7) + 1 elif lookup_type == 'iso_week_day': return dt.isoweekday() elif lookup_type == 'week': return dt.isocalendar()[1] elif lookup_type == 'quarter': return math.ceil(dt.month / 3) elif lookup_type == 'iso_year': return dt.isocalendar()[0] else: return getattr(dt, lookup_type) def _sqlite_datetime_trunc(lookup_type, dt, tzname, conn_tzname): dt = _sqlite_datetime_parse(dt, tzname, conn_tzname) if dt is None: return None if lookup_type == 'year': return "%i-01-01 00:00:00" % dt.year elif lookup_type == 'quarter': month_in_quarter = dt.month - (dt.month - 1) % 3 return '%i-%02i-01 00:00:00' % (dt.year, month_in_quarter) elif lookup_type == 'month': return "%i-%02i-01 00:00:00" % (dt.year, dt.month) elif lookup_type == 'week': dt = dt - datetime.timedelta(days=dt.weekday()) return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day) elif lookup_type == 'day': return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day) elif lookup_type == 'hour': return "%i-%02i-%02i 
%02i:00:00" % (dt.year, dt.month, dt.day, dt.hour) elif lookup_type == 'minute': return "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day, dt.hour, dt.minute) elif lookup_type == 'second': return "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second) def _sqlite_time_extract(lookup_type, dt): if dt is None: return None try: dt = backend_utils.typecast_time(dt) except (ValueError, TypeError): return None return getattr(dt, lookup_type) @none_guard def _sqlite_format_dtdelta(conn, lhs, rhs): """ LHS and RHS can be either: - An integer number of microseconds - A string representing a datetime """ try: real_lhs = datetime.timedelta(0, 0, lhs) if isinstance(lhs, int) else backend_utils.typecast_timestamp(lhs) real_rhs = datetime.timedelta(0, 0, rhs) if isinstance(rhs, int) else backend_utils.typecast_timestamp(rhs) if conn.strip() == '+': out = real_lhs + real_rhs else: out = real_lhs - real_rhs except (ValueError, TypeError): return None # typecast_timestamp returns a date or a datetime without timezone. # It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]" return str(out) @none_guard def _sqlite_time_diff(lhs, rhs): left = backend_utils.typecast_time(lhs) right = backend_utils.typecast_time(rhs) return ( (left.hour * 60 * 60 * 1000000) + (left.minute * 60 * 1000000) + (left.second * 1000000) + (left.microsecond) - (right.hour * 60 * 60 * 1000000) - (right.minute * 60 * 1000000) - (right.second * 1000000) - (right.microsecond) ) @none_guard def _sqlite_timestamp_diff(lhs, rhs): left = backend_utils.typecast_timestamp(lhs) right = backend_utils.typecast_timestamp(rhs) return duration_microseconds(left - right) @none_guard def _sqlite_regexp(re_pattern, re_string): return bool(re.search(re_pattern, str(re_string))) @none_guard def _sqlite_lpad(text, length, fill_text): if len(text) >= length: return text[:length] return (fill_text * length)[:length - len(text)] + text @none_guard def _sqlite_rpad(text, length, fill_text): return (text + fill_text * length)[:length] @none_guard def _sqlite_json_contains(haystack, needle): target, candidate = json.loads(haystack), json.loads(needle) if isinstance(target, dict) and isinstance(candidate, dict): return target.items() >= candidate.items() return target == candidate
021aebe932af947cd4651c29947fdae2e8abe555f8d0870af85a920d92a7decf
from django.apps import apps from django.contrib import auth from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager from django.contrib.auth.hashers import make_password from django.contrib.contenttypes.models import ContentType from django.core.exceptions import PermissionDenied from django.core.mail import send_mail from django.db import models from django.db.models.manager import EmptyManager from django.utils import timezone from django.utils.translation import gettext_lazy as _ from .validators import UnicodeUsernameValidator def update_last_login(sender, user, **kwargs): """ A signal receiver which updates the last_login date for the user logging in. """ user.last_login = timezone.now() user.save(update_fields=['last_login']) class PermissionManager(models.Manager): use_in_migrations = True def get_by_natural_key(self, codename, app_label, model): return self.get( codename=codename, content_type=ContentType.objects.db_manager(self.db).get_by_natural_key(app_label, model), ) class Permission(models.Model): """ The permissions system provides a way to assign permissions to specific users and groups of users. The permission system is used by the Django admin site, but may also be useful in your own code. The Django admin site uses permissions as follows: - The "add" permission limits the user's ability to view the "add" form and add an object. - The "change" permission limits a user's ability to view the change list, view the "change" form and change an object. - The "delete" permission limits the ability to delete an object. - The "view" permission limits the ability to view an object. Permissions are set globally per type of object, not per specific object instance. It is possible to say "Mary may change news stories," but it's not currently possible to say "Mary may change news stories, but only the ones she created herself" or "Mary may only change news stories that have a certain status or publication date." The permissions listed above are automatically created for each model. """ name = models.CharField(_('name'), max_length=255) content_type = models.ForeignKey( ContentType, models.CASCADE, verbose_name=_('content type'), ) codename = models.CharField(_('codename'), max_length=100) objects = PermissionManager() class Meta: verbose_name = _('permission') verbose_name_plural = _('permissions') unique_together = [['content_type', 'codename']] ordering = ['content_type__app_label', 'content_type__model', 'codename'] def __str__(self): return '%s | %s' % (self.content_type, self.name) def natural_key(self): return (self.codename,) + self.content_type.natural_key() natural_key.dependencies = ['contenttypes.contenttype'] class GroupManager(models.Manager): """ The manager for the auth's Group model. """ use_in_migrations = True def get_by_natural_key(self, name): return self.get(name=name) class Group(models.Model): """ Groups are a generic way of categorizing users to apply permissions, or some other label, to those users. A user can belong to any number of groups. A user in a group automatically has all the permissions granted to that group. For example, if the group 'Site editors' has the permission can_edit_home_page, any user in that group will have that permission. Beyond permissions, groups are a convenient way to categorize users to apply some label, or extended functionality, to them. 
For example, you could create a group 'Special users', and you could write code that would do special things to those users -- such as giving them access to a members-only portion of your site, or sending them members-only email messages. """ name = models.CharField(_('name'), max_length=150, unique=True) permissions = models.ManyToManyField( Permission, verbose_name=_('permissions'), blank=True, ) objects = GroupManager() class Meta: verbose_name = _('group') verbose_name_plural = _('groups') def __str__(self): return self.name def natural_key(self): return (self.name,) class UserManager(BaseUserManager): use_in_migrations = True def _create_user(self, username, email, password, **extra_fields): """ Create and save a user with the given username, email, and password. """ if not username: raise ValueError('The given username must be set') email = self.normalize_email(email) # Lookup the real model class from the global app registry so this # manager method can be used in migrations. This is fine because # managers are by definition working on the real model. GlobalUserModel = apps.get_model(self.model._meta.app_label, self.model._meta.object_name) username = GlobalUserModel.normalize_username(username) user = self.model(username=username, email=email, **extra_fields) user.password = make_password(password) user.save(using=self._db) return user def create_user(self, username, email=None, password=None, **extra_fields): extra_fields.setdefault('is_staff', False) extra_fields.setdefault('is_superuser', False) return self._create_user(username, email, password, **extra_fields) def create_superuser(self, username, email=None, password=None, **extra_fields): extra_fields.setdefault('is_staff', True) extra_fields.setdefault('is_superuser', True) if extra_fields.get('is_staff') is not True: raise ValueError('Superuser must have is_staff=True.') if extra_fields.get('is_superuser') is not True: raise ValueError('Superuser must have is_superuser=True.') return self._create_user(username, email, password, **extra_fields) def with_perm(self, perm, is_active=True, include_superusers=True, backend=None, obj=None): if backend is None: backends = auth._get_backends(return_tuples=True) if len(backends) == 1: backend, _ = backends[0] else: raise ValueError( 'You have multiple authentication backends configured and ' 'therefore must provide the `backend` argument.' ) elif not isinstance(backend, str): raise TypeError( 'backend must be a dotted import path string (got %r).' % backend ) else: backend = auth.load_backend(backend) if hasattr(backend, 'with_perm'): return backend.with_perm( perm, is_active=is_active, include_superusers=include_superusers, obj=obj, ) return self.none() # A few helper functions for common logic between User and AnonymousUser. def _user_get_permissions(user, obj, from_name): permissions = set() name = 'get_%s_permissions' % from_name for backend in auth.get_backends(): if hasattr(backend, name): permissions.update(getattr(backend, name)(user, obj)) return permissions def _user_has_perm(user, perm, obj): """ A backend can raise `PermissionDenied` to short-circuit permission checking. """ for backend in auth.get_backends(): if not hasattr(backend, 'has_perm'): continue try: if backend.has_perm(user, perm, obj): return True except PermissionDenied: return False return False def _user_has_module_perms(user, app_label): """ A backend can raise `PermissionDenied` to short-circuit permission checking. 
""" for backend in auth.get_backends(): if not hasattr(backend, 'has_module_perms'): continue try: if backend.has_module_perms(user, app_label): return True except PermissionDenied: return False return False class PermissionsMixin(models.Model): """ Add the fields and methods necessary to support the Group and Permission models using the ModelBackend. """ is_superuser = models.BooleanField( _('superuser status'), default=False, help_text=_( 'Designates that this user has all permissions without ' 'explicitly assigning them.' ), ) groups = models.ManyToManyField( Group, verbose_name=_('groups'), blank=True, help_text=_( 'The groups this user belongs to. A user will get all permissions ' 'granted to each of their groups.' ), related_name="user_set", related_query_name="user", ) user_permissions = models.ManyToManyField( Permission, verbose_name=_('user permissions'), blank=True, help_text=_('Specific permissions for this user.'), related_name="user_set", related_query_name="user", ) class Meta: abstract = True def get_user_permissions(self, obj=None): """ Return a list of permission strings that this user has directly. Query all available auth backends. If an object is passed in, return only permissions matching this object. """ return _user_get_permissions(self, obj, 'user') def get_group_permissions(self, obj=None): """ Return a list of permission strings that this user has through their groups. Query all available auth backends. If an object is passed in, return only permissions matching this object. """ return _user_get_permissions(self, obj, 'group') def get_all_permissions(self, obj=None): return _user_get_permissions(self, obj, 'all') def has_perm(self, perm, obj=None): """ Return True if the user has the specified permission. Query all available auth backends, but return immediately if any backend returns True. Thus, a user who has permission from a single auth backend is assumed to have permission in general. If an object is provided, check permissions for that object. """ # Active superusers have all permissions. if self.is_active and self.is_superuser: return True # Otherwise we need to check the backends. return _user_has_perm(self, perm, obj) def has_perms(self, perm_list, obj=None): """ Return True if the user has each of the specified permissions. If object is passed, check if the user has all required perms for it. """ return all(self.has_perm(perm, obj) for perm in perm_list) def has_module_perms(self, app_label): """ Return True if the user has any permissions in the given app label. Use similar logic as has_perm(), above. """ # Active superusers have all permissions. if self.is_active and self.is_superuser: return True return _user_has_module_perms(self, app_label) class AbstractUser(AbstractBaseUser, PermissionsMixin): """ An abstract base class implementing a fully featured User model with admin-compliant permissions. Username and password are required. Other fields are optional. """ username_validator = UnicodeUsernameValidator() username = models.CharField( _('username'), max_length=150, unique=True, help_text=_('Required. 150 characters or fewer. 
Letters, digits and @/./+/-/_ only.'), validators=[username_validator], error_messages={ 'unique': _("A user with that username already exists."), }, ) first_name = models.CharField(_('first name'), max_length=150, blank=True) last_name = models.CharField(_('last name'), max_length=150, blank=True) email = models.EmailField(_('email address'), blank=True) is_staff = models.BooleanField( _('staff status'), default=False, help_text=_('Designates whether the user can log into this admin site.'), ) is_active = models.BooleanField( _('active'), default=True, help_text=_( 'Designates whether this user should be treated as active. ' 'Unselect this instead of deleting accounts.' ), ) date_joined = models.DateTimeField(_('date joined'), default=timezone.now) objects = UserManager() EMAIL_FIELD = 'email' USERNAME_FIELD = 'username' REQUIRED_FIELDS = ['email'] class Meta: verbose_name = _('user') verbose_name_plural = _('users') abstract = True def clean(self): super().clean() self.email = self.__class__.objects.normalize_email(self.email) def get_full_name(self): """ Return the first_name plus the last_name, with a space in between. """ full_name = '%s %s' % (self.first_name, self.last_name) return full_name.strip() def get_short_name(self): """Return the short name for the user.""" return self.first_name def email_user(self, subject, message, from_email=None, **kwargs): """Send an email to this user.""" send_mail(subject, message, from_email, [self.email], **kwargs) class User(AbstractUser): """ Users within the Django authentication system are represented by this model. Username and password are required. Other fields are optional. """ class Meta(AbstractUser.Meta): swappable = 'AUTH_USER_MODEL' class AnonymousUser: id = None pk = None username = '' is_staff = False is_active = False is_superuser = False _groups = EmptyManager(Group) _user_permissions = EmptyManager(Permission) def __str__(self): return 'AnonymousUser' def __eq__(self, other): return isinstance(other, self.__class__) def __hash__(self): return 1 # instances always return the same hash value def __int__(self): raise TypeError('Cannot cast AnonymousUser to int. Are you trying to use it in place of User?') def save(self): raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.") def delete(self): raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.") def set_password(self, raw_password): raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.") def check_password(self, raw_password): raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.") @property def groups(self): return self._groups @property def user_permissions(self): return self._user_permissions def get_user_permissions(self, obj=None): return _user_get_permissions(self, obj, 'user') def get_group_permissions(self, obj=None): return set() def get_all_permissions(self, obj=None): return _user_get_permissions(self, obj, 'all') def has_perm(self, perm, obj=None): return _user_has_perm(self, perm, obj=obj) def has_perms(self, perm_list, obj=None): return all(self.has_perm(perm, obj) for perm in perm_list) def has_module_perms(self, module): return _user_has_module_perms(self, module) @property def is_anonymous(self): return True @property def is_authenticated(self): return False def get_username(self): return self.username
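
# ---------------------------------------------------------------------------
# Editor's sketch (not part of django.contrib.auth): the helpers above
# (_user_get_permissions, _user_has_perm, _user_has_module_perms) probe
# backends with hasattr() and only need the duck-typed hooks shown here.
# This hypothetical backend is illustrative only; a real authentication
# backend would also implement authenticate() and get_user().
# ---------------------------------------------------------------------------
class ExamplePermissionBackend:
    def get_user_permissions(self, user_obj, obj=None):
        # Permission strings use the "app_label.codename" form.
        return {'app.view_widget'}

    def get_group_permissions(self, user_obj, obj=None):
        return set()

    def get_all_permissions(self, user_obj, obj=None):
        return self.get_user_permissions(user_obj, obj)

    def has_perm(self, user_obj, perm, obj=None):
        # Returning True short-circuits _user_has_perm(); raising
        # PermissionDenied would short-circuit it with False instead.
        return perm in self.get_all_permissions(user_obj, obj)

    def has_module_perms(self, user_obj, app_label):
        return any(
            perm.startswith(app_label + '.')
            for perm in self.get_all_permissions(user_obj)
        )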
5300822f9b059facecb81abfe35cbfed4866533cb4126d849a5163c696650c0b
from unittest import mock from django.conf.global_settings import PASSWORD_HASHERS from django.contrib.auth import get_user_model from django.contrib.auth.backends import ModelBackend from django.contrib.auth.base_user import AbstractBaseUser from django.contrib.auth.hashers import get_hasher from django.contrib.auth.models import ( AbstractUser, AnonymousUser, Group, Permission, User, UserManager, ) from django.contrib.contenttypes.models import ContentType from django.core import mail from django.db import connection, migrations from django.db.migrations.state import ModelState, ProjectState from django.db.models.signals import post_save from django.test import ( SimpleTestCase, TestCase, TransactionTestCase, override_settings, ) from .models import IntegerUsernameUser from .models.with_custom_email_field import CustomEmailField class NaturalKeysTestCase(TestCase): def test_user_natural_key(self): staff_user = User.objects.create_user(username='staff') self.assertEqual(User.objects.get_by_natural_key('staff'), staff_user) self.assertEqual(staff_user.natural_key(), ('staff',)) def test_group_natural_key(self): users_group = Group.objects.create(name='users') self.assertEqual(Group.objects.get_by_natural_key('users'), users_group) class LoadDataWithoutNaturalKeysTestCase(TestCase): fixtures = ['regular.json'] def test_user_is_created_and_added_to_group(self): user = User.objects.get(username='my_username') group = Group.objects.get(name='my_group') self.assertEqual(group, user.groups.get()) class LoadDataWithNaturalKeysTestCase(TestCase): fixtures = ['natural.json'] def test_user_is_created_and_added_to_group(self): user = User.objects.get(username='my_username') group = Group.objects.get(name='my_group') self.assertEqual(group, user.groups.get()) class LoadDataWithNaturalKeysAndMultipleDatabasesTestCase(TestCase): databases = {'default', 'other'} def test_load_data_with_user_permissions(self): # Create test contenttypes for both databases default_objects = [ ContentType.objects.db_manager('default').create( model='examplemodela', app_label='app_a', ), ContentType.objects.db_manager('default').create( model='examplemodelb', app_label='app_b', ), ] other_objects = [ ContentType.objects.db_manager('other').create( model='examplemodelb', app_label='app_b', ), ContentType.objects.db_manager('other').create( model='examplemodela', app_label='app_a', ), ] # Now we create the test UserPermission Permission.objects.db_manager("default").create( name="Can delete example model b", codename="delete_examplemodelb", content_type=default_objects[1], ) Permission.objects.db_manager("other").create( name="Can delete example model b", codename="delete_examplemodelb", content_type=other_objects[0], ) perm_default = Permission.objects.get_by_natural_key( 'delete_examplemodelb', 'app_b', 'examplemodelb', ) perm_other = Permission.objects.db_manager('other').get_by_natural_key( 'delete_examplemodelb', 'app_b', 'examplemodelb', ) self.assertEqual(perm_default.content_type_id, default_objects[1].id) self.assertEqual(perm_other.content_type_id, other_objects[0].id) class UserManagerTestCase(TransactionTestCase): available_apps = [ 'auth_tests', 'django.contrib.auth', 'django.contrib.contenttypes', ] def test_create_user(self): email_lowercase = '[email protected]' user = User.objects.create_user('user', email_lowercase) self.assertEqual(user.email, email_lowercase) self.assertEqual(user.username, 'user') self.assertFalse(user.has_usable_password()) def test_create_user_email_domain_normalize_rfc3696(self): # 
According to https://tools.ietf.org/html/rfc3696#section-3 # the "@" symbol can be part of the local part of an email address returned = UserManager.normalize_email(r'Abc\@[email protected]') self.assertEqual(returned, r'Abc\@[email protected]') def test_create_user_email_domain_normalize(self): returned = UserManager.normalize_email('[email protected]') self.assertEqual(returned, '[email protected]') def test_create_user_email_domain_normalize_with_whitespace(self): returned = UserManager.normalize_email(r'email\ [email protected]') self.assertEqual(returned, r'email\ [email protected]') def test_empty_username(self): with self.assertRaisesMessage(ValueError, 'The given username must be set'): User.objects.create_user(username='') def test_create_user_is_staff(self): email = '[email protected]' user = User.objects.create_user('user', email, is_staff=True) self.assertEqual(user.email, email) self.assertEqual(user.username, 'user') self.assertTrue(user.is_staff) def test_create_super_user_raises_error_on_false_is_superuser(self): with self.assertRaisesMessage(ValueError, 'Superuser must have is_superuser=True.'): User.objects.create_superuser( username='test', email='[email protected]', password='test', is_superuser=False, ) def test_create_superuser_raises_error_on_false_is_staff(self): with self.assertRaisesMessage(ValueError, 'Superuser must have is_staff=True.'): User.objects.create_superuser( username='test', email='[email protected]', password='test', is_staff=False, ) def test_make_random_password(self): allowed_chars = 'abcdefg' password = UserManager().make_random_password(5, allowed_chars) self.assertEqual(len(password), 5) for char in password: self.assertIn(char, allowed_chars) def test_runpython_manager_methods(self): def forwards(apps, schema_editor): UserModel = apps.get_model('auth', 'User') user = UserModel.objects.create_user('user1', password='secure') self.assertIsInstance(user, UserModel) operation = migrations.RunPython(forwards, migrations.RunPython.noop) project_state = ProjectState() project_state.add_model(ModelState.from_model(User)) project_state.add_model(ModelState.from_model(Group)) project_state.add_model(ModelState.from_model(Permission)) project_state.add_model(ModelState.from_model(ContentType)) new_state = project_state.clone() with connection.schema_editor() as editor: operation.state_forwards('test_manager_methods', new_state) operation.database_forwards( 'test_manager_methods', editor, project_state, new_state, ) user = User.objects.get(username='user1') self.assertTrue(user.check_password('secure')) class AbstractBaseUserTests(SimpleTestCase): def test_has_usable_password(self): """ Passwords are usable even if they don't correspond to a hasher in settings.PASSWORD_HASHERS. 
""" self.assertIs(User(password='some-gibbberish').has_usable_password(), True) def test_normalize_username(self): self.assertEqual(IntegerUsernameUser().normalize_username(123), 123) def test_clean_normalize_username(self): # The normalization happens in AbstractBaseUser.clean() ohm_username = 'iamtheΩ' # U+2126 OHM SIGN for model in ('auth.User', 'auth_tests.CustomUser'): with self.subTest(model=model), self.settings(AUTH_USER_MODEL=model): User = get_user_model() user = User(**{User.USERNAME_FIELD: ohm_username, 'password': 'foo'}) user.clean() username = user.get_username() self.assertNotEqual(username, ohm_username) self.assertEqual(username, 'iamtheΩ') # U+03A9 GREEK CAPITAL LETTER OMEGA def test_default_email(self): user = AbstractBaseUser() self.assertEqual(user.get_email_field_name(), 'email') def test_custom_email(self): user = CustomEmailField() self.assertEqual(user.get_email_field_name(), 'email_address') class AbstractUserTestCase(TestCase): def test_email_user(self): # valid send_mail parameters kwargs = { "fail_silently": False, "auth_user": None, "auth_password": None, "connection": None, "html_message": None, } abstract_user = AbstractUser(email='[email protected]') abstract_user.email_user( subject="Subject here", message="This is a message", from_email="[email protected]", **kwargs ) self.assertEqual(len(mail.outbox), 1) message = mail.outbox[0] self.assertEqual(message.subject, "Subject here") self.assertEqual(message.body, "This is a message") self.assertEqual(message.from_email, "[email protected]") self.assertEqual(message.to, [abstract_user.email]) def test_last_login_default(self): user1 = User.objects.create(username='user1') self.assertIsNone(user1.last_login) user2 = User.objects.create_user(username='user2') self.assertIsNone(user2.last_login) def test_user_clean_normalize_email(self): user = User(username='user', password='foo', email='[email protected]') user.clean() self.assertEqual(user.email, '[email protected]') def test_user_double_save(self): """ Calling user.save() twice should trigger password_changed() once. """ user = User.objects.create_user(username='user', password='foo') user.set_password('bar') with mock.patch('django.contrib.auth.password_validation.password_changed') as pw_changed: user.save() self.assertEqual(pw_changed.call_count, 1) user.save() self.assertEqual(pw_changed.call_count, 1) @override_settings(PASSWORD_HASHERS=PASSWORD_HASHERS) def test_check_password_upgrade(self): """ password_changed() shouldn't be called if User.check_password() triggers a hash iteration upgrade. 
""" user = User.objects.create_user(username='user', password='foo') initial_password = user.password self.assertTrue(user.check_password('foo')) hasher = get_hasher('default') self.assertEqual('pbkdf2_sha256', hasher.algorithm) old_iterations = hasher.iterations try: # Upgrade the password iterations hasher.iterations = old_iterations + 1 with mock.patch('django.contrib.auth.password_validation.password_changed') as pw_changed: user.check_password('foo') self.assertEqual(pw_changed.call_count, 0) self.assertNotEqual(initial_password, user.password) finally: hasher.iterations = old_iterations class CustomModelBackend(ModelBackend): def with_perm(self, perm, is_active=True, include_superusers=True, backend=None, obj=None): if obj is not None and obj.username == 'charliebrown': return User.objects.filter(pk=obj.pk) return User.objects.filter(username__startswith='charlie') class UserWithPermTestCase(TestCase): @classmethod def setUpTestData(cls): content_type = ContentType.objects.get_for_model(Group) cls.permission = Permission.objects.create( name='test', content_type=content_type, codename='test', ) # User with permission. cls.user1 = User.objects.create_user('user 1', '[email protected]') cls.user1.user_permissions.add(cls.permission) # User with group permission. group1 = Group.objects.create(name='group 1') group1.permissions.add(cls.permission) group2 = Group.objects.create(name='group 2') group2.permissions.add(cls.permission) cls.user2 = User.objects.create_user('user 2', '[email protected]') cls.user2.groups.add(group1, group2) # Users without permissions. cls.user_charlie = User.objects.create_user('charlie', '[email protected]') cls.user_charlie_b = User.objects.create_user('charliebrown', '[email protected]') # Superuser. cls.superuser = User.objects.create_superuser( 'superuser', '[email protected]', 'superpassword', ) # Inactive user with permission. cls.inactive_user = User.objects.create_user( 'inactive_user', '[email protected]', is_active=False, ) cls.inactive_user.user_permissions.add(cls.permission) def test_invalid_permission_name(self): msg = 'Permission name should be in the form app_label.permission_codename.' for perm in ('nodots', 'too.many.dots', '...', ''): with self.subTest(perm), self.assertRaisesMessage(ValueError, msg): User.objects.with_perm(perm) def test_invalid_permission_type(self): msg = 'The `perm` argument must be a string or a permission instance.' for perm in (b'auth.test', object(), None): with self.subTest(perm), self.assertRaisesMessage(TypeError, msg): User.objects.with_perm(perm) def test_invalid_backend_type(self): msg = 'backend must be a dotted import path string (got %r).' for backend in (b'auth_tests.CustomModelBackend', object()): with self.subTest(backend): with self.assertRaisesMessage(TypeError, msg % backend): User.objects.with_perm('auth.test', backend=backend) def test_basic(self): active_users = [self.user1, self.user2] tests = [ ({}, [*active_users, self.superuser]), ({'obj': self.user1}, []), # Only inactive users. ({'is_active': False}, [self.inactive_user]), # All users. ({'is_active': None}, [*active_users, self.superuser, self.inactive_user]), # Exclude superusers. 
({'include_superusers': False}, active_users), ( {'include_superusers': False, 'is_active': False}, [self.inactive_user], ), ( {'include_superusers': False, 'is_active': None}, [*active_users, self.inactive_user], ), ] for kwargs, expected_users in tests: for perm in ('auth.test', self.permission): with self.subTest(perm=perm, **kwargs): self.assertCountEqual( User.objects.with_perm(perm, **kwargs), expected_users, ) @override_settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.BaseBackend']) def test_backend_without_with_perm(self): self.assertSequenceEqual(User.objects.with_perm('auth.test'), []) def test_nonexistent_permission(self): self.assertSequenceEqual(User.objects.with_perm('auth.perm'), [self.superuser]) def test_nonexistent_backend(self): with self.assertRaises(ImportError): User.objects.with_perm( 'auth.test', backend='invalid.backend.CustomModelBackend', ) @override_settings(AUTHENTICATION_BACKENDS=['auth_tests.test_models.CustomModelBackend']) def test_custom_backend(self): for perm in ('auth.test', self.permission): with self.subTest(perm): self.assertCountEqual( User.objects.with_perm(perm), [self.user_charlie, self.user_charlie_b], ) @override_settings(AUTHENTICATION_BACKENDS=['auth_tests.test_models.CustomModelBackend']) def test_custom_backend_pass_obj(self): for perm in ('auth.test', self.permission): with self.subTest(perm): self.assertSequenceEqual( User.objects.with_perm(perm, obj=self.user_charlie_b), [self.user_charlie_b], ) @override_settings(AUTHENTICATION_BACKENDS=[ 'auth_tests.test_models.CustomModelBackend', 'django.contrib.auth.backends.ModelBackend', ]) def test_multiple_backends(self): msg = ( 'You have multiple authentication backends configured and ' 'therefore must provide the `backend` argument.' ) with self.assertRaisesMessage(ValueError, msg): User.objects.with_perm('auth.test') backend = 'auth_tests.test_models.CustomModelBackend' self.assertCountEqual( User.objects.with_perm('auth.test', backend=backend), [self.user_charlie, self.user_charlie_b], ) class IsActiveTestCase(TestCase): """ Tests the behavior of the guaranteed is_active attribute """ def test_builtin_user_isactive(self): user = User.objects.create(username='foo', email='[email protected]') # is_active is true by default self.assertIs(user.is_active, True) user.is_active = False user.save() user_fetched = User.objects.get(pk=user.pk) # the is_active flag is saved self.assertFalse(user_fetched.is_active) @override_settings(AUTH_USER_MODEL='auth_tests.IsActiveTestUser1') def test_is_active_field_default(self): """ tests that the default value for is_active is provided """ UserModel = get_user_model() user = UserModel(username='foo') self.assertIs(user.is_active, True) # you can set the attribute - but it will not save user.is_active = False # there should be no problem saving - but the attribute is not saved user.save() user_fetched = UserModel._default_manager.get(pk=user.pk) # the attribute is always true for newly retrieved instance self.assertIs(user_fetched.is_active, True) class TestCreateSuperUserSignals(TestCase): """ Simple test case for ticket #20541 """ def post_save_listener(self, *args, **kwargs): self.signals_count += 1 def setUp(self): self.signals_count = 0 post_save.connect(self.post_save_listener, sender=User) def tearDown(self): post_save.disconnect(self.post_save_listener, sender=User) def test_create_user(self): User.objects.create_user("JohnDoe") self.assertEqual(self.signals_count, 1) def test_create_superuser(self): 
User.objects.create_superuser("JohnDoe", "[email protected]", "1") self.assertEqual(self.signals_count, 1) class AnonymousUserTests(SimpleTestCase): no_repr_msg = "Django doesn't provide a DB representation for AnonymousUser." def setUp(self): self.user = AnonymousUser() def test_properties(self): self.assertIsNone(self.user.pk) self.assertEqual(self.user.username, '') self.assertEqual(self.user.get_username(), '') self.assertIs(self.user.is_anonymous, True) self.assertIs(self.user.is_authenticated, False) self.assertIs(self.user.is_staff, False) self.assertIs(self.user.is_active, False) self.assertIs(self.user.is_superuser, False) self.assertEqual(self.user.groups.all().count(), 0) self.assertEqual(self.user.user_permissions.all().count(), 0) self.assertEqual(self.user.get_user_permissions(), set()) self.assertEqual(self.user.get_group_permissions(), set()) def test_str(self): self.assertEqual(str(self.user), 'AnonymousUser') def test_eq(self): self.assertEqual(self.user, AnonymousUser()) self.assertNotEqual(self.user, User('super', '[email protected]', 'super')) def test_hash(self): self.assertEqual(hash(self.user), 1) def test_int(self): msg = ( 'Cannot cast AnonymousUser to int. Are you trying to use it in ' 'place of User?' ) with self.assertRaisesMessage(TypeError, msg): int(self.user) def test_delete(self): with self.assertRaisesMessage(NotImplementedError, self.no_repr_msg): self.user.delete() def test_save(self): with self.assertRaisesMessage(NotImplementedError, self.no_repr_msg): self.user.save() def test_set_password(self): with self.assertRaisesMessage(NotImplementedError, self.no_repr_msg): self.user.set_password('password') def test_check_password(self): with self.assertRaisesMessage(NotImplementedError, self.no_repr_msg): self.user.check_password('password') class GroupTests(SimpleTestCase): def test_str(self): g = Group(name='Users') self.assertEqual(str(g), 'Users') class PermissionTests(TestCase): def test_str(self): p = Permission.objects.get(codename='view_customemailfield') self.assertEqual(str(p), 'auth_tests | custom email field | Can view custom email field')
352645b22ca25a80550d384eb6127cd343e94227a058d63c710ba42def794120
import datetime import pickle import sys import unittest from operator import attrgetter from threading import Lock from django.core.exceptions import EmptyResultSet, FieldError from django.db import DEFAULT_DB_ALIAS, connection from django.db.models import Count, Exists, F, OuterRef, Q from django.db.models.expressions import RawSQL from django.db.models.sql.constants import LOUTER from django.db.models.sql.where import NothingNode, WhereNode from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature from django.test.utils import CaptureQueriesContext, ignore_warnings from django.utils.deprecation import RemovedInDjango40Warning from .models import ( FK1, Annotation, Article, Author, BaseA, Book, CategoryItem, CategoryRelationship, Celebrity, Channel, Chapter, Child, ChildObjectA, Classroom, CommonMixedCaseForeignKeys, Company, Cover, CustomPk, CustomPkTag, DateTimePK, Detail, DumbCategory, Eaten, Employment, ExtraInfo, Fan, Food, Identifier, Individual, Item, Job, JobResponsibilities, Join, LeafA, LeafB, LoopX, LoopZ, ManagedModel, Member, MixedCaseDbColumnCategoryItem, MixedCaseFieldCategoryItem, ModelA, ModelB, ModelC, ModelD, MyObject, NamedCategory, Node, Note, NullableName, Number, ObjectA, ObjectB, ObjectC, OneToOneCategory, Order, OrderItem, Page, Paragraph, Person, Plaything, PointerA, Program, ProxyCategory, ProxyObjectA, ProxyObjectB, Ranking, Related, RelatedIndividual, RelatedObject, Report, ReportComment, ReservedName, Responsibility, School, SharedConnection, SimpleCategory, SingleObject, SpecialCategory, Staff, StaffUser, Student, Tag, Task, Teacher, Ticket21203Child, Ticket21203Parent, Ticket23605A, Ticket23605B, Ticket23605C, TvChef, Valid, X, ) class Queries1Tests(TestCase): @classmethod def setUpTestData(cls): generic = NamedCategory.objects.create(name="Generic") cls.t1 = Tag.objects.create(name='t1', category=generic) cls.t2 = Tag.objects.create(name='t2', parent=cls.t1, category=generic) cls.t3 = Tag.objects.create(name='t3', parent=cls.t1) t4 = Tag.objects.create(name='t4', parent=cls.t3) cls.t5 = Tag.objects.create(name='t5', parent=cls.t3) cls.n1 = Note.objects.create(note='n1', misc='foo', id=1) cls.n2 = Note.objects.create(note='n2', misc='bar', id=2) cls.n3 = Note.objects.create(note='n3', misc='foo', id=3) ann1 = Annotation.objects.create(name='a1', tag=cls.t1) ann1.notes.add(cls.n1) ann2 = Annotation.objects.create(name='a2', tag=t4) ann2.notes.add(cls.n2, cls.n3) # Create these out of order so that sorting by 'id' will be different to sorting # by 'info'. Helps detect some problems later. 
cls.e2 = ExtraInfo.objects.create(info='e2', note=cls.n2, value=41, filterable=False) e1 = ExtraInfo.objects.create(info='e1', note=cls.n1, value=42) cls.a1 = Author.objects.create(name='a1', num=1001, extra=e1) cls.a2 = Author.objects.create(name='a2', num=2002, extra=e1) cls.a3 = Author.objects.create(name='a3', num=3003, extra=cls.e2) cls.a4 = Author.objects.create(name='a4', num=4004, extra=cls.e2) cls.time1 = datetime.datetime(2007, 12, 19, 22, 25, 0) cls.time2 = datetime.datetime(2007, 12, 19, 21, 0, 0) time3 = datetime.datetime(2007, 12, 20, 22, 25, 0) time4 = datetime.datetime(2007, 12, 20, 21, 0, 0) cls.i1 = Item.objects.create(name='one', created=cls.time1, modified=cls.time1, creator=cls.a1, note=cls.n3) cls.i1.tags.set([cls.t1, cls.t2]) cls.i2 = Item.objects.create(name='two', created=cls.time2, creator=cls.a2, note=cls.n2) cls.i2.tags.set([cls.t1, cls.t3]) cls.i3 = Item.objects.create(name='three', created=time3, creator=cls.a2, note=cls.n3) i4 = Item.objects.create(name='four', created=time4, creator=cls.a4, note=cls.n3) i4.tags.set([t4]) cls.r1 = Report.objects.create(name='r1', creator=cls.a1) Report.objects.create(name='r2', creator=cls.a3) Report.objects.create(name='r3') # Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering # will be rank3, rank2, rank1. cls.rank1 = Ranking.objects.create(rank=2, author=cls.a2) Cover.objects.create(title="first", item=i4) Cover.objects.create(title="second", item=cls.i2) def test_subquery_condition(self): qs1 = Tag.objects.filter(pk__lte=0) qs2 = Tag.objects.filter(parent__in=qs1) qs3 = Tag.objects.filter(parent__in=qs2) self.assertEqual(qs3.query.subq_aliases, {'T', 'U', 'V'}) self.assertIn('v0', str(qs3.query).lower()) qs4 = qs3.filter(parent__in=qs1) self.assertEqual(qs4.query.subq_aliases, {'T', 'U', 'V'}) # It is possible to reuse U for the second subquery, no need to use W. self.assertNotIn('w0', str(qs4.query).lower()) # So, 'U0."id"' is referenced in SELECT and WHERE twice. self.assertEqual(str(qs4.query).lower().count('u0.'), 4) def test_ticket1050(self): self.assertQuerysetEqual( Item.objects.filter(tags__isnull=True), ['<Item: three>'] ) self.assertQuerysetEqual( Item.objects.filter(tags__id__isnull=True), ['<Item: three>'] ) def test_ticket1801(self): self.assertQuerysetEqual( Author.objects.filter(item=self.i2), ['<Author: a2>'] ) self.assertQuerysetEqual( Author.objects.filter(item=self.i3), ['<Author: a2>'] ) self.assertQuerysetEqual( Author.objects.filter(item=self.i2) & Author.objects.filter(item=self.i3), ['<Author: a2>'] ) def test_ticket2306(self): # Checking that no join types are "left outer" joins. query = Item.objects.filter(tags=self.t2).query self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()]) self.assertQuerysetEqual( Item.objects.filter(Q(tags=self.t1)).order_by('name'), ['<Item: one>', '<Item: two>'] ) self.assertQuerysetEqual( Item.objects.filter(Q(tags=self.t1)).filter(Q(tags=self.t2)), ['<Item: one>'] ) self.assertQuerysetEqual( Item.objects.filter(Q(tags=self.t1)).filter(Q(creator__name='fred') | Q(tags=self.t2)), ['<Item: one>'] ) # Each filter call is processed "at once" against a single table, so this is # different from the previous example as it tries to find tags that are two # things at once (rather than two tags). 
        self.assertQuerysetEqual(
            Item.objects.filter(Q(tags=self.t1) & Q(tags=self.t2)),
            []
        )
        self.assertQuerysetEqual(
            Item.objects.filter(Q(tags=self.t1), Q(creator__name='fred') | Q(tags=self.t2)),
            []
        )
        qs = Author.objects.filter(ranking__rank=2, ranking__id=self.rank1.id)
        self.assertQuerysetEqual(list(qs), ['<Author: a2>'])
        self.assertEqual(qs.query.count_active_tables(), 2)
        qs = Author.objects.filter(ranking__rank=2).filter(ranking__id=self.rank1.id)
        self.assertEqual(qs.query.count_active_tables(), 3)

    def test_ticket4464(self):
        self.assertQuerysetEqual(
            Item.objects.filter(tags=self.t1).filter(tags=self.t2),
            ['<Item: one>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name'),
            ['<Item: one>', '<Item: two>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(tags__in=[self.t1, self.t2]).filter(tags=self.t3),
            ['<Item: two>']
        )
        # Make sure .distinct() works with slicing (this was broken in Oracle).
        self.assertQuerysetEqual(
            Item.objects.filter(tags__in=[self.t1, self.t2]).order_by('name')[:3],
            ['<Item: one>', '<Item: one>', '<Item: two>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name')[:3],
            ['<Item: one>', '<Item: two>']
        )

    def test_tickets_2080_3592(self):
        self.assertQuerysetEqual(
            Author.objects.filter(item__name='one') | Author.objects.filter(name='a3'),
            ['<Author: a1>', '<Author: a3>']
        )
        self.assertQuerysetEqual(
            Author.objects.filter(Q(item__name='one') | Q(name='a3')),
            ['<Author: a1>', '<Author: a3>']
        )
        self.assertQuerysetEqual(
            Author.objects.filter(Q(name='a3') | Q(item__name='one')),
            ['<Author: a1>', '<Author: a3>']
        )
        self.assertQuerysetEqual(
            Author.objects.filter(Q(item__name='three') | Q(report__name='r3')),
            ['<Author: a2>']
        )

    def test_ticket6074(self):
        # Merging two empty result sets shouldn't leave a queryset with no constraints
        # (which would match everything).
        self.assertQuerysetEqual(Author.objects.filter(Q(id__in=[])), [])
        self.assertQuerysetEqual(
            Author.objects.filter(Q(id__in=[]) | Q(id__in=[])),
            []
        )

    def test_tickets_1878_2939(self):
        self.assertEqual(Item.objects.values('creator').distinct().count(), 3)

        # Create something with a duplicate 'name' so that we can test multi-column
        # cases (which require some tricky SQL transformations under the covers).
xx = Item(name='four', created=self.time1, creator=self.a2, note=self.n1) xx.save() self.assertEqual( Item.objects.exclude(name='two').values('creator', 'name').distinct().count(), 4 ) self.assertEqual( ( Item.objects .exclude(name='two') .extra(select={'foo': '%s'}, select_params=(1,)) .values('creator', 'name', 'foo') .distinct() .count() ), 4 ) self.assertEqual( ( Item.objects .exclude(name='two') .extra(select={'foo': '%s'}, select_params=(1,)) .values('creator', 'name') .distinct() .count() ), 4 ) xx.delete() def test_ticket7323(self): self.assertEqual(Item.objects.values('creator', 'name').count(), 4) def test_ticket2253(self): q1 = Item.objects.order_by('name') q2 = Item.objects.filter(id=self.i1.id) self.assertQuerysetEqual( q1, ['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>'] ) self.assertQuerysetEqual(q2, ['<Item: one>']) self.assertQuerysetEqual( (q1 | q2).order_by('name'), ['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>'] ) self.assertQuerysetEqual((q1 & q2).order_by('name'), ['<Item: one>']) q1 = Item.objects.filter(tags=self.t1) q2 = Item.objects.filter(note=self.n3, tags=self.t2) q3 = Item.objects.filter(creator=self.a4) self.assertQuerysetEqual( ((q1 & q2) | q3).order_by('name'), ['<Item: four>', '<Item: one>'] ) def test_order_by_tables(self): q1 = Item.objects.order_by('name') q2 = Item.objects.filter(id=self.i1.id) list(q2) combined_query = (q1 & q2).order_by('name').query self.assertEqual(len([ t for t in combined_query.alias_map if combined_query.alias_refcount[t] ]), 1) def test_order_by_join_unref(self): """ This test is related to the above one, testing that there aren't old JOINs in the query. """ qs = Celebrity.objects.order_by('greatest_fan__fan_of') self.assertIn('OUTER JOIN', str(qs.query)) qs = qs.order_by('id') self.assertNotIn('OUTER JOIN', str(qs.query)) def test_get_clears_ordering(self): """ get() should clear ordering for optimization purposes. """ with CaptureQueriesContext(connection) as captured_queries: Author.objects.order_by('name').get(pk=self.a1.pk) self.assertNotIn('order by', captured_queries[0]['sql'].lower()) def test_tickets_4088_4306(self): self.assertQuerysetEqual( Report.objects.filter(creator=1001), ['<Report: r1>'] ) self.assertQuerysetEqual( Report.objects.filter(creator__num=1001), ['<Report: r1>'] ) self.assertQuerysetEqual(Report.objects.filter(creator__id=1001), []) self.assertQuerysetEqual( Report.objects.filter(creator__id=self.a1.id), ['<Report: r1>'] ) self.assertQuerysetEqual( Report.objects.filter(creator__name='a1'), ['<Report: r1>'] ) def test_ticket4510(self): self.assertQuerysetEqual( Author.objects.filter(report__name='r1'), ['<Author: a1>'] ) def test_ticket7378(self): self.assertQuerysetEqual(self.a1.report_set.all(), ['<Report: r1>']) def test_tickets_5324_6704(self): self.assertQuerysetEqual( Item.objects.filter(tags__name='t4'), ['<Item: four>'] ) self.assertQuerysetEqual( Item.objects.exclude(tags__name='t4').order_by('name').distinct(), ['<Item: one>', '<Item: three>', '<Item: two>'] ) self.assertQuerysetEqual( Item.objects.exclude(tags__name='t4').order_by('name').distinct().reverse(), ['<Item: two>', '<Item: three>', '<Item: one>'] ) self.assertQuerysetEqual( Author.objects.exclude(item__name='one').distinct().order_by('name'), ['<Author: a2>', '<Author: a3>', '<Author: a4>'] ) # Excluding across a m2m relation when there is more than one related # object associated was problematic. 
self.assertQuerysetEqual( Item.objects.exclude(tags__name='t1').order_by('name'), ['<Item: four>', '<Item: three>'] ) self.assertQuerysetEqual( Item.objects.exclude(tags__name='t1').exclude(tags__name='t4'), ['<Item: three>'] ) # Excluding from a relation that cannot be NULL should not use outer joins. query = Item.objects.exclude(creator__in=[self.a1, self.a2]).query self.assertNotIn(LOUTER, [x.join_type for x in query.alias_map.values()]) # Similarly, when one of the joins cannot possibly, ever, involve NULL # values (Author -> ExtraInfo, in the following), it should never be # promoted to a left outer join. So the following query should only # involve one "left outer" join (Author -> Item is 0-to-many). qs = Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1) | Q(item__note=self.n3)) self.assertEqual( len([ x for x in qs.query.alias_map.values() if x.join_type == LOUTER and qs.query.alias_refcount[x.table_alias] ]), 1 ) # The previous changes shouldn't affect nullable foreign key joins. self.assertQuerysetEqual( Tag.objects.filter(parent__isnull=True).order_by('name'), ['<Tag: t1>'] ) self.assertQuerysetEqual( Tag.objects.exclude(parent__isnull=True).order_by('name'), ['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Tag.objects.exclude(Q(parent__name='t1') | Q(parent__isnull=True)).order_by('name'), ['<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Tag.objects.exclude(Q(parent__isnull=True) | Q(parent__name='t1')).order_by('name'), ['<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Tag.objects.exclude(Q(parent__parent__isnull=True)).order_by('name'), ['<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Tag.objects.filter(~Q(parent__parent__isnull=True)).order_by('name'), ['<Tag: t4>', '<Tag: t5>'] ) def test_ticket2091(self): t = Tag.objects.get(name='t4') self.assertQuerysetEqual( Item.objects.filter(tags__in=[t]), ['<Item: four>'] ) def test_avoid_infinite_loop_on_too_many_subqueries(self): x = Tag.objects.filter(pk=1) local_recursion_limit = sys.getrecursionlimit() // 16 msg = 'Maximum recursion depth exceeded: too many subqueries.' with self.assertRaisesMessage(RecursionError, msg): for i in range(local_recursion_limit + 2): x = Tag.objects.filter(pk__in=x) def test_reasonable_number_of_subq_aliases(self): x = Tag.objects.filter(pk=1) for _ in range(20): x = Tag.objects.filter(pk__in=x) self.assertEqual( x.query.subq_aliases, { 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'AA', 'AB', 'AC', 'AD', 'AE', 'AF', 'AG', 'AH', 'AI', 'AJ', 'AK', 'AL', 'AM', 'AN', } ) def test_heterogeneous_qs_combination(self): # Combining querysets built on different models should behave in a well-defined # fashion. We raise an error. 
with self.assertRaisesMessage(AssertionError, 'Cannot combine queries on two different base models.'): Author.objects.all() & Tag.objects.all() with self.assertRaisesMessage(AssertionError, 'Cannot combine queries on two different base models.'): Author.objects.all() | Tag.objects.all() def test_ticket3141(self): self.assertEqual(Author.objects.extra(select={'foo': '1'}).count(), 4) self.assertEqual( Author.objects.extra(select={'foo': '%s'}, select_params=(1,)).count(), 4 ) def test_ticket2400(self): self.assertQuerysetEqual( Author.objects.filter(item__isnull=True), ['<Author: a3>'] ) self.assertQuerysetEqual( Tag.objects.filter(item__isnull=True), ['<Tag: t5>'] ) def test_ticket2496(self): self.assertQuerysetEqual( Item.objects.extra(tables=['queries_author']).select_related().order_by('name')[:1], ['<Item: four>'] ) def test_error_raised_on_filter_with_dictionary(self): with self.assertRaisesMessage(FieldError, 'Cannot parse keyword query as dict'): Note.objects.filter({'note': 'n1', 'misc': 'foo'}) def test_tickets_2076_7256(self): # Ordering on related tables should be possible, even if the table is # not otherwise involved. self.assertQuerysetEqual( Item.objects.order_by('note__note', 'name'), ['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>'] ) # Ordering on a related field should use the remote model's default # ordering as a final step. self.assertQuerysetEqual( Author.objects.order_by('extra', '-name'), ['<Author: a2>', '<Author: a1>', '<Author: a4>', '<Author: a3>'] ) # Using remote model default ordering can span multiple models (in this # case, Cover is ordered by Item's default, which uses Note's default). self.assertQuerysetEqual( Cover.objects.all(), ['<Cover: first>', '<Cover: second>'] ) # If the remote model does not have a default ordering, we order by its 'id' # field. self.assertQuerysetEqual( Item.objects.order_by('creator', 'name'), ['<Item: one>', '<Item: three>', '<Item: two>', '<Item: four>'] ) # Ordering by a many-valued attribute (e.g. a many-to-many or reverse # ForeignKey) is legal, but the results might not make sense. That # isn't Django's problem. Garbage in, garbage out. self.assertQuerysetEqual( Item.objects.filter(tags__isnull=False).order_by('tags', 'id'), ['<Item: one>', '<Item: two>', '<Item: one>', '<Item: two>', '<Item: four>'] ) # If we replace the default ordering, Django adjusts the required # tables automatically. Item normally requires a join with Note to do # the default ordering, but that isn't needed here. qs = Item.objects.order_by('name') self.assertQuerysetEqual( qs, ['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>'] ) self.assertEqual(len(qs.query.alias_map), 1) def test_tickets_2874_3002(self): qs = Item.objects.select_related().order_by('note__note', 'name') self.assertQuerysetEqual( qs, ['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>'] ) # This is also a good select_related() test because there are multiple # Note entries in the SQL. The two Note items should be different. self.assertEqual(repr(qs[0].note), '<Note: n2>') self.assertEqual(repr(qs[0].creator.extra.note), '<Note: n1>') def test_ticket3037(self): self.assertQuerysetEqual( Item.objects.filter(Q(creator__name='a3', name='two') | Q(creator__name='a4', name='four')), ['<Item: four>'] ) def test_tickets_5321_7070(self): # Ordering columns must be included in the output columns. Note that # this means results that might otherwise be distinct are not (if there # are multiple values in the ordering cols), as in this example. 
This # isn't a bug; it's a warning to be careful with the selection of # ordering columns. self.assertSequenceEqual( Note.objects.values('misc').distinct().order_by('note', '-misc'), [{'misc': 'foo'}, {'misc': 'bar'}, {'misc': 'foo'}] ) def test_ticket4358(self): # If you don't pass any fields to values(), relation fields are # returned as "foo_id" keys, not "foo". For consistency, you should be # able to pass "foo_id" in the fields list and have it work, too. We # actually allow both "foo" and "foo_id". # The *_id version is returned by default. self.assertIn('note_id', ExtraInfo.objects.values()[0]) # You can also pass it in explicitly. self.assertSequenceEqual(ExtraInfo.objects.values('note_id'), [{'note_id': 1}, {'note_id': 2}]) # ...or use the field name. self.assertSequenceEqual(ExtraInfo.objects.values('note'), [{'note': 1}, {'note': 2}]) def test_ticket6154(self): # Multiple filter statements are joined using "AND" all the time. self.assertQuerysetEqual( Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1) | Q(item__note=self.n3)), ['<Author: a1>'] ) self.assertQuerysetEqual( Author.objects.filter(Q(extra__note=self.n1) | Q(item__note=self.n3)).filter(id=self.a1.id), ['<Author: a1>'] ) def test_ticket6981(self): self.assertQuerysetEqual( Tag.objects.select_related('parent').order_by('name'), ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'] ) def test_ticket9926(self): self.assertQuerysetEqual( Tag.objects.select_related("parent", "category").order_by('name'), ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'] ) self.assertQuerysetEqual( Tag.objects.select_related('parent', "parent__category").order_by('name'), ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'] ) def test_tickets_6180_6203(self): # Dates with limits and/or counts self.assertEqual(Item.objects.count(), 4) self.assertEqual(Item.objects.datetimes('created', 'month').count(), 1) self.assertEqual(Item.objects.datetimes('created', 'day').count(), 2) self.assertEqual(len(Item.objects.datetimes('created', 'day')), 2) self.assertEqual(Item.objects.datetimes('created', 'day')[0], datetime.datetime(2007, 12, 19, 0, 0)) def test_tickets_7087_12242(self): # Dates with extra select columns self.assertQuerysetEqual( Item.objects.datetimes('created', 'day').extra(select={'a': 1}), ['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)'] ) self.assertQuerysetEqual( Item.objects.extra(select={'a': 1}).datetimes('created', 'day'), ['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)'] ) name = "one" self.assertQuerysetEqual( Item.objects.datetimes('created', 'day').extra(where=['name=%s'], params=[name]), ['datetime.datetime(2007, 12, 19, 0, 0)'] ) self.assertQuerysetEqual( Item.objects.extra(where=['name=%s'], params=[name]).datetimes('created', 'day'), ['datetime.datetime(2007, 12, 19, 0, 0)'] ) def test_ticket7155(self): # Nullable dates self.assertQuerysetEqual( Item.objects.datetimes('modified', 'day'), ['datetime.datetime(2007, 12, 19, 0, 0)'] ) @ignore_warnings(category=RemovedInDjango40Warning) def test_ticket7098(self): self.assertSequenceEqual( Item.objects.values('note__note').order_by('queries_note.note', 'id'), [{'note__note': 'n2'}, {'note__note': 'n3'}, {'note__note': 'n3'}, {'note__note': 'n3'}] ) def test_order_by_rawsql(self): self.assertSequenceEqual( Item.objects.values('note__note').order_by( RawSQL('queries_note.note', ()), 'id', ), [ {'note__note': 'n2'}, {'note__note': 'n3'}, {'note__note': 'n3'}, 

    def test_order_by_rawsql(self):
        self.assertSequenceEqual(
            Item.objects.values('note__note').order_by(
                RawSQL('queries_note.note', ()),
                'id',
            ),
            [
                {'note__note': 'n2'},
                {'note__note': 'n3'},
                {'note__note': 'n3'},
                {'note__note': 'n3'},
            ],
        )

    def test_order_by_raw_column_alias_warning(self):
        msg = (
            "Passing column raw column aliases to order_by() is deprecated. "
            "Wrap 'queries_author.name' in a RawSQL expression before "
            "passing it to order_by()."
        )
        with self.assertRaisesMessage(RemovedInDjango40Warning, msg):
            Item.objects.values('creator__name').order_by('queries_author.name')

    def test_ticket7096(self):
        # Make sure exclude() with multiple conditions continues to work.
        self.assertQuerysetEqual(
            Tag.objects.filter(parent=self.t1, name='t3').order_by('name'),
            ['<Tag: t3>']
        )
        self.assertQuerysetEqual(
            Tag.objects.exclude(parent=self.t1, name='t3').order_by('name'),
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t4>', '<Tag: t5>']
        )
        self.assertQuerysetEqual(
            Item.objects.exclude(tags__name='t1', name='one').order_by('name').distinct(),
            ['<Item: four>', '<Item: three>', '<Item: two>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(name__in=['three', 'four']).exclude(tags__name='t1').order_by('name'),
            ['<Item: four>', '<Item: three>']
        )

        # More twisted cases, involving nested negations.
        self.assertQuerysetEqual(
            Item.objects.exclude(~Q(tags__name='t1', name='one')),
            ['<Item: one>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(~Q(tags__name='t1', name='one'), name='two'),
            ['<Item: two>']
        )
        self.assertQuerysetEqual(
            Item.objects.exclude(~Q(tags__name='t1', name='one'), name='two'),
            ['<Item: four>', '<Item: one>', '<Item: three>']
        )

    def test_tickets_7204_7506(self):
        # Make sure querysets with related fields can be pickled. If this
        # doesn't crash, it's a Good Thing.
        pickle.dumps(Item.objects.all())

    def test_ticket7813(self):
        # We should also be able to pickle things that use select_related().
        # The only tricky thing here is to ensure that we do the related
        # selections properly after unpickling.
        qs = Item.objects.select_related()
        query = qs.query.get_compiler(qs.db).as_sql()[0]
        query2 = pickle.loads(pickle.dumps(qs.query))
        self.assertEqual(
            query2.get_compiler(qs.db).as_sql()[0],
            query
        )

    def test_deferred_load_qs_pickling(self):
        # Check pickling of deferred-loading querysets
        qs = Item.objects.defer('name', 'creator')
        q2 = pickle.loads(pickle.dumps(qs))
        self.assertEqual(list(qs), list(q2))
        q3 = pickle.loads(pickle.dumps(qs, pickle.HIGHEST_PROTOCOL))
        self.assertEqual(list(qs), list(q3))

    def test_ticket7277(self):
        self.assertQuerysetEqual(
            self.n1.annotation_set.filter(
                Q(tag=self.t5) | Q(tag__children=self.t5) | Q(tag__children__children=self.t5)
            ),
            ['<Annotation: a1>']
        )

    def test_tickets_7448_7707(self):
        # Complex objects should be converted to strings before being used in
        # lookups.
        self.assertQuerysetEqual(
            Item.objects.filter(created__in=[self.time1, self.time2]),
            ['<Item: one>', '<Item: two>']
        )
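
    # Editor's sketch, not part of the original suite: the documented way to
    # reuse a pickled query is to pickle qs.query and attach it to a fresh
    # queryset, which should reproduce the same rows as the original.
    def test_query_pickle_roundtrip_sketch(self):
        qs = Item.objects.select_related('creator')
        restored = Item.objects.all()
        restored.query = pickle.loads(pickle.dumps(qs.query))
        self.assertEqual(list(qs), list(restored))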

    def test_ticket7235(self):
        # An EmptyQuerySet should not raise exceptions if it is filtered.
        Eaten.objects.create(meal='m')
        q = Eaten.objects.none()
        with self.assertNumQueries(0):
            self.assertQuerysetEqual(q.all(), [])
            self.assertQuerysetEqual(q.filter(meal='m'), [])
            self.assertQuerysetEqual(q.exclude(meal='m'), [])
            self.assertQuerysetEqual(q.complex_filter({'pk': 1}), [])
            self.assertQuerysetEqual(q.select_related('food'), [])
            self.assertQuerysetEqual(q.annotate(Count('food')), [])
            self.assertQuerysetEqual(q.order_by('meal', 'food'), [])
            self.assertQuerysetEqual(q.distinct(), [])
            self.assertQuerysetEqual(
                q.extra(select={'foo': "1"}),
                []
            )
            self.assertQuerysetEqual(q.reverse(), [])
            q.query.low_mark = 1
            with self.assertRaisesMessage(AssertionError, 'Cannot change a query once a slice has been taken'):
                q.extra(select={'foo': "1"})
            self.assertQuerysetEqual(q.defer('meal'), [])
            self.assertQuerysetEqual(q.only('meal'), [])

    def test_ticket7791(self):
        # There were "issues" when ordering and distinct-ing on fields related
        # via ForeignKeys.
        self.assertEqual(
            len(Note.objects.order_by('extrainfo__info').distinct()),
            3
        )

        # Pickling of QuerySets using datetimes() should work.
        qs = Item.objects.datetimes('created', 'month')
        pickle.loads(pickle.dumps(qs))

    def test_ticket9997(self):
        # If a ValuesList or Values queryset is passed as an inner query, we
        # make sure it's only requesting a single value and use that as the
        # thing to select.
        self.assertQuerysetEqual(
            Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name')),
            ['<Tag: t2>', '<Tag: t3>']
        )

        # Multi-valued values() and values_list() querysets should raise
        # errors.
        with self.assertRaisesMessage(TypeError, 'Cannot use multi-field values as a filter value.'):
            Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name', 'id'))
        with self.assertRaisesMessage(TypeError, 'Cannot use multi-field values as a filter value.'):
            Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values_list('name', 'id'))

    def test_ticket9985(self):
        # qs.values_list(...).values(...) combinations should work.
        self.assertSequenceEqual(
            Note.objects.values_list("note", flat=True).values("id").order_by("id"),
            [{'id': 1}, {'id': 2}, {'id': 3}]
        )
        self.assertQuerysetEqual(
            Annotation.objects.filter(notes__in=Note.objects.filter(note="n1").values_list('note').values('id')),
            ['<Annotation: a1>']
        )

    def test_ticket10205(self):
        # When bailing out early because of an empty "__in" filter, we need
        # to set things up correctly internally so that subqueries can
        # continue properly.
        self.assertEqual(Tag.objects.filter(name__in=()).update(name="foo"), 0)
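
    # Editor's sketch, not part of the original suite: the same early bailout
    # means an empty __in filter never reaches the database when evaluated.
    def test_empty_in_no_queries_sketch(self):
        with self.assertNumQueries(0):
            self.assertEqual(list(Tag.objects.filter(name__in=[])), [])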

    def test_ticket10432(self):
        # Testing an empty "__in" filter with a generator as the value.
        def f():
            return iter([])
        n_obj = Note.objects.all()[0]

        def g():
            yield n_obj.pk
        self.assertQuerysetEqual(Note.objects.filter(pk__in=f()), [])
        self.assertEqual(list(Note.objects.filter(pk__in=g())), [n_obj])

    def test_ticket10742(self):
        # Queries used in an __in clause don't execute subqueries
        subq = Author.objects.filter(num__lt=3000)
        qs = Author.objects.filter(pk__in=subq)
        self.assertQuerysetEqual(qs, ['<Author: a1>', '<Author: a2>'])
        # The subquery result cache should not be populated
        self.assertIsNone(subq._result_cache)

        subq = Author.objects.filter(num__lt=3000)
        qs = Author.objects.exclude(pk__in=subq)
        self.assertQuerysetEqual(qs, ['<Author: a3>', '<Author: a4>'])
        # The subquery result cache should not be populated
        self.assertIsNone(subq._result_cache)

        subq = Author.objects.filter(num__lt=3000)
        self.assertQuerysetEqual(
            Author.objects.filter(Q(pk__in=subq) & Q(name='a1')),
            ['<Author: a1>']
        )
        # The subquery result cache should not be populated
        self.assertIsNone(subq._result_cache)

    def test_ticket7076(self):
        # Excluding shouldn't eliminate NULL entries.
        self.assertQuerysetEqual(
            Item.objects.exclude(modified=self.time1).order_by('name'),
            ['<Item: four>', '<Item: three>', '<Item: two>']
        )
        self.assertQuerysetEqual(
            Tag.objects.exclude(parent__name=self.t1.name),
            ['<Tag: t1>', '<Tag: t4>', '<Tag: t5>']
        )

    def test_ticket7181(self):
        # Ordering by related tables should accommodate nullable fields (this
        # test is a little tricky, since NULL ordering is database dependent.
        # Instead, we just count the number of results).
        self.assertEqual(len(Tag.objects.order_by('parent__name')), 5)

        # Empty querysets can be merged with others.
        self.assertQuerysetEqual(
            Note.objects.none() | Note.objects.all(),
            ['<Note: n1>', '<Note: n2>', '<Note: n3>']
        )
        self.assertQuerysetEqual(
            Note.objects.all() | Note.objects.none(),
            ['<Note: n1>', '<Note: n2>', '<Note: n3>']
        )
        self.assertQuerysetEqual(Note.objects.none() & Note.objects.all(), [])
        self.assertQuerysetEqual(Note.objects.all() & Note.objects.none(), [])

    def test_ticket9411(self):
        # Make sure bump_prefix() (an internal Query method) doesn't
        # (re-)break. It's sufficient that this query runs without error.
        qs = Tag.objects.values_list('id', flat=True).order_by('id')
        qs.query.bump_prefix(qs.query)
        first = qs[0]
        self.assertEqual(list(qs), list(range(first, first + 5)))
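
    # Editor's sketch, not part of the original suite: materializing the
    # subquery with list() makes it run separately, in contrast to the
    # uncached __in subqueries above.
    def test_in_with_materialized_subquery_sketch(self):
        subq = Author.objects.filter(num__lt=3000)
        qs = Author.objects.filter(pk__in=list(subq))
        self.assertQuerysetEqual(qs, ['<Author: a1>', '<Author: a2>'])
        # This time the cache is populated: the subquery ran on its own.
        self.assertIsNotNone(subq._result_cache)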

    def test_ticket8439(self):
        # Complex combinations of conjunctions, disjunctions and nullable
        # relations.
        self.assertQuerysetEqual(
            Author.objects.filter(Q(item__note__extrainfo=self.e2) | Q(report=self.r1, name='xyz')),
            ['<Author: a2>']
        )
        self.assertQuerysetEqual(
            Author.objects.filter(Q(report=self.r1, name='xyz') | Q(item__note__extrainfo=self.e2)),
            ['<Author: a2>']
        )
        self.assertQuerysetEqual(
            Annotation.objects.filter(Q(tag__parent=self.t1) | Q(notes__note='n1', name='a1')),
            ['<Annotation: a1>']
        )
        xx = ExtraInfo.objects.create(info='xx', note=self.n3)
        self.assertQuerysetEqual(
            Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)),
            ['<Note: n1>', '<Note: n3>']
        )
        q = Note.objects.filter(Q(extrainfo__author=self.a1) | Q(extrainfo=xx)).query
        self.assertEqual(
            len([x for x in q.alias_map.values() if x.join_type == LOUTER and q.alias_refcount[x.table_alias]]),
            1
        )

    def test_ticket17429(self):
        """
        Meta.ordering=None works the same as Meta.ordering=[]
        """
        original_ordering = Tag._meta.ordering
        Tag._meta.ordering = None
        try:
            self.assertQuerysetEqual(
                Tag.objects.all(),
                ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
                ordered=False
            )
        finally:
            Tag._meta.ordering = original_ordering

    def test_exclude(self):
        self.assertQuerysetEqual(
            Item.objects.exclude(tags__name='t4'),
            [repr(i) for i in Item.objects.filter(~Q(tags__name='t4'))])
        self.assertQuerysetEqual(
            Item.objects.exclude(Q(tags__name='t4') | Q(tags__name='t3')),
            [repr(i) for i in Item.objects.filter(~(Q(tags__name='t4') | Q(tags__name='t3')))])
        self.assertQuerysetEqual(
            Item.objects.exclude(Q(tags__name='t4') | ~Q(tags__name='t3')),
            [repr(i) for i in Item.objects.filter(~(Q(tags__name='t4') | ~Q(tags__name='t3')))])

    def test_nested_exclude(self):
        self.assertQuerysetEqual(
            Item.objects.exclude(~Q(tags__name='t4')),
            [repr(i) for i in Item.objects.filter(~~Q(tags__name='t4'))])

    def test_double_exclude(self):
        self.assertQuerysetEqual(
            Item.objects.filter(Q(tags__name='t4')),
            [repr(i) for i in Item.objects.filter(~~Q(tags__name='t4'))])
        self.assertQuerysetEqual(
            Item.objects.filter(Q(tags__name='t4')),
            [repr(i) for i in Item.objects.filter(~Q(~Q(tags__name='t4')))])

    def test_exclude_in(self):
        self.assertQuerysetEqual(
            Item.objects.exclude(Q(tags__name__in=['t4', 't3'])),
            [repr(i) for i in Item.objects.filter(~Q(tags__name__in=['t4', 't3']))])
        self.assertQuerysetEqual(
            Item.objects.filter(Q(tags__name__in=['t4', 't3'])),
            [repr(i) for i in Item.objects.filter(~~Q(tags__name__in=['t4', 't3']))])

    def test_ticket_10790_1(self):
        # Querying direct fields with isnull should trim the left outer join.
        # It also should not create INNER JOIN.
        q = Tag.objects.filter(parent__isnull=True)
        self.assertQuerysetEqual(q, ['<Tag: t1>'])
        self.assertNotIn('JOIN', str(q.query))

        q = Tag.objects.filter(parent__isnull=False)
        self.assertQuerysetEqual(
            q,
            ['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
        )
        self.assertNotIn('JOIN', str(q.query))

        q = Tag.objects.exclude(parent__isnull=True)
        self.assertQuerysetEqual(
            q,
            ['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
        )
        self.assertNotIn('JOIN', str(q.query))

        q = Tag.objects.exclude(parent__isnull=False)
        self.assertQuerysetEqual(q, ['<Tag: t1>'])
        self.assertNotIn('JOIN', str(q.query))

        q = Tag.objects.exclude(parent__parent__isnull=False)
        self.assertQuerysetEqual(
            q,
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>'],
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
        self.assertNotIn('INNER JOIN', str(q.query))
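
    # Editor's sketch, not part of the original suite: str(qs.query) renders
    # the SQL, which is what the join-counting assertions above rely on. A
    # filter on a related column (rather than isnull) keeps the INNER JOIN.
    def test_join_inspection_sketch(self):
        q = Tag.objects.filter(parent__name='t1')
        self.assertQuerysetEqual(q, ['<Tag: t2>', '<Tag: t3>'])
        self.assertIn('INNER JOIN', str(q.query))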

    def test_ticket_10790_2(self):
        # Querying across several tables should strip only the last outer
        # join, while preserving the preceding inner joins.
        q = Tag.objects.filter(parent__parent__isnull=False)
        self.assertQuerysetEqual(
            q,
            ['<Tag: t4>', '<Tag: t5>'],
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q.query).count('INNER JOIN'), 1)

        # Querying without isnull should not convert anything to left outer
        # join.
        q = Tag.objects.filter(parent__parent=self.t1)
        self.assertQuerysetEqual(
            q,
            ['<Tag: t4>', '<Tag: t5>'],
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q.query).count('INNER JOIN'), 1)

    def test_ticket_10790_3(self):
        # Querying via indirect fields should populate the left outer join
        q = NamedCategory.objects.filter(tag__isnull=True)
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
        # join to dumbcategory ptr_id
        self.assertEqual(str(q.query).count('INNER JOIN'), 1)
        self.assertQuerysetEqual(q, [])

        # Querying across several tables should strip only the last join,
        # while preserving the preceding left outer joins.
        q = NamedCategory.objects.filter(tag__parent__isnull=True)
        self.assertEqual(str(q.query).count('INNER JOIN'), 1)
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
        self.assertQuerysetEqual(q, ['<NamedCategory: Generic>'])

    def test_ticket_10790_4(self):
        # Querying across m2m field should not strip the m2m table from join.
        q = Author.objects.filter(item__tags__isnull=True)
        self.assertQuerysetEqual(
            q,
            ['<Author: a2>', '<Author: a3>'],
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 2)
        self.assertNotIn('INNER JOIN', str(q.query))

        q = Author.objects.filter(item__tags__parent__isnull=True)
        self.assertQuerysetEqual(
            q,
            ['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a3>'],
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 3)
        self.assertNotIn('INNER JOIN', str(q.query))

    def test_ticket_10790_5(self):
        # Querying with isnull=False across m2m field should not create outer
        # joins
        q = Author.objects.filter(item__tags__isnull=False)
        self.assertQuerysetEqual(
            q,
            ['<Author: a1>', '<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a4>']
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q.query).count('INNER JOIN'), 2)

        q = Author.objects.filter(item__tags__parent__isnull=False)
        self.assertQuerysetEqual(
            q,
            ['<Author: a1>', '<Author: a2>', '<Author: a4>']
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q.query).count('INNER JOIN'), 3)

        q = Author.objects.filter(item__tags__parent__parent__isnull=False)
        self.assertQuerysetEqual(
            q,
            ['<Author: a4>']
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q.query).count('INNER JOIN'), 4)

    def test_ticket_10790_6(self):
        # Querying with isnull=True across m2m field should not create inner
        # joins and strip last outer join
        q = Author.objects.filter(item__tags__parent__parent__isnull=True)
        self.assertQuerysetEqual(
            q,
            ['<Author: a1>', '<Author: a1>', '<Author: a2>', '<Author: a2>',
             '<Author: a2>', '<Author: a3>']
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 4)
        self.assertEqual(str(q.query).count('INNER JOIN'), 0)

        q = Author.objects.filter(item__tags__parent__isnull=True)
        self.assertQuerysetEqual(
            q,
            ['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a3>']
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 3)
        self.assertEqual(str(q.query).count('INNER JOIN'), 0)
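
    # Editor's sketch, not part of the original suite: isnull=True filters
    # are rendered as an IS NULL predicate over the (kept) joined table,
    # visible in the raw SQL.
    def test_reverse_isnull_sql_sketch(self):
        q = Author.objects.filter(item__isnull=True)
        self.assertIn('IS NULL', str(q.query))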

    def test_ticket_10790_7(self):
        # Reverse querying with isnull should not strip the join
        q = Author.objects.filter(item__isnull=True)
        self.assertQuerysetEqual(
            q,
            ['<Author: a3>']
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 1)
        self.assertEqual(str(q.query).count('INNER JOIN'), 0)

        q = Author.objects.filter(item__isnull=False)
        self.assertQuerysetEqual(
            q,
            ['<Author: a1>', '<Author: a2>', '<Author: a2>', '<Author: a4>']
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q.query).count('INNER JOIN'), 1)

    def test_ticket_10790_8(self):
        # Querying with combined q-objects should also strip the left outer
        # join
        q = Tag.objects.filter(Q(parent__isnull=True) | Q(parent=self.t1))
        self.assertQuerysetEqual(
            q,
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
        )
        self.assertEqual(str(q.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q.query).count('INNER JOIN'), 0)

    def test_ticket_10790_combine(self):
        # Combining queries should not re-populate the left outer join
        q1 = Tag.objects.filter(parent__isnull=True)
        q2 = Tag.objects.filter(parent__isnull=False)

        q3 = q1 | q2
        self.assertQuerysetEqual(
            q3,
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>'],
        )
        self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q3.query).count('INNER JOIN'), 0)

        q3 = q1 & q2
        self.assertQuerysetEqual(q3, [])
        self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q3.query).count('INNER JOIN'), 0)

        q2 = Tag.objects.filter(parent=self.t1)
        q3 = q1 | q2
        self.assertQuerysetEqual(
            q3,
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
        )
        self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q3.query).count('INNER JOIN'), 0)

        q3 = q2 | q1
        self.assertQuerysetEqual(
            q3,
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
        )
        self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(q3.query).count('INNER JOIN'), 0)

        q1 = Tag.objects.filter(parent__isnull=True)
        q2 = Tag.objects.filter(parent__parent__isnull=True)

        q3 = q1 | q2
        self.assertQuerysetEqual(
            q3,
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
        )
        self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 1)
        self.assertEqual(str(q3.query).count('INNER JOIN'), 0)

        q3 = q2 | q1
        self.assertQuerysetEqual(
            q3,
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t3>']
        )
        self.assertEqual(str(q3.query).count('LEFT OUTER JOIN'), 1)
        self.assertEqual(str(q3.query).count('INNER JOIN'), 0)

    def test_ticket19672(self):
        self.assertQuerysetEqual(
            Report.objects.filter(Q(creator__isnull=False) & ~Q(creator__extra__value=41)),
            ['<Report: r1>']
        )

    def test_ticket_20250(self):
        # A negated Q along with an annotated queryset failed in Django 1.4
        qs = Author.objects.annotate(Count('item'))
        qs = qs.filter(~Q(extra__value=0)).order_by('name')

        self.assertIn('SELECT', str(qs.query))
        self.assertQuerysetEqual(
            qs,
            ['<Author: a1>', '<Author: a2>', '<Author: a3>', '<Author: a4>']
        )

    def test_lookup_constraint_fielderror(self):
        msg = (
            "Cannot resolve keyword 'unknown_field' into field. Choices are: "
            "annotation, category, category_id, children, id, item, "
            "managedmodel, name, note, parent, parent_id"
        )
        with self.assertRaisesMessage(FieldError, msg):
            Tag.objects.filter(unknown_field__name='generic')
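
    # Editor's sketch, not part of the original suite: a FieldError is raised
    # just the same when the unknown name appears at the end of a relation
    # path rather than at its start (the message lists different choices, so
    # only the exception type is checked here).
    def test_lookup_constraint_fielderror_related_sketch(self):
        with self.assertRaises(FieldError):
            Tag.objects.filter(parent__unknown_field='generic')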
""" c1 = SimpleCategory.objects.create(name='c1') c2 = SimpleCategory.objects.create(name='c2') c3 = SimpleCategory.objects.create(name='c3') category = CategoryItem.objects.create(category=c1) mixed_case_field_category = MixedCaseFieldCategoryItem.objects.create(CaTeGoRy=c2) mixed_case_db_column_category = MixedCaseDbColumnCategoryItem.objects.create(category=c3) CommonMixedCaseForeignKeys.objects.create( category=category, mixed_case_field_category=mixed_case_field_category, mixed_case_db_column_category=mixed_case_db_column_category, ) qs = CommonMixedCaseForeignKeys.objects.values( 'category', 'mixed_case_field_category', 'mixed_case_db_column_category', 'category__category', 'mixed_case_field_category__CaTeGoRy', 'mixed_case_db_column_category__category', ) self.assertTrue(qs.first()) def test_excluded_intermediary_m2m_table_joined(self): self.assertSequenceEqual( Note.objects.filter(~Q(tag__annotation__name=F('note'))), [self.n1, self.n2, self.n3], ) self.assertSequenceEqual( Note.objects.filter(tag__annotation__name='a1').filter(~Q(tag__annotation__name=F('note'))), [], ) def test_field_with_filterable(self): self.assertSequenceEqual( Author.objects.filter(extra=self.e2), [self.a3, self.a4], ) class Queries2Tests(TestCase): @classmethod def setUpTestData(cls): Number.objects.create(num=4) Number.objects.create(num=8) Number.objects.create(num=12) def test_ticket4289(self): # A slight variation on the restricting the filtering choices by the # lookup constraints. self.assertQuerysetEqual(Number.objects.filter(num__lt=4), []) self.assertQuerysetEqual(Number.objects.filter(num__gt=8, num__lt=12), []) self.assertQuerysetEqual( Number.objects.filter(num__gt=8, num__lt=13), ['<Number: 12>'] ) self.assertQuerysetEqual( Number.objects.filter(Q(num__lt=4) | Q(num__gt=8, num__lt=12)), [] ) self.assertQuerysetEqual( Number.objects.filter(Q(num__gt=8, num__lt=12) | Q(num__lt=4)), [] ) self.assertQuerysetEqual( Number.objects.filter(Q(num__gt=8) & Q(num__lt=12) | Q(num__lt=4)), [] ) self.assertQuerysetEqual( Number.objects.filter(Q(num__gt=7) & Q(num__lt=12) | Q(num__lt=4)), ['<Number: 8>'] ) def test_ticket12239(self): # Custom lookups are registered to round float values correctly on gte # and lt IntegerField queries. 

    def test_ticket12239(self):
        # Custom lookups are registered to round float values correctly on
        # gte and lt IntegerField queries.
        self.assertQuerysetEqual(
            Number.objects.filter(num__gt=11.9),
            ['<Number: 12>']
        )
        self.assertQuerysetEqual(Number.objects.filter(num__gt=12), [])
        self.assertQuerysetEqual(Number.objects.filter(num__gt=12.0), [])
        self.assertQuerysetEqual(Number.objects.filter(num__gt=12.1), [])
        self.assertQuerysetEqual(
            Number.objects.filter(num__lt=12),
            ['<Number: 4>', '<Number: 8>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lt=12.0),
            ['<Number: 4>', '<Number: 8>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lt=12.1),
            ['<Number: 4>', '<Number: 8>', '<Number: 12>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__gte=11.9),
            ['<Number: 12>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__gte=12),
            ['<Number: 12>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__gte=12.0),
            ['<Number: 12>']
        )
        self.assertQuerysetEqual(Number.objects.filter(num__gte=12.1), [])
        self.assertQuerysetEqual(Number.objects.filter(num__gte=12.9), [])
        self.assertQuerysetEqual(
            Number.objects.filter(num__lte=11.9),
            ['<Number: 4>', '<Number: 8>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lte=12),
            ['<Number: 4>', '<Number: 8>', '<Number: 12>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lte=12.0),
            ['<Number: 4>', '<Number: 8>', '<Number: 12>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lte=12.1),
            ['<Number: 4>', '<Number: 8>', '<Number: 12>'],
            ordered=False
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lte=12.9),
            ['<Number: 4>', '<Number: 8>', '<Number: 12>'],
            ordered=False
        )

    def test_ticket7759(self):
        # Count should work with a partially read result set.
        count = Number.objects.count()
        qs = Number.objects.all()

        def run():
            for obj in qs:
                return qs.count() == count
        self.assertTrue(run())
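
    # Editor's sketch, not part of the original suite: count() issues a
    # single COUNT query of its own and leaves the partially read iterator
    # above untouched.
    def test_count_single_query_sketch(self):
        qs = Number.objects.all()
        with self.assertNumQueries(1):
            self.assertEqual(qs.count(), 3)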


class Queries3Tests(TestCase):
    def test_ticket7107(self):
        # This shouldn't create an infinite loop.
        self.assertQuerysetEqual(Valid.objects.all(), [])

    def test_ticket8683(self):
        # An error should be raised when QuerySet.datetimes() is passed the
        # wrong type of field.
        with self.assertRaisesMessage(AssertionError, "'name' isn't a DateField, TimeField, or DateTimeField."):
            Item.objects.datetimes('name', 'month')

    def test_ticket22023(self):
        with self.assertRaisesMessage(TypeError, "Cannot call only() after .values() or .values_list()"):
            Valid.objects.values().only()

        with self.assertRaisesMessage(TypeError, "Cannot call defer() after .values() or .values_list()"):
            Valid.objects.values().defer()


class Queries4Tests(TestCase):
    @classmethod
    def setUpTestData(cls):
        generic = NamedCategory.objects.create(name="Generic")
        cls.t1 = Tag.objects.create(name='t1', category=generic)

        n1 = Note.objects.create(note='n1', misc='foo')
        n2 = Note.objects.create(note='n2', misc='bar')

        e1 = ExtraInfo.objects.create(info='e1', note=n1)
        e2 = ExtraInfo.objects.create(info='e2', note=n2)

        cls.a1 = Author.objects.create(name='a1', num=1001, extra=e1)
        cls.a3 = Author.objects.create(name='a3', num=3003, extra=e2)

        cls.r1 = Report.objects.create(name='r1', creator=cls.a1)
        cls.r2 = Report.objects.create(name='r2', creator=cls.a3)
        cls.r3 = Report.objects.create(name='r3')

        Item.objects.create(name='i1', created=datetime.datetime.now(), note=n1, creator=cls.a1)
        Item.objects.create(name='i2', created=datetime.datetime.now(), note=n1, creator=cls.a3)

    def test_ticket24525(self):
        tag = Tag.objects.create()
        anth100 = tag.note_set.create(note='ANTH', misc='100')
        math101 = tag.note_set.create(note='MATH', misc='101')
        s1 = tag.annotation_set.create(name='1')
        s2 = tag.annotation_set.create(name='2')
        s1.notes.set([math101, anth100])
        s2.notes.set([math101])
        result = math101.annotation_set.all() & tag.annotation_set.exclude(notes__in=[anth100])
        self.assertEqual(list(result), [s2])

    def test_ticket11811(self):
        unsaved_category = NamedCategory(name="Other")
        msg = 'Unsaved model instance <NamedCategory: Other> cannot be used in an ORM query.'
        with self.assertRaisesMessage(ValueError, msg):
            Tag.objects.filter(pk=self.t1.pk).update(category=unsaved_category)
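
    # Editor's sketch, not part of the original suite: on the Django version
    # this suite targets, filtering against an unsaved instance is assumed to
    # raise ValueError as well; the message differs from the update() case
    # above, so only the exception type is checked here.
    def test_unsaved_instance_filter_sketch(self):
        unsaved_category = NamedCategory(name="Other")
        with self.assertRaises(ValueError):
            list(Tag.objects.filter(category=unsaved_category))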

    def test_ticket14876(self):
        # Note: when combining the query we need to have information available
        # about the join type of the trimmed "creator__isnull" join. If we
        # don't have that information, then the join is created as INNER JOIN
        # and results will be incorrect.
        q1 = Report.objects.filter(Q(creator__isnull=True) | Q(creator__extra__info='e1'))
        q2 = Report.objects.filter(Q(creator__isnull=True)) | Report.objects.filter(Q(creator__extra__info='e1'))
        self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"], ordered=False)
        self.assertEqual(str(q1.query), str(q2.query))

        q1 = Report.objects.filter(Q(creator__extra__info='e1') | Q(creator__isnull=True))
        q2 = Report.objects.filter(Q(creator__extra__info='e1')) | Report.objects.filter(Q(creator__isnull=True))
        self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"], ordered=False)
        self.assertEqual(str(q1.query), str(q2.query))

        q1 = Item.objects.filter(Q(creator=self.a1) | Q(creator__report__name='r1')).order_by()
        q2 = (
            Item.objects.filter(Q(creator=self.a1)).order_by() |
            Item.objects.filter(Q(creator__report__name='r1')).order_by()
        )
        self.assertQuerysetEqual(q1, ["<Item: i1>"])
        self.assertEqual(str(q1.query), str(q2.query))

        q1 = Item.objects.filter(Q(creator__report__name='e1') | Q(creator=self.a1)).order_by()
        q2 = (
            Item.objects.filter(Q(creator__report__name='e1')).order_by() |
            Item.objects.filter(Q(creator=self.a1)).order_by()
        )
        self.assertQuerysetEqual(q1, ["<Item: i1>"])
        self.assertEqual(str(q1.query), str(q2.query))

    def test_combine_join_reuse(self):
        # Joins having identical connections are correctly recreated in the
        # rhs query, in case the query is ORed together (#18748).
        Report.objects.create(name='r4', creator=self.a1)
        q1 = Author.objects.filter(report__name='r5')
        q2 = Author.objects.filter(report__name='r4').filter(report__name='r1')
        combined = q1 | q2
        self.assertEqual(str(combined.query).count('JOIN'), 2)
        self.assertEqual(len(combined), 1)
        self.assertEqual(combined[0].name, 'a1')

    def test_join_reuse_order(self):
        # Join aliases are reused in order. This shouldn't raise
        # AssertionError because change_map contains a circular reference
        # (#26522).
        s1 = School.objects.create()
        s2 = School.objects.create()
        s3 = School.objects.create()
        t1 = Teacher.objects.create()

        otherteachers = Teacher.objects.exclude(pk=t1.pk).exclude(friends=t1)
        qs1 = otherteachers.filter(schools=s1).filter(schools=s2)
        qs2 = otherteachers.filter(schools=s1).filter(schools=s3)
        self.assertQuerysetEqual(qs1 | qs2, [])

    def test_ticket7095(self):
        # Updates that are filtered on the model being updated are somewhat
        # tricky in MySQL.
        ManagedModel.objects.create(data='mm1', tag=self.t1, public=True)
        self.assertEqual(ManagedModel.objects.update(data='mm'), 1)

        # A values() or values_list() query across joined models must use
        # outer joins appropriately.
        # Note: In Oracle, we expect a null CharField to return '' instead of
        # None.
        if connection.features.interprets_empty_strings_as_nulls:
            expected_null_charfield_repr = ''
        else:
            expected_null_charfield_repr = None
        self.assertSequenceEqual(
            Report.objects.values_list("creator__extra__info", flat=True).order_by("name"),
            ['e1', 'e2', expected_null_charfield_repr],
        )

        # Similarly for select_related(), joins beyond an initial nullable
        # join must use outer joins so that all results are included.
        self.assertQuerysetEqual(
            Report.objects.select_related("creator", "creator__extra").order_by("name"),
            ['<Report: r1>', '<Report: r2>', '<Report: r3>']
        )

        # When there are multiple paths to a table from another table, we have
        # to be careful not to accidentally reuse an inappropriate join when
        # using select_related(). We used to return the parent's Detail record
        # here by mistake.
        d1 = Detail.objects.create(data="d1")
        d2 = Detail.objects.create(data="d2")
        m1 = Member.objects.create(name="m1", details=d1)
        m2 = Member.objects.create(name="m2", details=d2)
        Child.objects.create(person=m2, parent=m1)
        obj = m1.children.select_related("person__details")[0]
        self.assertEqual(obj.person.details.data, 'd2')

    def test_order_by_resetting(self):
        # Calling order_by() with no parameters removes any existing ordering
        # on the model. But it should still be possible to add new ordering
        # after that.
        qs = Author.objects.order_by().order_by('name')
        self.assertIn('ORDER BY', qs.query.get_compiler(qs.db).as_sql()[0])

    def test_order_by_reverse_fk(self):
        # It is possible to order by reverse of foreign key, although that can
        # lead to duplicate results.
        c1 = SimpleCategory.objects.create(name="category1")
        c2 = SimpleCategory.objects.create(name="category2")
        CategoryItem.objects.create(category=c1)
        CategoryItem.objects.create(category=c2)
        CategoryItem.objects.create(category=c1)
        self.assertSequenceEqual(SimpleCategory.objects.order_by('categoryitem', 'pk'), [c1, c2, c1])

    def test_filter_reverse_non_integer_pk(self):
        date_obj = DateTimePK.objects.create()
        extra_obj = ExtraInfo.objects.create(info='extra', date=date_obj)
        self.assertEqual(
            DateTimePK.objects.filter(extrainfo=extra_obj).get(),
            date_obj,
        )

    def test_ticket10181(self):
        # Avoid raising an EmptyResultSet if an inner query is probably
        # empty (and hence, not executed).
        self.assertQuerysetEqual(
            Tag.objects.filter(id__in=Tag.objects.filter(id__in=[])),
            []
        )

    def test_ticket15316_filter_false(self):
        c1 = SimpleCategory.objects.create(name="category1")
        c2 = SpecialCategory.objects.create(name="named category1", special_name="special1")
        c3 = SpecialCategory.objects.create(name="named category2", special_name="special2")

        CategoryItem.objects.create(category=c1)
        ci2 = CategoryItem.objects.create(category=c2)
        ci3 = CategoryItem.objects.create(category=c3)

        qs = CategoryItem.objects.filter(category__specialcategory__isnull=False)
        self.assertEqual(qs.count(), 2)
        self.assertSequenceEqual(qs, [ci2, ci3])

    def test_ticket15316_exclude_false(self):
        c1 = SimpleCategory.objects.create(name="category1")
        c2 = SpecialCategory.objects.create(name="named category1", special_name="special1")
        c3 = SpecialCategory.objects.create(name="named category2", special_name="special2")

        ci1 = CategoryItem.objects.create(category=c1)
        CategoryItem.objects.create(category=c2)
        CategoryItem.objects.create(category=c3)

        qs = CategoryItem.objects.exclude(category__specialcategory__isnull=False)
        self.assertEqual(qs.count(), 1)
        self.assertSequenceEqual(qs, [ci1])

    def test_ticket15316_filter_true(self):
        c1 = SimpleCategory.objects.create(name="category1")
        c2 = SpecialCategory.objects.create(name="named category1", special_name="special1")
        c3 = SpecialCategory.objects.create(name="named category2", special_name="special2")

        ci1 = CategoryItem.objects.create(category=c1)
        CategoryItem.objects.create(category=c2)
        CategoryItem.objects.create(category=c3)

        qs = CategoryItem.objects.filter(category__specialcategory__isnull=True)
        self.assertEqual(qs.count(), 1)
        self.assertSequenceEqual(qs, [ci1])

    def test_ticket15316_exclude_true(self):
        c1 = SimpleCategory.objects.create(name="category1")
        c2 = SpecialCategory.objects.create(name="named category1", special_name="special1")
        c3 = SpecialCategory.objects.create(name="named category2", special_name="special2")

        CategoryItem.objects.create(category=c1)
        ci2 = CategoryItem.objects.create(category=c2)
        ci3 = CategoryItem.objects.create(category=c3)

        qs = CategoryItem.objects.exclude(category__specialcategory__isnull=True)
        self.assertEqual(qs.count(), 2)
        self.assertSequenceEqual(qs, [ci2, ci3])

    def test_ticket15316_one2one_filter_false(self):
        c = SimpleCategory.objects.create(name="cat")
        c0 = SimpleCategory.objects.create(name="cat0")
        c1 = SimpleCategory.objects.create(name="category1")

        OneToOneCategory.objects.create(category=c1, new_name="new1")
        OneToOneCategory.objects.create(category=c0, new_name="new2")

        CategoryItem.objects.create(category=c)
        ci2 = CategoryItem.objects.create(category=c0)
        ci3 = CategoryItem.objects.create(category=c1)

        qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=False).order_by('pk')
        self.assertEqual(qs.count(), 2)
        self.assertSequenceEqual(qs, [ci2, ci3])

    def test_ticket15316_one2one_exclude_false(self):
        c = SimpleCategory.objects.create(name="cat")
        c0 = SimpleCategory.objects.create(name="cat0")
        c1 = SimpleCategory.objects.create(name="category1")

        OneToOneCategory.objects.create(category=c1, new_name="new1")
        OneToOneCategory.objects.create(category=c0, new_name="new2")

        ci1 = CategoryItem.objects.create(category=c)
        CategoryItem.objects.create(category=c0)
        CategoryItem.objects.create(category=c1)

        qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=False)
        self.assertEqual(qs.count(), 1)
        self.assertSequenceEqual(qs, [ci1])

    def test_ticket15316_one2one_filter_true(self):
        c = SimpleCategory.objects.create(name="cat")
        c0 = SimpleCategory.objects.create(name="cat0")
        c1 = SimpleCategory.objects.create(name="category1")

        OneToOneCategory.objects.create(category=c1, new_name="new1")
        OneToOneCategory.objects.create(category=c0, new_name="new2")

        ci1 = CategoryItem.objects.create(category=c)
        CategoryItem.objects.create(category=c0)
        CategoryItem.objects.create(category=c1)

        qs = CategoryItem.objects.filter(category__onetoonecategory__isnull=True)
        self.assertEqual(qs.count(), 1)
        self.assertSequenceEqual(qs, [ci1])

    def test_ticket15316_one2one_exclude_true(self):
        c = SimpleCategory.objects.create(name="cat")
        c0 = SimpleCategory.objects.create(name="cat0")
        c1 = SimpleCategory.objects.create(name="category1")

        OneToOneCategory.objects.create(category=c1, new_name="new1")
        OneToOneCategory.objects.create(category=c0, new_name="new2")

        CategoryItem.objects.create(category=c)
        ci2 = CategoryItem.objects.create(category=c0)
        ci3 = CategoryItem.objects.create(category=c1)

        qs = CategoryItem.objects.exclude(category__onetoonecategory__isnull=True).order_by('pk')
        self.assertEqual(qs.count(), 2)
        self.assertSequenceEqual(qs, [ci2, ci3])


class Queries5Tests(TestCase):
    @classmethod
    def setUpTestData(cls):
        # Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the
        # Meta.ordering will be rank3, rank2, rank1.
        n1 = Note.objects.create(note='n1', misc='foo', id=1)
        n2 = Note.objects.create(note='n2', misc='bar', id=2)
        e1 = ExtraInfo.objects.create(info='e1', note=n1)
        e2 = ExtraInfo.objects.create(info='e2', note=n2)
        a1 = Author.objects.create(name='a1', num=1001, extra=e1)
        a2 = Author.objects.create(name='a2', num=2002, extra=e1)
        a3 = Author.objects.create(name='a3', num=3003, extra=e2)
        cls.rank1 = Ranking.objects.create(rank=2, author=a2)
        Ranking.objects.create(rank=1, author=a3)
        Ranking.objects.create(rank=3, author=a1)

    def test_ordering(self):
        # Cross model ordering is possible in Meta, too.
        self.assertQuerysetEqual(
            Ranking.objects.all(),
            ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
        )
        self.assertQuerysetEqual(
            Ranking.objects.all().order_by('rank'),
            ['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>']
        )

        # Ordering of extra() pieces is possible, too and you can mix extra
        # fields and model fields in the ordering.
        self.assertQuerysetEqual(
            Ranking.objects.extra(tables=['django_site'], order_by=['-django_site.id', 'rank']),
            ['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>']
        )

        sql = 'case when %s > 2 then 1 else 0 end' % connection.ops.quote_name('rank')
        qs = Ranking.objects.extra(select={'good': sql})
        self.assertEqual(
            [o.good for o in qs.extra(order_by=('-good',))],
            [True, False, False]
        )
        self.assertQuerysetEqual(
            qs.extra(order_by=('-good', 'id')),
            ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
        )

        # Despite having some extra aliases in the query, we can still omit
        # them in a values() query.
        dicts = qs.values('id', 'rank').order_by('id')
        self.assertEqual(
            [d['rank'] for d in dicts],
            [2, 1, 3]
        )

    def test_ticket7256(self):
        # An empty values() call includes all aliases, including those from an
        # extra()
        sql = 'case when %s > 2 then 1 else 0 end' % connection.ops.quote_name('rank')
        qs = Ranking.objects.extra(select={'good': sql})
        dicts = qs.values().order_by('id')
        for d in dicts:
            del d['id']
            del d['author_id']
        self.assertEqual(
            [sorted(d.items()) for d in dicts],
            [[('good', 0), ('rank', 2)], [('good', 0), ('rank', 1)], [('good', 1), ('rank', 3)]]
        )

    def test_ticket7045(self):
        # Extra tables used to crash SQL construction on the second use.
        qs = Ranking.objects.extra(tables=['django_site'])
        qs.query.get_compiler(qs.db).as_sql()
        # test passes if this doesn't raise an exception.
        qs.query.get_compiler(qs.db).as_sql()

    def test_ticket9848(self):
        # Make sure that updates which only filter on sub-tables don't
        # inadvertently update the wrong records (bug #9848).
        author_start = Author.objects.get(name='a1')
        ranking_start = Ranking.objects.get(author__name='a1')

        # Make sure that the IDs from different tables don't happen to match.
        self.assertQuerysetEqual(
            Ranking.objects.filter(author__name='a1'),
            ['<Ranking: 3: a1>']
        )
        self.assertEqual(
            Ranking.objects.filter(author__name='a1').update(rank=4636),
            1
        )

        r = Ranking.objects.get(author__name='a1')
        self.assertEqual(r.id, ranking_start.id)
        self.assertEqual(r.author.id, author_start.id)
        self.assertEqual(r.rank, 4636)
        r.rank = 3
        r.save()
        self.assertQuerysetEqual(
            Ranking.objects.all(),
            ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
        )

    def test_ticket5261(self):
        # Test different empty excludes.
        self.assertQuerysetEqual(
            Note.objects.exclude(Q()),
            ['<Note: n1>', '<Note: n2>']
        )
        self.assertQuerysetEqual(
            Note.objects.filter(~Q()),
            ['<Note: n1>', '<Note: n2>']
        )
        self.assertQuerysetEqual(
            Note.objects.filter(~Q() | ~Q()),
            ['<Note: n1>', '<Note: n2>']
        )
        self.assertQuerysetEqual(
            Note.objects.exclude(~Q() & ~Q()),
            ['<Note: n1>', '<Note: n2>']
        )
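
    # Editor's sketch, not part of the original suite: an empty Q() is a
    # no-op in filter() as well, matching the empty-exclude cases above.
    def test_empty_q_filter_sketch(self):
        self.assertQuerysetEqual(
            Note.objects.filter(Q()),
            ['<Note: n1>', '<Note: n2>']
        )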

    def test_extra_select_literal_percent_s(self):
        # Allow %%s to escape select clauses
        self.assertEqual(
            Note.objects.extra(select={'foo': "'%%s'"})[0].foo,
            '%s'
        )
        self.assertEqual(
            Note.objects.extra(select={'foo': "'%%s bar %%s'"})[0].foo,
            '%s bar %s'
        )
        self.assertEqual(
            Note.objects.extra(select={'foo': "'bar %%s'"})[0].foo,
            'bar %s'
        )


class SelectRelatedTests(TestCase):
    def test_tickets_3045_3288(self):
        # Once upon a time, select_related() with circular relations would
        # loop infinitely if you forgot to specify "depth". Now we set an
        # arbitrary default upper bound.
        self.assertQuerysetEqual(X.objects.all(), [])
        self.assertQuerysetEqual(X.objects.select_related(), [])


class SubclassFKTests(TestCase):
    def test_ticket7778(self):
        # Model subclasses could not be deleted if a nullable foreign key
        # relates to a model that relates back.
        num_celebs = Celebrity.objects.count()
        tvc = TvChef.objects.create(name="Huey")
        self.assertEqual(Celebrity.objects.count(), num_celebs + 1)
        Fan.objects.create(fan_of=tvc)
        Fan.objects.create(fan_of=tvc)
        tvc.delete()

        # The parent object should have been deleted as well.
        self.assertEqual(Celebrity.objects.count(), num_celebs)


class CustomPkTests(TestCase):
    def test_ticket7371(self):
        self.assertQuerysetEqual(Related.objects.order_by('custom'), [])


class NullableRelOrderingTests(TestCase):
    def test_ticket10028(self):
        # Ordering by model related to nullable relations(!) should use outer
        # joins, so that all results are included.
        Plaything.objects.create(name="p1")
        self.assertQuerysetEqual(
            Plaything.objects.all(),
            ['<Plaything: p1>']
        )

    def test_join_already_in_query(self):
        # Ordering by model related to nullable relations should not change
        # the join type of already existing joins.
        Plaything.objects.create(name="p1")
        s = SingleObject.objects.create(name='s')
        r = RelatedObject.objects.create(single=s, f=1)
        Plaything.objects.create(name="p2", others=r)
        qs = Plaything.objects.all().filter(others__isnull=False).order_by('pk')
        self.assertNotIn('JOIN', str(qs.query))
        qs = Plaything.objects.all().filter(others__f__isnull=False).order_by('pk')
        self.assertIn('INNER', str(qs.query))
        qs = qs.order_by('others__single__name')
        # The ordering by others__single__pk will add one new join (to single)
        # and that join must be LEFT join. The already existing join to
        # related objects must be kept INNER. So, we have both an INNER and a
        # LEFT join in the query.
        self.assertEqual(str(qs.query).count('LEFT'), 1)
        self.assertEqual(str(qs.query).count('INNER'), 1)
        self.assertQuerysetEqual(
            qs,
            ['<Plaything: p2>']
        )


class DisjunctiveFilterTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        cls.n1 = Note.objects.create(note='n1', misc='foo', id=1)
        ExtraInfo.objects.create(info='e1', note=cls.n1)

    def test_ticket7872(self):
        # Another variation on the disjunctive filtering theme.

        # For the purposes of this regression test, it's important that there
        # is no Join object related to the LeafA we create.
        LeafA.objects.create(data='first')
        self.assertQuerysetEqual(LeafA.objects.all(), ['<LeafA: first>'])
        self.assertQuerysetEqual(
            LeafA.objects.filter(Q(data='first') | Q(join__b__data='second')),
            ['<LeafA: first>']
        )

    def test_ticket8283(self):
        # Checking that applying filters after a disjunction works correctly.
        self.assertQuerysetEqual(
            (ExtraInfo.objects.filter(note=self.n1) | ExtraInfo.objects.filter(info='e2')).filter(note=self.n1),
            ['<ExtraInfo: e1>']
        )
        self.assertQuerysetEqual(
            (ExtraInfo.objects.filter(info='e2') | ExtraInfo.objects.filter(note=self.n1)).filter(note=self.n1),
            ['<ExtraInfo: e1>']
        )


class Queries6Tests(TestCase):
    @classmethod
    def setUpTestData(cls):
        generic = NamedCategory.objects.create(name="Generic")
        cls.t1 = Tag.objects.create(name='t1', category=generic)
        cls.t2 = Tag.objects.create(name='t2', parent=cls.t1, category=generic)
        cls.t3 = Tag.objects.create(name='t3', parent=cls.t1)
        cls.t4 = Tag.objects.create(name='t4', parent=cls.t3)
        cls.t5 = Tag.objects.create(name='t5', parent=cls.t3)
        n1 = Note.objects.create(note='n1', misc='foo', id=1)
        ann1 = Annotation.objects.create(name='a1', tag=cls.t1)
        ann1.notes.add(n1)
        Annotation.objects.create(name='a2', tag=cls.t4)

    def test_parallel_iterators(self):
        # Parallel iterators work.
        qs = Tag.objects.all()
        i1, i2 = iter(qs), iter(qs)
        self.assertEqual(repr(next(i1)), '<Tag: t1>')
        self.assertEqual(repr(next(i1)), '<Tag: t2>')
        self.assertEqual(repr(next(i2)), '<Tag: t1>')
        self.assertEqual(repr(next(i2)), '<Tag: t2>')
        self.assertEqual(repr(next(i2)), '<Tag: t3>')
        self.assertEqual(repr(next(i1)), '<Tag: t3>')

        qs = X.objects.all()
        self.assertFalse(qs)
        self.assertFalse(qs)

    def test_nested_queries_sql(self):
        # Nested queries should not evaluate the inner query as part of
        # constructing the SQL (so we should see a nested query here,
        # indicated by two "SELECT" calls).
        qs = Annotation.objects.filter(notes__in=Note.objects.filter(note="xyzzy"))
        self.assertEqual(
            qs.query.get_compiler(qs.db).as_sql()[0].count('SELECT'),
            2
        )

    def test_tickets_8921_9188(self):
        # Incorrect SQL was being generated for certain types of exclude()
        # queries that crossed multi-valued relations (#8921, #9188 and some
        # preemptively discovered cases).
        self.assertQuerysetEqual(
            PointerA.objects.filter(connection__pointerb__id=1),
            []
        )
        self.assertQuerysetEqual(
            PointerA.objects.exclude(connection__pointerb__id=1),
            []
        )
        self.assertQuerysetEqual(
            Tag.objects.exclude(children=None),
            ['<Tag: t1>', '<Tag: t3>']
        )

        # This example is tricky because the parent could be NULL, so only
        # checking parents with annotations omits some results (tag t1, in
        # this case).
        self.assertQuerysetEqual(
            Tag.objects.exclude(parent__annotation__name="a1"),
            ['<Tag: t1>', '<Tag: t4>', '<Tag: t5>']
        )

        # The annotation->tag link is single-valued and the tag->children
        # link is multi-valued. So we have to split the exclude filter in the
        # middle and then optimize the inner query without losing results.
        self.assertQuerysetEqual(
            Annotation.objects.exclude(tag__children__name="t2"),
            ['<Annotation: a2>']
        )

        # Nested queries are possible (although should be used with care,
        # since they have performance problems on backends like MySQL).
        self.assertQuerysetEqual(
            Annotation.objects.filter(notes__in=Note.objects.filter(note="n1")),
            ['<Annotation: a1>']
        )
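
    # Editor's sketch, not part of the original suite: the exclude() across
    # the multi-valued relation above is equivalent to subtracting the
    # matching pks via a values('pk') subquery.
    def test_exclude_multivalued_equivalence_sketch(self):
        excluded = Annotation.objects.filter(tag__children__name="t2")
        self.assertQuerysetEqual(
            Annotation.objects.exclude(pk__in=excluded.values('pk')),
            ['<Annotation: a2>']
        )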

    def test_ticket3739(self):
        # The all() method on querysets returns a copy of the queryset.
        q1 = Tag.objects.order_by('name')
        self.assertIsNot(q1, q1.all())

    def test_ticket_11320(self):
        qs = Tag.objects.exclude(category=None).exclude(category__name='foo')
        self.assertEqual(str(qs.query).count(' INNER JOIN '), 1)

    def test_distinct_ordered_sliced_subquery_aggregation(self):
        self.assertEqual(Tag.objects.distinct().order_by('category__name')[:3].count(), 3)

    def test_multiple_columns_with_the_same_name_slice(self):
        self.assertEqual(
            list(Tag.objects.order_by('name').values_list('name', 'category__name')[:2]),
            [('t1', 'Generic'), ('t2', 'Generic')],
        )
        self.assertSequenceEqual(
            Tag.objects.order_by('name').select_related('category')[:2],
            [self.t1, self.t2],
        )
        self.assertEqual(
            list(Tag.objects.order_by('-name').values_list('name', 'parent__name')[:2]),
            [('t5', 't3'), ('t4', 't3')],
        )
        self.assertSequenceEqual(
            Tag.objects.order_by('-name').select_related('parent')[:2],
            [self.t5, self.t4],
        )


class RawQueriesTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        Note.objects.create(note='n1', misc='foo', id=1)

    def test_ticket14729(self):
        # Test representation of raw query with one or few parameters passed
        # as list
        query = "SELECT * FROM queries_note WHERE note = %s"
        params = ['n1']
        qs = Note.objects.raw(query, params=params)
        self.assertEqual(repr(qs), "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1>")

        query = "SELECT * FROM queries_note WHERE note = %s and misc = %s"
        params = ['n1', 'foo']
        qs = Note.objects.raw(query, params=params)
        self.assertEqual(repr(qs), "<RawQuerySet: SELECT * FROM queries_note WHERE note = n1 and misc = foo>")
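
    # Editor's sketch, not part of the original suite: raw() yields model
    # instances, so the parametrized query above can be iterated like any
    # queryset.
    def test_raw_instances_sketch(self):
        qs = Note.objects.raw("SELECT * FROM queries_note WHERE note = %s", params=['n1'])
        self.assertEqual([note.note for note in qs], ['n1'])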


class GeneratorExpressionTests(SimpleTestCase):
    def test_ticket10432(self):
        # Using an empty iterator as the rvalue for an "__in" lookup is legal.
        self.assertCountEqual(Note.objects.filter(pk__in=iter(())), [])


class ComparisonTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        cls.n1 = Note.objects.create(note='n1', misc='foo', id=1)
        e1 = ExtraInfo.objects.create(info='e1', note=cls.n1)
        cls.a2 = Author.objects.create(name='a2', num=2002, extra=e1)

    def test_ticket8597(self):
        # Regression tests for case-insensitive comparisons
        Item.objects.create(name="a_b", created=datetime.datetime.now(), creator=self.a2, note=self.n1)
        Item.objects.create(name="x%y", created=datetime.datetime.now(), creator=self.a2, note=self.n1)
        self.assertQuerysetEqual(
            Item.objects.filter(name__iexact="A_b"),
            ['<Item: a_b>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(name__iexact="x%Y"),
            ['<Item: x%y>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(name__istartswith="A_b"),
            ['<Item: a_b>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(name__iendswith="A_b"),
            ['<Item: a_b>']
        )


class ExistsSql(TestCase):
    def test_exists(self):
        with CaptureQueriesContext(connection) as captured_queries:
            self.assertFalse(Tag.objects.exists())
        # Ok - so the exists query worked - but did it include too many
        # columns?
        self.assertEqual(len(captured_queries), 1)
        qstr = captured_queries[0]['sql']
        id, name = connection.ops.quote_name('id'), connection.ops.quote_name('name')
        self.assertNotIn(id, qstr)
        self.assertNotIn(name, qstr)

    def test_ticket_18414(self):
        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='two', created=datetime.datetime.now())
        self.assertTrue(Article.objects.exists())
        self.assertTrue(Article.objects.distinct().exists())
        self.assertTrue(Article.objects.distinct()[1:3].exists())
        self.assertFalse(Article.objects.distinct()[1:1].exists())

    @skipUnlessDBFeature('can_distinct_on_fields')
    def test_ticket_18414_distinct_on(self):
        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='two', created=datetime.datetime.now())
        self.assertTrue(Article.objects.distinct('name').exists())
        self.assertTrue(Article.objects.distinct('name')[1:2].exists())
        self.assertFalse(Article.objects.distinct('name')[2:3].exists())


class QuerysetOrderedTests(unittest.TestCase):
    """
    Tests for the QuerySet.ordered attribute.
    """

    def test_no_default_or_explicit_ordering(self):
        self.assertIs(Annotation.objects.all().ordered, False)

    def test_cleared_default_ordering(self):
        self.assertIs(Tag.objects.all().ordered, True)
        self.assertIs(Tag.objects.all().order_by().ordered, False)

    def test_explicit_ordering(self):
        self.assertIs(Annotation.objects.all().order_by('id').ordered, True)

    def test_empty_queryset(self):
        self.assertIs(Annotation.objects.none().ordered, True)

    def test_order_by_extra(self):
        self.assertIs(Annotation.objects.all().extra(order_by=['id']).ordered, True)

    def test_annotated_ordering(self):
        qs = Annotation.objects.annotate(num_notes=Count('notes'))
        self.assertIs(qs.ordered, False)
        self.assertIs(qs.order_by('num_notes').ordered, True)


@skipUnlessDBFeature('allow_sliced_subqueries_with_in')
class SubqueryTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        NamedCategory.objects.create(id=1, name='first')
        NamedCategory.objects.create(id=2, name='second')
        NamedCategory.objects.create(id=3, name='third')
        NamedCategory.objects.create(id=4, name='fourth')

    def test_ordered_subselect(self):
        "Subselects honor any manual ordering"
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2])
        self.assertEqual(set(query.values_list('id', flat=True)), {3, 4})

        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[:2])
        self.assertEqual(set(query.values_list('id', flat=True)), {3, 4})

        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:2])
        self.assertEqual(set(query.values_list('id', flat=True)), {3})

        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:])
        self.assertEqual(set(query.values_list('id', flat=True)), {1, 2})

    def test_slice_subquery_and_query(self):
        """
        Slice a query that has a sliced subquery
        """
        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2])[0:2]
        self.assertEqual({x.id for x in query}, {3, 4})

        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:3])[1:3]
        self.assertEqual({x.id for x in query}, {3})

        query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:])[1:]
        self.assertEqual({x.id for x in query}, {2})
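
    # Editor's sketch, not part of the original suite: the sliced subquery is
    # inlined into the outer statement, so evaluating the filter runs a
    # single database query.
    def test_sliced_subquery_single_query_sketch(self):
        inner = DumbCategory.objects.order_by('-id')[:2]
        with self.assertNumQueries(1):
            self.assertEqual(
                set(DumbCategory.objects.filter(id__in=inner).values_list('id', flat=True)),
                {3, 4},
            )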

    def test_related_sliced_subquery(self):
        """
        Related objects constraints can safely contain sliced subqueries.
        refs #22434
        """
        generic = NamedCategory.objects.create(id=5, name="Generic")
        t1 = Tag.objects.create(name='t1', category=generic)
        t2 = Tag.objects.create(name='t2', category=generic)
        ManagedModel.objects.create(data='mm1', tag=t1, public=True)
        mm2 = ManagedModel.objects.create(data='mm2', tag=t2, public=True)

        query = ManagedModel.normal_manager.filter(
            tag__in=Tag.objects.order_by('-id')[:1]
        )
        self.assertEqual({x.id for x in query}, {mm2.id})

    def test_sliced_delete(self):
        "Delete queries can safely contain sliced subqueries"
        DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:1]).delete()
        self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {1, 2, 3})

        DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:2]).delete()
        self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {1, 3})

        DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[1:]).delete()
        self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), {3})

    def test_distinct_ordered_sliced_subquery(self):
        # Implicit values('id').
        self.assertSequenceEqual(
            NamedCategory.objects.filter(
                id__in=NamedCategory.objects.distinct().order_by('name')[0:2],
            ).order_by('name').values_list('name', flat=True),
            ['first', 'fourth']
        )
        # Explicit values('id').
        self.assertSequenceEqual(
            NamedCategory.objects.filter(
                id__in=NamedCategory.objects.distinct().order_by('-name').values('id')[0:2],
            ).order_by('name').values_list('name', flat=True),
            ['second', 'third']
        )
        # Annotated value.
        self.assertSequenceEqual(
            DumbCategory.objects.filter(
                id__in=DumbCategory.objects.annotate(
                    double_id=F('id') * 2
                ).order_by('id').distinct().values('double_id')[0:2],
            ).order_by('id').values_list('id', flat=True),
            [2, 4]
        )


@skipUnlessDBFeature('allow_sliced_subqueries_with_in')
class QuerySetBitwiseOperationTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        school = School.objects.create()
        cls.room_1 = Classroom.objects.create(school=school, has_blackboard=False, name='Room 1')
        cls.room_2 = Classroom.objects.create(school=school, has_blackboard=True, name='Room 2')
        cls.room_3 = Classroom.objects.create(school=school, has_blackboard=True, name='Room 3')
        cls.room_4 = Classroom.objects.create(school=school, has_blackboard=False, name='Room 4')

    def test_or_with_rhs_slice(self):
        qs1 = Classroom.objects.filter(has_blackboard=True)
        qs2 = Classroom.objects.filter(has_blackboard=False)[:1]
        self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2, self.room_3])

    def test_or_with_lhs_slice(self):
        qs1 = Classroom.objects.filter(has_blackboard=True)[:1]
        qs2 = Classroom.objects.filter(has_blackboard=False)
        self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2, self.room_4])

    def test_or_with_both_slice(self):
        qs1 = Classroom.objects.filter(has_blackboard=False)[:1]
        qs2 = Classroom.objects.filter(has_blackboard=True)[:1]
        self.assertCountEqual(qs1 | qs2, [self.room_1, self.room_2])

    def test_or_with_both_slice_and_ordering(self):
        qs1 = Classroom.objects.filter(has_blackboard=False).order_by('-pk')[:1]
        qs2 = Classroom.objects.filter(has_blackboard=True).order_by('-name')[:1]
        self.assertCountEqual(qs1 | qs2, [self.room_3, self.room_4])


class CloneTests(TestCase):
    def test_evaluated_queryset_as_argument(self):
        "#13227 -- If a queryset is already evaluated, it can still be used as a query arg"
        n = Note(note='Test1', misc='misc')
        n.save()
        e = ExtraInfo(info='good', note=n)
        e.save()

        n_list = Note.objects.all()
        # Evaluate the Note queryset, populating the query cache.
        list(n_list)
        # Make one of the cached results unpicklable.
        n_list._result_cache[0].lock = Lock()
        with self.assertRaises(TypeError):
            pickle.dumps(n_list)
        # Use the note queryset in a query, and evaluate
        # that query in a way that involves cloning.
        self.assertEqual(ExtraInfo.objects.filter(note__in=n_list)[0].info, 'good')

    def test_no_model_options_cloning(self):
        """
        Cloning a queryset does not get out of hand. While complete
        testing is impossible, this is a sanity check against invalid use of
        deepcopy. refs #16759.
        """
        opts_class = type(Note._meta)
        note_deepcopy = getattr(opts_class, "__deepcopy__", None)
        opts_class.__deepcopy__ = lambda obj, memo: self.fail("Model options shouldn't be cloned.")
        try:
            Note.objects.filter(pk__lte=F('pk') + 1).all()
        finally:
            if note_deepcopy is None:
                delattr(opts_class, "__deepcopy__")
            else:
                opts_class.__deepcopy__ = note_deepcopy

    def test_no_fields_cloning(self):
        """
        Cloning a queryset does not get out of hand. While complete
        testing is impossible, this is a sanity check against invalid use of
        deepcopy. refs #16759.
        """
        opts_class = type(Note._meta.get_field("misc"))
        note_deepcopy = getattr(opts_class, "__deepcopy__", None)
        opts_class.__deepcopy__ = lambda obj, memo: self.fail("Model fields shouldn't be cloned")
        try:
            Note.objects.filter(note=F('misc')).all()
        finally:
            if note_deepcopy is None:
                delattr(opts_class, "__deepcopy__")
            else:
                opts_class.__deepcopy__ = note_deepcopy


class EmptyQuerySetTests(SimpleTestCase):
    def test_emptyqueryset_values(self):
        # #14366 -- Calling .values() on an empty QuerySet and then cloning
        # that should not cause an error
        self.assertCountEqual(Number.objects.none().values('num').order_by('num'), [])

    def test_values_subquery(self):
        self.assertCountEqual(Number.objects.filter(pk__in=Number.objects.none().values('pk')), [])
        self.assertCountEqual(Number.objects.filter(pk__in=Number.objects.none().values_list('pk')), [])

    def test_ticket_19151(self):
        # #19151 -- Calling .values() or .values_list() on an empty QuerySet
        # should return an empty QuerySet and not cause an error.
        q = Author.objects.none()
        self.assertCountEqual(q.values(), [])
        self.assertCountEqual(q.values_list(), [])


class ValuesQuerysetTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        Number.objects.create(num=72)

    def test_flat_values_list(self):
        qs = Number.objects.values_list("num")
        qs = qs.values_list("num", flat=True)
        self.assertSequenceEqual(qs, [72])

    def test_extra_values(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(select={'value_plus_x': 'num+%s', 'value_minus_x': 'num-%s'},
                                  select_params=(1, 2))
        qs = qs.order_by('value_minus_x')
        qs = qs.values('num')
        self.assertSequenceEqual(qs, [{'num': 72}])

    def test_extra_values_order_twice(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(select={'value_plus_one': 'num+1', 'value_minus_one': 'num-1'})
        qs = qs.order_by('value_minus_one').order_by('value_plus_one')
        qs = qs.values('num')
        self.assertSequenceEqual(qs, [{'num': 72}])
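
    # Editor's sketch, not part of the original suite: an extra() alias can
    # also be ordered descending with a leading minus sign.
    def test_extra_values_order_desc_sketch(self):
        qs = Number.objects.extra(select={'value_plus_one': 'num+1'})
        qs = qs.order_by('-value_plus_one')
        qs = qs.values('num')
        self.assertSequenceEqual(qs, [{'num': 72}])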
class ValuesQuerysetTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        Number.objects.create(num=72)

    def test_flat_values_list(self):
        qs = Number.objects.values_list("num")
        qs = qs.values_list("num", flat=True)
        self.assertSequenceEqual(qs, [72])

    def test_extra_values(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(select={'value_plus_x': 'num+%s', 'value_minus_x': 'num-%s'},
                                  select_params=(1, 2))
        qs = qs.order_by('value_minus_x')
        qs = qs.values('num')
        self.assertSequenceEqual(qs, [{'num': 72}])

    def test_extra_values_order_twice(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(select={'value_plus_one': 'num+1', 'value_minus_one': 'num-1'})
        qs = qs.order_by('value_minus_one').order_by('value_plus_one')
        qs = qs.values('num')
        self.assertSequenceEqual(qs, [{'num': 72}])

    def test_extra_values_order_multiple(self):
        # Postgres doesn't allow constants in order by, so check for that.
        qs = Number.objects.extra(select={
            'value_plus_one': 'num+1',
            'value_minus_one': 'num-1',
            'constant_value': '1'
        })
        qs = qs.order_by('value_plus_one', 'value_minus_one', 'constant_value')
        qs = qs.values('num')
        self.assertSequenceEqual(qs, [{'num': 72}])

    def test_extra_values_order_in_extra(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(
            select={'value_plus_one': 'num+1', 'value_minus_one': 'num-1'},
            order_by=['value_minus_one'],
        )
        qs = qs.values('num')
        # Evaluate the queryset so the extra ordering is actually exercised.
        self.assertSequenceEqual(qs, [{'num': 72}])

    def test_extra_select_params_values_order_in_extra(self):
        # testing for 23259 issue
        qs = Number.objects.extra(
            select={'value_plus_x': 'num+%s'},
            select_params=[1],
            order_by=['value_plus_x'],
        )
        qs = qs.filter(num=72)
        qs = qs.values('num')
        self.assertSequenceEqual(qs, [{'num': 72}])

    def test_extra_multiple_select_params_values_order_by(self):
        # testing for 23259 issue
        qs = Number.objects.extra(select={'value_plus_x': 'num+%s', 'value_minus_x': 'num-%s'},
                                  select_params=(72, 72))
        qs = qs.order_by('value_minus_x')
        qs = qs.filter(num=1)
        qs = qs.values('num')
        self.assertSequenceEqual(qs, [])

    def test_extra_values_list(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(select={'value_plus_one': 'num+1'})
        qs = qs.order_by('value_plus_one')
        qs = qs.values_list('num')
        self.assertSequenceEqual(qs, [(72,)])

    def test_flat_extra_values_list(self):
        # testing for ticket 14930 issues
        qs = Number.objects.extra(select={'value_plus_one': 'num+1'})
        qs = qs.order_by('value_plus_one')
        qs = qs.values_list('num', flat=True)
        self.assertSequenceEqual(qs, [72])

    def test_field_error_values_list(self):
        # see #23443
        msg = "Cannot resolve keyword %r into field. Join on 'name' not permitted." % 'foo'
        with self.assertRaisesMessage(FieldError, msg):
            Tag.objects.values_list('name__foo')
    def test_named_values_list_flat(self):
        msg = "'flat' and 'named' can't be used together."
        with self.assertRaisesMessage(TypeError, msg):
            Number.objects.values_list('num', flat=True, named=True)

    def test_named_values_list_bad_field_name(self):
        msg = "Type names and field names must be valid identifiers: '1'"
        with self.assertRaisesMessage(ValueError, msg):
            Number.objects.extra(select={'1': 'num+1'}).values_list('1', named=True).first()

    def test_named_values_list_with_fields(self):
        qs = Number.objects.extra(select={'num2': 'num+1'}).annotate(Count('id'))
        values = qs.values_list('num', 'num2', named=True).first()
        self.assertEqual(type(values).__name__, 'Row')
        self.assertEqual(values._fields, ('num', 'num2'))
        self.assertEqual(values.num, 72)
        self.assertEqual(values.num2, 73)

    def test_named_values_list_without_fields(self):
        qs = Number.objects.extra(select={'num2': 'num+1'}).annotate(Count('id'))
        values = qs.values_list(named=True).first()
        self.assertEqual(type(values).__name__, 'Row')
        self.assertEqual(
            values._fields,
            ('num2', 'id', 'num', 'other_num', 'another_num', 'id__count'),
        )
        self.assertEqual(values.num, 72)
        self.assertEqual(values.num2, 73)
        self.assertEqual(values.id__count, 1)

    def test_named_values_list_expression_with_default_alias(self):
        expr = Count('id')
        values = Number.objects.annotate(id__count1=expr).values_list(expr, 'id__count1', named=True).first()
        self.assertEqual(values._fields, ('id__count2', 'id__count1'))

    def test_named_values_list_expression(self):
        expr = F('num') + 1
        qs = Number.objects.annotate(combinedexpression1=expr).values_list(expr, 'combinedexpression1', named=True)
        values = qs.first()
        self.assertEqual(values._fields, ('combinedexpression2', 'combinedexpression1'))
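
# values_list(named=True) returns each row as a namedtuple-like "Row" class,
# so columns are reachable by attribute as well as by index. A rough
# equivalent of what the tests above rely on:
#
#   from collections import namedtuple
#   Row = namedtuple('Row', ['num', 'num2'])
#   row = Row(72, 73)
#   row.num      # 72
#   row._fields  # ('num', 'num2')
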
class QuerySetSupportsPythonIdioms(TestCase):

    @classmethod
    def setUpTestData(cls):
        some_date = datetime.datetime(2014, 5, 16, 12, 1)
        for i in range(1, 8):
            Article.objects.create(
                name="Article {}".format(i), created=some_date)

    def get_ordered_articles(self):
        return Article.objects.all().order_by('name')

    def test_can_get_items_using_index_and_slice_notation(self):
        self.assertEqual(self.get_ordered_articles()[0].name, 'Article 1')
        self.assertQuerysetEqual(
            self.get_ordered_articles()[1:3],
            ["<Article: Article 2>", "<Article: Article 3>"]
        )

    def test_slicing_with_steps_can_be_used(self):
        self.assertQuerysetEqual(
            self.get_ordered_articles()[::2], [
                "<Article: Article 1>",
                "<Article: Article 3>",
                "<Article: Article 5>",
                "<Article: Article 7>"
            ]
        )

    def test_slicing_without_step_is_lazy(self):
        with self.assertNumQueries(0):
            self.get_ordered_articles()[0:5]

    def test_slicing_with_steps_is_not_lazy(self):
        with self.assertNumQueries(1):
            self.get_ordered_articles()[0:5:3]

    def test_slicing_can_slice_again_after_slicing(self):
        self.assertQuerysetEqual(
            self.get_ordered_articles()[0:5][0:2],
            ["<Article: Article 1>", "<Article: Article 2>"]
        )
        self.assertQuerysetEqual(self.get_ordered_articles()[0:5][4:], ["<Article: Article 5>"])
        self.assertQuerysetEqual(self.get_ordered_articles()[0:5][5:], [])

        # Some more tests!
        self.assertQuerysetEqual(
            self.get_ordered_articles()[2:][0:2],
            ["<Article: Article 3>", "<Article: Article 4>"]
        )
        self.assertQuerysetEqual(
            self.get_ordered_articles()[2:][:2],
            ["<Article: Article 3>", "<Article: Article 4>"]
        )
        self.assertQuerysetEqual(self.get_ordered_articles()[2:][2:3], ["<Article: Article 5>"])

        # Using an offset without a limit is also possible.
        self.assertQuerysetEqual(
            self.get_ordered_articles()[5:],
            ["<Article: Article 6>", "<Article: Article 7>"]
        )

    def test_slicing_cannot_filter_queryset_once_sliced(self):
        with self.assertRaisesMessage(AssertionError, "Cannot filter a query once a slice has been taken."):
            Article.objects.all()[0:5].filter(id=1)

    def test_slicing_cannot_reorder_queryset_once_sliced(self):
        with self.assertRaisesMessage(AssertionError, "Cannot reorder a query once a slice has been taken."):
            Article.objects.all()[0:5].order_by('id')

    def test_slicing_cannot_combine_queries_once_sliced(self):
        with self.assertRaisesMessage(AssertionError, "Cannot combine queries once a slice has been taken."):
            Article.objects.all()[0:1] & Article.objects.all()[4:5]

    def test_slicing_negative_indexing_not_supported_for_single_element(self):
        """hint: inverting your ordering might do what you need"""
        with self.assertRaisesMessage(AssertionError, "Negative indexing is not supported."):
            Article.objects.all()[-1]

    def test_slicing_negative_indexing_not_supported_for_range(self):
        """hint: inverting your ordering might do what you need"""
        with self.assertRaisesMessage(AssertionError, "Negative indexing is not supported."):
            Article.objects.all()[0:-5]

    def test_invalid_index(self):
        msg = 'QuerySet indices must be integers or slices, not str.'
        with self.assertRaisesMessage(TypeError, msg):
            Article.objects.all()['foo']

    def test_can_get_number_of_items_in_queryset_using_standard_len(self):
        self.assertEqual(len(Article.objects.filter(name__exact='Article 1')), 1)

    def test_can_combine_queries_using_and_and_or_operators(self):
        s1 = Article.objects.filter(name__exact='Article 1')
        s2 = Article.objects.filter(name__exact='Article 2')
        self.assertQuerysetEqual(
            (s1 | s2).order_by('name'),
            ["<Article: Article 1>", "<Article: Article 2>"]
        )
        self.assertQuerysetEqual(s1 & s2, [])
class WeirdQuerysetSlicingTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        Number.objects.create(num=1)
        Number.objects.create(num=2)

        Article.objects.create(name='one', created=datetime.datetime.now())
        Article.objects.create(name='two', created=datetime.datetime.now())
        Article.objects.create(name='three', created=datetime.datetime.now())
        Article.objects.create(name='four', created=datetime.datetime.now())

        food = Food.objects.create(name='spam')
        Eaten.objects.create(meal='spam with eggs', food=food)

    def test_tickets_7698_10202(self):
        # People like to slice with '0' as the high-water mark.
        self.assertQuerysetEqual(Article.objects.all()[0:0], [])
        self.assertQuerysetEqual(Article.objects.all()[0:0][:10], [])
        self.assertEqual(Article.objects.all()[:0].count(), 0)
        with self.assertRaisesMessage(TypeError, 'Cannot reverse a query once a slice has been taken.'):
            Article.objects.all()[:0].latest('created')

    def test_empty_resultset_sql(self):
        # ticket #12192
        self.assertNumQueries(0, lambda: list(Number.objects.all()[1:1]))

    def test_empty_sliced_subquery(self):
        self.assertEqual(Eaten.objects.filter(food__in=Food.objects.all()[0:0]).count(), 0)

    def test_empty_sliced_subquery_exclude(self):
        self.assertEqual(Eaten.objects.exclude(food__in=Food.objects.all()[0:0]).count(), 1)

    def test_zero_length_values_slicing(self):
        n = 42
        with self.assertNumQueries(0):
            self.assertQuerysetEqual(Article.objects.values()[n:n], [])
            self.assertQuerysetEqual(Article.objects.values_list()[n:n], [])
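
# Zero-length slices short-circuit: when the slice's low and high bounds are
# equal, Django recognizes that the result can only be empty and skips the
# database round trip entirely, which is what the assertNumQueries(0) checks
# above demonstrate.
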
class EscapingTests(TestCase):
    def test_ticket_7302(self):
        # Reserved names are appropriately escaped.
        ReservedName.objects.create(name='a', order=42)
        ReservedName.objects.create(name='b', order=37)
        self.assertQuerysetEqual(
            ReservedName.objects.all().order_by('order'),
            ['<ReservedName: b>', '<ReservedName: a>']
        )
        self.assertQuerysetEqual(
            ReservedName.objects.extra(select={'stuff': 'name'}, order_by=('order', 'stuff')),
            ['<ReservedName: b>', '<ReservedName: a>']
        )


class ToFieldTests(TestCase):
    def test_in_query(self):
        apple = Food.objects.create(name="apple")
        pear = Food.objects.create(name="pear")
        lunch = Eaten.objects.create(food=apple, meal="lunch")
        dinner = Eaten.objects.create(food=pear, meal="dinner")

        self.assertEqual(
            set(Eaten.objects.filter(food__in=[apple, pear])),
            {lunch, dinner},
        )

    def test_in_subquery(self):
        apple = Food.objects.create(name="apple")
        lunch = Eaten.objects.create(food=apple, meal="lunch")
        self.assertEqual(
            set(Eaten.objects.filter(food__in=Food.objects.filter(name='apple'))),
            {lunch}
        )
        self.assertEqual(
            set(Eaten.objects.filter(food__in=Food.objects.filter(name='apple').values('eaten__meal'))),
            set()
        )
        self.assertEqual(
            set(Food.objects.filter(eaten__in=Eaten.objects.filter(meal='lunch'))),
            {apple}
        )

    def test_nested_in_subquery(self):
        extra = ExtraInfo.objects.create()
        author = Author.objects.create(num=42, extra=extra)
        report = Report.objects.create(creator=author)
        comment = ReportComment.objects.create(report=report)
        comments = ReportComment.objects.filter(
            report__in=Report.objects.filter(
                creator__in=extra.author_set.all(),
            ),
        )
        self.assertSequenceEqual(comments, [comment])

    def test_reverse_in(self):
        apple = Food.objects.create(name="apple")
        pear = Food.objects.create(name="pear")
        lunch_apple = Eaten.objects.create(food=apple, meal="lunch")
        lunch_pear = Eaten.objects.create(food=pear, meal="dinner")

        self.assertEqual(
            set(Food.objects.filter(eaten__in=[lunch_apple, lunch_pear])),
            {apple, pear}
        )

    def test_single_object(self):
        apple = Food.objects.create(name="apple")
        lunch = Eaten.objects.create(food=apple, meal="lunch")
        dinner = Eaten.objects.create(food=apple, meal="dinner")

        self.assertEqual(
            set(Eaten.objects.filter(food=apple)),
            {lunch, dinner}
        )

    def test_single_object_reverse(self):
        apple = Food.objects.create(name="apple")
        lunch = Eaten.objects.create(food=apple, meal="lunch")

        self.assertEqual(
            set(Food.objects.filter(eaten=lunch)),
            {apple}
        )

    def test_recursive_fk(self):
        node1 = Node.objects.create(num=42)
        node2 = Node.objects.create(num=1, parent=node1)

        self.assertEqual(
            list(Node.objects.filter(parent=node1)),
            [node2]
        )

    def test_recursive_fk_reverse(self):
        node1 = Node.objects.create(num=42)
        node2 = Node.objects.create(num=1, parent=node1)

        self.assertEqual(
            list(Node.objects.filter(node=node2)),
            [node1]
        )


class IsNullTests(TestCase):
    def test_primary_key(self):
        custom = CustomPk.objects.create(name='pk')
        null = Related.objects.create()
        notnull = Related.objects.create(custom=custom)
        self.assertSequenceEqual(Related.objects.filter(custom__isnull=False), [notnull])
        self.assertSequenceEqual(Related.objects.filter(custom__isnull=True), [null])

    def test_to_field(self):
        apple = Food.objects.create(name="apple")
        Eaten.objects.create(food=apple, meal="lunch")
        Eaten.objects.create(meal="lunch")
        self.assertQuerysetEqual(
            Eaten.objects.filter(food__isnull=False),
            ['<Eaten: apple at lunch>']
        )
        self.assertQuerysetEqual(
            Eaten.objects.filter(food__isnull=True),
            ['<Eaten: None at lunch>']
        )


class ConditionalTests(TestCase):
    """Tests whose execution depends on different environment conditions,
    such as the Python version or database backend features."""
    @classmethod
    def setUpTestData(cls):
        generic = NamedCategory.objects.create(name="Generic")
        t1 = Tag.objects.create(name='t1', category=generic)
        Tag.objects.create(name='t2', parent=t1, category=generic)
        t3 = Tag.objects.create(name='t3', parent=t1)
        Tag.objects.create(name='t4', parent=t3)
        Tag.objects.create(name='t5', parent=t3)

    def test_infinite_loop(self):
        # If you're not careful, it's possible to introduce infinite loops via
        # default ordering on foreign keys in a cycle. We detect that.
        with self.assertRaisesMessage(FieldError, 'Infinite loop caused by ordering.'):
            list(LoopX.objects.all())  # Force queryset evaluation with list()
        with self.assertRaisesMessage(FieldError, 'Infinite loop caused by ordering.'):
            list(LoopZ.objects.all())  # Force queryset evaluation with list()

        # Note that this doesn't cause an infinite loop, since the default
        # ordering on the Tag model is empty (and thus defaults to using "id"
        # for the related field).
        self.assertEqual(len(Tag.objects.order_by('parent')), 5)

        # ... but you can still order in a non-recursive fashion among linked
        # fields (the previous test failed because the default ordering was
        # recursive).
        self.assertQuerysetEqual(
            LoopX.objects.all().order_by('y__x__y__x__id'), []
        )

    # When grouping without specifying ordering, we add an explicit "ORDER BY NULL"
    # portion in MySQL to prevent unnecessary sorting.
    @skipUnlessDBFeature('requires_explicit_null_ordering_when_grouping')
    def test_null_ordering_added(self):
        query = Tag.objects.values_list('parent_id', flat=True).order_by().query
        query.group_by = ['parent_id']
        sql = query.get_compiler(DEFAULT_DB_ALIAS).as_sql()[0]
        fragment = "ORDER BY "
        pos = sql.find(fragment)
        self.assertEqual(sql.find(fragment, pos + 1), -1)
        self.assertEqual(sql.find("NULL", pos + len(fragment)), pos + len(fragment))

    def test_in_list_limit(self):
        # The "in" lookup works with lists of 1000 items or more.
        # The number of values is chosen to force three different IN batches
        # on Oracle, while staying below MSSQL's 2100-parameter limit.
        numbers = list(range(2050))
        max_query_params = connection.features.max_query_params
        if max_query_params is None or max_query_params >= len(numbers):
            Number.objects.bulk_create(Number(num=num) for num in numbers)
            for number in [1000, 1001, 2000, len(numbers)]:
                with self.subTest(number=number):
                    self.assertEqual(Number.objects.filter(num__in=numbers[:number]).count(), number)
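
# Oracle rejects IN clauses with more than 1000 literals, so Django splits a
# long list into several ORed batches. Sketched roughly, num__in=range(2050)
# becomes something like:
#
#   WHERE num IN (0, ..., 999)
#      OR num IN (1000, ..., 1999)
#      OR num IN (2000, ..., 2049)
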
""" @classmethod def setUpTestData(cls): objectas = [] objectbs = [] objectcs = [] a_info = ['one', 'two', 'three'] for name in a_info: o = ObjectA(name=name) o.save() objectas.append(o) b_info = [('un', 1, objectas[0]), ('deux', 2, objectas[0]), ('trois', 3, objectas[2])] for name, number, objecta in b_info: o = ObjectB(name=name, num=number, objecta=objecta) o.save() objectbs.append(o) c_info = [('ein', objectas[2], objectbs[2]), ('zwei', objectas[1], objectbs[1])] for name, objecta, objectb in c_info: o = ObjectC(name=name, objecta=objecta, objectb=objectb) o.save() objectcs.append(o) def check_union(self, model, Q1, Q2): filter = model.objects.filter self.assertEqual(set(filter(Q1) | filter(Q2)), set(filter(Q1 | Q2))) self.assertEqual(set(filter(Q2) | filter(Q1)), set(filter(Q1 | Q2))) def test_A_AB(self): Q1 = Q(name='two') Q2 = Q(objectb__name='deux') self.check_union(ObjectA, Q1, Q2) def test_A_AB2(self): Q1 = Q(name='two') Q2 = Q(objectb__name='deux', objectb__num=2) self.check_union(ObjectA, Q1, Q2) def test_AB_ACB(self): Q1 = Q(objectb__name='deux') Q2 = Q(objectc__objectb__name='deux') self.check_union(ObjectA, Q1, Q2) def test_BAB_BAC(self): Q1 = Q(objecta__objectb__name='deux') Q2 = Q(objecta__objectc__name='ein') self.check_union(ObjectB, Q1, Q2) def test_BAB_BACB(self): Q1 = Q(objecta__objectb__name='deux') Q2 = Q(objecta__objectc__objectb__name='trois') self.check_union(ObjectB, Q1, Q2) def test_BA_BCA__BAB_BAC_BCA(self): Q1 = Q(objecta__name='one', objectc__objecta__name='two') Q2 = Q(objecta__objectc__name='ein', objectc__objecta__name='three', objecta__objectb__name='trois') self.check_union(ObjectB, Q1, Q2) class DefaultValuesInsertTest(TestCase): def test_no_extra_params(self): """ Can create an instance of a model with only the PK field (#17056)." 
""" DumbCategory.objects.create() class ExcludeTests(TestCase): @classmethod def setUpTestData(cls): f1 = Food.objects.create(name='apples') Food.objects.create(name='oranges') Eaten.objects.create(food=f1, meal='dinner') j1 = Job.objects.create(name='Manager') cls.r1 = Responsibility.objects.create(description='Playing golf') j2 = Job.objects.create(name='Programmer') r2 = Responsibility.objects.create(description='Programming') JobResponsibilities.objects.create(job=j1, responsibility=cls.r1) JobResponsibilities.objects.create(job=j2, responsibility=r2) def test_to_field(self): self.assertQuerysetEqual( Food.objects.exclude(eaten__meal='dinner'), ['<Food: oranges>']) self.assertQuerysetEqual( Job.objects.exclude(responsibilities__description='Playing golf'), ['<Job: Programmer>']) self.assertQuerysetEqual( Responsibility.objects.exclude(jobs__name='Manager'), ['<Responsibility: Programming>']) def test_ticket14511(self): alex = Person.objects.get_or_create(name='Alex')[0] jane = Person.objects.get_or_create(name='Jane')[0] oracle = Company.objects.get_or_create(name='Oracle')[0] google = Company.objects.get_or_create(name='Google')[0] microsoft = Company.objects.get_or_create(name='Microsoft')[0] intel = Company.objects.get_or_create(name='Intel')[0] def employ(employer, employee, title): Employment.objects.get_or_create(employee=employee, employer=employer, title=title) employ(oracle, alex, 'Engineer') employ(oracle, alex, 'Developer') employ(google, alex, 'Engineer') employ(google, alex, 'Manager') employ(microsoft, alex, 'Manager') employ(intel, alex, 'Manager') employ(microsoft, jane, 'Developer') employ(intel, jane, 'Manager') alex_tech_employers = alex.employers.filter( employment__title__in=('Engineer', 'Developer')).distinct().order_by('name') self.assertSequenceEqual(alex_tech_employers, [google, oracle]) alex_nontech_employers = alex.employers.exclude( employment__title__in=('Engineer', 'Developer')).distinct().order_by('name') self.assertSequenceEqual(alex_nontech_employers, [google, intel, microsoft]) def test_exclude_reverse_fk_field_ref(self): tag = Tag.objects.create() Note.objects.create(tag=tag, note='note') annotation = Annotation.objects.create(name='annotation', tag=tag) self.assertEqual(Annotation.objects.exclude(tag__note__note=F('name')).get(), annotation) def test_exclude_with_circular_fk_relation(self): self.assertEqual(ObjectB.objects.exclude(objecta__objectb__name=F('name')).count(), 0) def test_subquery_exclude_outerref(self): qs = JobResponsibilities.objects.filter( Exists(Responsibility.objects.exclude(jobs=OuterRef('job'))), ) self.assertTrue(qs.exists()) self.r1.delete() self.assertFalse(qs.exists()) def test_exclude_nullable_fields(self): number = Number.objects.create(num=1, other_num=1) Number.objects.create(num=2, other_num=2, another_num=2) self.assertSequenceEqual( Number.objects.exclude(other_num=F('another_num')), [number], ) self.assertSequenceEqual( Number.objects.exclude(num=F('another_num')), [number], ) class ExcludeTest17600(TestCase): """ Some regressiontests for ticket #17600. Some of these likely duplicate other existing tests. """ @classmethod def setUpTestData(cls): # Create a few Orders. 
class ExcludeTest17600(TestCase):
    """
    Some regression tests for ticket #17600. Some of these likely duplicate
    other existing tests.
    """
    @classmethod
    def setUpTestData(cls):
        # Create a few Orders.
        cls.o1 = Order.objects.create(pk=1)
        cls.o2 = Order.objects.create(pk=2)
        cls.o3 = Order.objects.create(pk=3)

        # Create some OrderItems for the first order with homogeneous
        # status_id values.
        cls.oi1 = OrderItem.objects.create(order=cls.o1, status=1)
        cls.oi2 = OrderItem.objects.create(order=cls.o1, status=1)
        cls.oi3 = OrderItem.objects.create(order=cls.o1, status=1)

        # Create some OrderItems for the second order with heterogeneous
        # status_id values.
        cls.oi4 = OrderItem.objects.create(order=cls.o2, status=1)
        cls.oi5 = OrderItem.objects.create(order=cls.o2, status=2)
        cls.oi6 = OrderItem.objects.create(order=cls.o2, status=3)

        # Create some OrderItems for the third order with heterogeneous
        # status_id values.
        cls.oi7 = OrderItem.objects.create(order=cls.o3, status=2)
        cls.oi8 = OrderItem.objects.create(order=cls.o3, status=3)
        cls.oi9 = OrderItem.objects.create(order=cls.o3, status=4)

    def test_exclude_plain(self):
        """
        This should exclude Orders which have some items with status 1.
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(items__status=1),
            ['<Order: 3>'])

    def test_exclude_plain_distinct(self):
        """
        This should exclude Orders which have some items with status 1.
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(items__status=1).distinct(),
            ['<Order: 3>'])

    def test_exclude_with_q_object_distinct(self):
        """
        This should exclude Orders which have some items with status 1.
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(Q(items__status=1)).distinct(),
            ['<Order: 3>'])

    def test_exclude_with_q_object_no_distinct(self):
        """
        This should exclude Orders which have some items with status 1.
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(Q(items__status=1)),
            ['<Order: 3>'])

    def test_exclude_with_q_is_equal_to_plain_exclude(self):
        """
        Using exclude(condition) and exclude(Q(condition)) should
        yield the same QuerySet.
        """
        self.assertEqual(
            list(Order.objects.exclude(items__status=1).distinct()),
            list(Order.objects.exclude(Q(items__status=1)).distinct()))

    def test_exclude_with_q_is_equal_to_plain_exclude_variation(self):
        """
        Using exclude(condition) and exclude(Q(condition)) should
        yield the same QuerySet.
        """
        self.assertEqual(
            list(Order.objects.exclude(items__status=1)),
            list(Order.objects.exclude(Q(items__status=1)).distinct()))

    @unittest.expectedFailure
    def test_only_orders_with_all_items_having_status_1(self):
        """
        This should only return orders having ALL items set to status 1, or
        those orders not having any items at all. The correct way to write
        this query in SQL seems to be using two nested subqueries.
        """
        self.assertQuerysetEqual(
            Order.objects.exclude(~Q(items__status=1)).distinct(),
            ['<Order: 1>'])
""" self.assertQuerysetEqual( Order.objects.exclude(~Q(items__status=1)).distinct(), ['<Order: 1>']) class Exclude15786(TestCase): """Regression test for #15786""" def test_ticket15786(self): c1 = SimpleCategory.objects.create(name='c1') c2 = SimpleCategory.objects.create(name='c2') OneToOneCategory.objects.create(category=c1) OneToOneCategory.objects.create(category=c2) rel = CategoryRelationship.objects.create(first=c1, second=c2) self.assertEqual( CategoryRelationship.objects.exclude( first__onetoonecategory=F('second__onetoonecategory') ).get(), rel ) class NullInExcludeTest(TestCase): @classmethod def setUpTestData(cls): NullableName.objects.create(name='i1') NullableName.objects.create() def test_null_in_exclude_qs(self): none_val = '' if connection.features.interprets_empty_strings_as_nulls else None self.assertQuerysetEqual( NullableName.objects.exclude(name__in=[]), ['i1', none_val], attrgetter('name')) self.assertQuerysetEqual( NullableName.objects.exclude(name__in=['i1']), [none_val], attrgetter('name')) self.assertQuerysetEqual( NullableName.objects.exclude(name__in=['i3']), ['i1', none_val], attrgetter('name')) inner_qs = NullableName.objects.filter(name='i1').values_list('name') self.assertQuerysetEqual( NullableName.objects.exclude(name__in=inner_qs), [none_val], attrgetter('name')) # The inner queryset wasn't executed - it should be turned # into subquery above self.assertIs(inner_qs._result_cache, None) @unittest.expectedFailure def test_col_not_in_list_containing_null(self): """ The following case is not handled properly because SQL's COL NOT IN (list containing null) handling is too weird to abstract away. """ self.assertQuerysetEqual( NullableName.objects.exclude(name__in=[None]), ['i1'], attrgetter('name')) def test_double_exclude(self): self.assertEqual( list(NullableName.objects.filter(~~Q(name='i1'))), list(NullableName.objects.filter(Q(name='i1')))) self.assertNotIn( 'IS NOT NULL', str(NullableName.objects.filter(~~Q(name='i1')).query)) class EmptyStringsAsNullTest(TestCase): """ Filtering on non-null character fields works as expected. The reason for these tests is that Oracle treats '' as NULL, and this can cause problems in query construction. Refs #17957. """ @classmethod def setUpTestData(cls): cls.nc = NamedCategory.objects.create(name='') def test_direct_exclude(self): self.assertQuerysetEqual( NamedCategory.objects.exclude(name__in=['nonexistent']), [self.nc.pk], attrgetter('pk') ) def test_joined_exclude(self): self.assertQuerysetEqual( DumbCategory.objects.exclude(namedcategory__name__in=['nonexistent']), [self.nc.pk], attrgetter('pk') ) def test_21001(self): foo = NamedCategory.objects.create(name='foo') self.assertQuerysetEqual( NamedCategory.objects.exclude(name=''), [foo.pk], attrgetter('pk') ) class ProxyQueryCleanupTest(TestCase): def test_evaluated_proxy_count(self): """ Generating the query string doesn't alter the query's state in irreversible ways. Refs #18248. 
""" ProxyCategory.objects.create() qs = ProxyCategory.objects.all() self.assertEqual(qs.count(), 1) str(qs.query) self.assertEqual(qs.count(), 1) class WhereNodeTest(SimpleTestCase): class DummyNode: def as_sql(self, compiler, connection): return 'dummy', [] class MockCompiler: def compile(self, node): return node.as_sql(self, connection) def __call__(self, name): return connection.ops.quote_name(name) def test_empty_full_handling_conjunction(self): compiler = WhereNodeTest.MockCompiler() w = WhereNode(children=[NothingNode()]) with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('', [])) w = WhereNode(children=[self.DummyNode(), self.DummyNode()]) self.assertEqual(w.as_sql(compiler, connection), ('(dummy AND dummy)', [])) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy AND dummy)', [])) w = WhereNode(children=[NothingNode(), self.DummyNode()]) with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('', [])) def test_empty_full_handling_disjunction(self): compiler = WhereNodeTest.MockCompiler() w = WhereNode(children=[NothingNode()], connector='OR') with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('', [])) w = WhereNode(children=[self.DummyNode(), self.DummyNode()], connector='OR') self.assertEqual(w.as_sql(compiler, connection), ('(dummy OR dummy)', [])) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy OR dummy)', [])) w = WhereNode(children=[NothingNode(), self.DummyNode()], connector='OR') self.assertEqual(w.as_sql(compiler, connection), ('dummy', [])) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('NOT (dummy)', [])) def test_empty_nodes(self): compiler = WhereNodeTest.MockCompiler() empty_w = WhereNode() w = WhereNode(children=[empty_w, empty_w]) self.assertEqual(w.as_sql(compiler, connection), ('', [])) w.negate() with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) w.connector = 'OR' with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) w.negate() self.assertEqual(w.as_sql(compiler, connection), ('', [])) w = WhereNode(children=[empty_w, NothingNode()], connector='OR') self.assertEqual(w.as_sql(compiler, connection), ('', [])) w = WhereNode(children=[empty_w, NothingNode()], connector='AND') with self.assertRaises(EmptyResultSet): w.as_sql(compiler, connection) class QuerySetExceptionTests(SimpleTestCase): def test_iter_exceptions(self): qs = ExtraInfo.objects.only('author') msg = "'ManyToOneRel' object has no attribute 'attname'" with self.assertRaisesMessage(AttributeError, msg): list(qs) def test_invalid_order_by(self): msg = ( "Cannot resolve keyword '*' into field. Choices are: created, id, " "name" ) with self.assertRaisesMessage(FieldError, msg): Article.objects.order_by('*') def test_invalid_queryset_model(self): msg = 'Cannot use QuerySet for "Article": Use a QuerySet for "ExtraInfo".' 
class QuerySetExceptionTests(SimpleTestCase):
    def test_iter_exceptions(self):
        qs = ExtraInfo.objects.only('author')
        msg = "'ManyToOneRel' object has no attribute 'attname'"
        with self.assertRaisesMessage(AttributeError, msg):
            list(qs)

    def test_invalid_order_by(self):
        msg = (
            "Cannot resolve keyword '*' into field. Choices are: created, id, "
            "name"
        )
        with self.assertRaisesMessage(FieldError, msg):
            Article.objects.order_by('*')

    def test_invalid_queryset_model(self):
        msg = 'Cannot use QuerySet for "Article": Use a QuerySet for "ExtraInfo".'
        with self.assertRaisesMessage(ValueError, msg):
            list(Author.objects.filter(extra=Article.objects.all()))


class NullJoinPromotionOrTest(TestCase):
    @classmethod
    def setUpTestData(cls):
        cls.d1 = ModelD.objects.create(name='foo')
        d2 = ModelD.objects.create(name='bar')
        cls.a1 = ModelA.objects.create(name='a1', d=cls.d1)
        c = ModelC.objects.create(name='c')
        b = ModelB.objects.create(name='b', c=c)
        cls.a2 = ModelA.objects.create(name='a2', b=b, d=d2)

    def test_ticket_17886(self):
        # The first Q-object is generating the match, the rest of the filters
        # should not remove the match even if they do not match anything. The
        # problem here was that b__name generates a LOUTER JOIN, then
        # b__c__name generates join to c, which the ORM tried to promote but
        # failed as that join isn't nullable.
        q_obj = (
            Q(d__name='foo') |
            Q(b__name='foo') |
            Q(b__c__name='foo')
        )
        qset = ModelA.objects.filter(q_obj)
        self.assertEqual(list(qset), [self.a1])
        # We generate one INNER JOIN to D. The join is direct and not nullable
        # so we can use INNER JOIN for it. However, we can NOT use INNER JOIN
        # for the b->c join, as a->b is nullable.
        self.assertEqual(str(qset.query).count('INNER JOIN'), 1)

    def test_isnull_filter_promotion(self):
        qs = ModelA.objects.filter(Q(b__name__isnull=True))
        self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
        self.assertEqual(list(qs), [self.a1])

        qs = ModelA.objects.filter(~Q(b__name__isnull=True))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(list(qs), [self.a2])

        qs = ModelA.objects.filter(~~Q(b__name__isnull=True))
        self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
        self.assertEqual(list(qs), [self.a1])

        qs = ModelA.objects.filter(Q(b__name__isnull=False))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(list(qs), [self.a2])

        qs = ModelA.objects.filter(~Q(b__name__isnull=False))
        self.assertEqual(str(qs.query).count('LEFT OUTER'), 1)
        self.assertEqual(list(qs), [self.a1])

        qs = ModelA.objects.filter(~~Q(b__name__isnull=False))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(list(qs), [self.a2])

    def test_null_join_demotion(self):
        qs = ModelA.objects.filter(Q(b__name__isnull=False) & Q(b__name__isnull=True))
        self.assertIn(' INNER JOIN ', str(qs.query))
        qs = ModelA.objects.filter(Q(b__name__isnull=True) & Q(b__name__isnull=False))
        self.assertIn(' INNER JOIN ', str(qs.query))
        qs = ModelA.objects.filter(Q(b__name__isnull=False) | Q(b__name__isnull=True))
        self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
        qs = ModelA.objects.filter(Q(b__name__isnull=True) | Q(b__name__isnull=False))
        self.assertIn(' LEFT OUTER JOIN ', str(qs.query))

    def test_ticket_21366(self):
        n = Note.objects.create(note='n', misc='m')
        e = ExtraInfo.objects.create(info='info', note=n)
        a = Author.objects.create(name='Author1', num=1, extra=e)
        Ranking.objects.create(rank=1, author=a)
        r1 = Report.objects.create(name='Foo', creator=a)
        r2 = Report.objects.create(name='Bar')
        Report.objects.create(name='Bar', creator=a)
        qs = Report.objects.filter(
            Q(creator__ranking__isnull=True) |
            Q(creator__ranking__rank=1, name='Foo')
        )
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        self.assertEqual(str(qs.query).count(' JOIN '), 2)
        self.assertSequenceEqual(qs.order_by('name'), [r2, r1])

    def test_ticket_21748(self):
        i1 = Identifier.objects.create(name='i1')
        i2 = Identifier.objects.create(name='i2')
        i3 = Identifier.objects.create(name='i3')
        Program.objects.create(identifier=i1)
        Channel.objects.create(identifier=i1)
        Program.objects.create(identifier=i2)
        self.assertSequenceEqual(Identifier.objects.filter(program=None, channel=None), [i3])
        self.assertSequenceEqual(Identifier.objects.exclude(program=None, channel=None).order_by('name'), [i1, i2])

    def test_ticket_21748_double_negated_and(self):
        i1 = Identifier.objects.create(name='i1')
        i2 = Identifier.objects.create(name='i2')
        Identifier.objects.create(name='i3')
        p1 = Program.objects.create(identifier=i1)
        c1 = Channel.objects.create(identifier=i1)
        Program.objects.create(identifier=i2)
        # Check the ~~Q() (or equivalently .exclude(~Q)) works like Q() for
        # join promotion.
        qs1_doubleneg = Identifier.objects.exclude(~Q(program__id=p1.id, channel__id=c1.id)).order_by('pk')
        qs1_filter = Identifier.objects.filter(program__id=p1.id, channel__id=c1.id).order_by('pk')
        self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
        self.assertEqual(str(qs1_filter.query).count('JOIN'),
                         str(qs1_doubleneg.query).count('JOIN'))
        self.assertEqual(2, str(qs1_doubleneg.query).count('INNER JOIN'))
        self.assertEqual(str(qs1_filter.query).count('INNER JOIN'),
                         str(qs1_doubleneg.query).count('INNER JOIN'))

    def test_ticket_21748_double_negated_or(self):
        i1 = Identifier.objects.create(name='i1')
        i2 = Identifier.objects.create(name='i2')
        Identifier.objects.create(name='i3')
        p1 = Program.objects.create(identifier=i1)
        c1 = Channel.objects.create(identifier=i1)
        p2 = Program.objects.create(identifier=i2)
        # Test OR + doubleneg. The expected result is that channel is LOUTER
        # joined, program INNER joined.
        qs1_filter = Identifier.objects.filter(
            Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id)
        ).order_by('pk')
        qs1_doubleneg = Identifier.objects.exclude(
            ~Q(Q(program__id=p2.id, channel__id=c1.id) | Q(program__id=p1.id))
        ).order_by('pk')
        self.assertQuerysetEqual(qs1_doubleneg, qs1_filter, lambda x: x)
        self.assertEqual(str(qs1_filter.query).count('JOIN'),
                         str(qs1_doubleneg.query).count('JOIN'))
        self.assertEqual(1, str(qs1_doubleneg.query).count('INNER JOIN'))
        self.assertEqual(str(qs1_filter.query).count('INNER JOIN'),
                         str(qs1_doubleneg.query).count('INNER JOIN'))

    def test_ticket_21748_complex_filter(self):
        i1 = Identifier.objects.create(name='i1')
        i2 = Identifier.objects.create(name='i2')
        Identifier.objects.create(name='i3')
        p1 = Program.objects.create(identifier=i1)
        c1 = Channel.objects.create(identifier=i1)
        p2 = Program.objects.create(identifier=i2)
        # Finally, a more complex case, one time in a way where each
        # NOT is pushed to lowest level in the boolean tree, and
        # another query where this isn't done.
        qs1 = Identifier.objects.filter(
            ~Q(~Q(program__id=p2.id, channel__id=c1.id) & Q(program__id=p1.id))
        ).order_by('pk')
        qs2 = Identifier.objects.filter(
            Q(Q(program__id=p2.id, channel__id=c1.id) | ~Q(program__id=p1.id))
        ).order_by('pk')
        self.assertQuerysetEqual(qs1, qs2, lambda x: x)
        self.assertEqual(str(qs1.query).count('JOIN'),
                         str(qs2.query).count('JOIN'))
        self.assertEqual(0, str(qs1.query).count('INNER JOIN'))
        self.assertEqual(str(qs1.query).count('INNER JOIN'),
                         str(qs2.query).count('INNER JOIN'))
class ReverseJoinTrimmingTest(TestCase):
    def test_reverse_trimming(self):
        # We don't accidentally trim reverse joins - we can't know if there is
        # anything on the other side of the join, so trimming reverse joins
        # can't be done, ever.
        t = Tag.objects.create()
        qs = Tag.objects.filter(annotation__tag=t.pk)
        self.assertIn('INNER JOIN', str(qs.query))
        self.assertEqual(list(qs), [])


class JoinReuseTest(TestCase):
    """
    The queries reuse joins sensibly (for example, direct joins
    are always reused).
    """
    def test_fk_reuse(self):
        qs = Annotation.objects.filter(tag__name='foo').filter(tag__name='bar')
        self.assertEqual(str(qs.query).count('JOIN'), 1)

    def test_fk_reuse_select_related(self):
        qs = Annotation.objects.filter(tag__name='foo').select_related('tag')
        self.assertEqual(str(qs.query).count('JOIN'), 1)

    def test_fk_reuse_annotation(self):
        qs = Annotation.objects.filter(tag__name='foo').annotate(cnt=Count('tag__name'))
        self.assertEqual(str(qs.query).count('JOIN'), 1)

    def test_fk_reuse_disjunction(self):
        qs = Annotation.objects.filter(Q(tag__name='foo') | Q(tag__name='bar'))
        self.assertEqual(str(qs.query).count('JOIN'), 1)

    def test_fk_reuse_order_by(self):
        qs = Annotation.objects.filter(tag__name='foo').order_by('tag__name')
        self.assertEqual(str(qs.query).count('JOIN'), 1)

    def test_revo2o_reuse(self):
        qs = Detail.objects.filter(member__name='foo').filter(member__name='foo')
        self.assertEqual(str(qs.query).count('JOIN'), 1)

    def test_revfk_noreuse(self):
        qs = Author.objects.filter(report__name='r4').filter(report__name='r1')
        self.assertEqual(str(qs.query).count('JOIN'), 2)

    def test_inverted_q_across_relations(self):
        """
        When a trimmable join is specified in the query (here school__), the
        ORM detects it and removes unnecessary joins. The set of reusable
        joins are updated after trimming the query so that other lookups
        don't consider that the outer query's filters are in effect for
        the subquery (#26551).
        """
        springfield_elementary = School.objects.create()
        hogward = School.objects.create()
        Student.objects.create(school=springfield_elementary)
        hp = Student.objects.create(school=hogward)
        Classroom.objects.create(school=hogward, name='Potion')
        Classroom.objects.create(school=springfield_elementary, name='Main')
        qs = Student.objects.filter(
            ~(Q(school__classroom__name='Main') & Q(school__classroom__has_blackboard=None))
        )
        self.assertSequenceEqual(qs, [hp])
""" def test_fk_reuse(self): qs = Annotation.objects.filter(tag__name='foo').filter(tag__name='bar') self.assertEqual(str(qs.query).count('JOIN'), 1) def test_fk_reuse_select_related(self): qs = Annotation.objects.filter(tag__name='foo').select_related('tag') self.assertEqual(str(qs.query).count('JOIN'), 1) def test_fk_reuse_annotation(self): qs = Annotation.objects.filter(tag__name='foo').annotate(cnt=Count('tag__name')) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_fk_reuse_disjunction(self): qs = Annotation.objects.filter(Q(tag__name='foo') | Q(tag__name='bar')) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_fk_reuse_order_by(self): qs = Annotation.objects.filter(tag__name='foo').order_by('tag__name') self.assertEqual(str(qs.query).count('JOIN'), 1) def test_revo2o_reuse(self): qs = Detail.objects.filter(member__name='foo').filter(member__name='foo') self.assertEqual(str(qs.query).count('JOIN'), 1) def test_revfk_noreuse(self): qs = Author.objects.filter(report__name='r4').filter(report__name='r1') self.assertEqual(str(qs.query).count('JOIN'), 2) def test_inverted_q_across_relations(self): """ When a trimmable join is specified in the query (here school__), the ORM detects it and removes unnecessary joins. The set of reusable joins are updated after trimming the query so that other lookups don't consider that the outer query's filters are in effect for the subquery (#26551). """ springfield_elementary = School.objects.create() hogward = School.objects.create() Student.objects.create(school=springfield_elementary) hp = Student.objects.create(school=hogward) Classroom.objects.create(school=hogward, name='Potion') Classroom.objects.create(school=springfield_elementary, name='Main') qs = Student.objects.filter( ~(Q(school__classroom__name='Main') & Q(school__classroom__has_blackboard=None)) ) self.assertSequenceEqual(qs, [hp]) class DisjunctionPromotionTests(TestCase): def test_disjunction_promotion_select_related(self): fk1 = FK1.objects.create(f1='f1', f2='f2') basea = BaseA.objects.create(a=fk1) qs = BaseA.objects.filter(Q(a=fk1) | Q(b=2)) self.assertEqual(str(qs.query).count(' JOIN '), 0) qs = qs.select_related('a', 'b') self.assertEqual(str(qs.query).count(' INNER JOIN '), 0) self.assertEqual(str(qs.query).count(' LEFT OUTER JOIN '), 2) with self.assertNumQueries(1): self.assertSequenceEqual(qs, [basea]) self.assertEqual(qs[0].a, fk1) self.assertIs(qs[0].b, None) def test_disjunction_promotion1(self): # Pre-existing join, add two ORed filters to the same join, # all joins can be INNER JOINS. qs = BaseA.objects.filter(a__f1='foo') self.assertEqual(str(qs.query).count('INNER JOIN'), 1) qs = qs.filter(Q(b__f1='foo') | Q(b__f2='foo')) self.assertEqual(str(qs.query).count('INNER JOIN'), 2) # Reverse the order of AND and OR filters. qs = BaseA.objects.filter(Q(b__f1='foo') | Q(b__f2='foo')) self.assertEqual(str(qs.query).count('INNER JOIN'), 1) qs = qs.filter(a__f1='foo') self.assertEqual(str(qs.query).count('INNER JOIN'), 2) def test_disjunction_promotion2(self): qs = BaseA.objects.filter(a__f1='foo') self.assertEqual(str(qs.query).count('INNER JOIN'), 1) # Now we have two different joins in an ORed condition, these # must be OUTER joins. The pre-existing join should remain INNER. qs = qs.filter(Q(b__f1='foo') | Q(c__f2='foo')) self.assertEqual(str(qs.query).count('INNER JOIN'), 1) self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2) # Reverse case. 
    def test_disjunction_promotion3(self):
        qs = BaseA.objects.filter(a__f2='bar')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        # The ANDed a__f2 filter allows us to keep using INNER JOIN
        # even inside the ORed case. If the join to a__ returns nothing,
        # the ANDed filter for a__f2 can't be true.
        qs = qs.filter(Q(a__f1='foo') | Q(b__f2='foo'))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)

    def test_disjunction_promotion3_demote(self):
        # This one needs demotion logic: the first filter causes a to be
        # outer joined, the second filter makes it inner join again.
        qs = BaseA.objects.filter(
            Q(a__f1='foo') | Q(b__f2='foo')).filter(a__f2='bar')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)

    def test_disjunction_promotion4_demote(self):
        qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('JOIN'), 0)
        # Demote needed for the "a" join. It is marked as outer join by
        # above filter (even if it is trimmed away).
        qs = qs.filter(a__f1='foo')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)

    def test_disjunction_promotion4(self):
        qs = BaseA.objects.filter(a__f1='foo')
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        qs = qs.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)

    def test_disjunction_promotion5_demote(self):
        qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
        # Note that the above filters on a force the join to an
        # inner join even if it is trimmed.
        self.assertEqual(str(qs.query).count('JOIN'), 0)
        qs = qs.filter(Q(a__f1='foo') | Q(b__f1='foo'))
        # So, now the a__f1 join doesn't need promotion.
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        # But b__f1 does.
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
        qs = BaseA.objects.filter(Q(a__f1='foo') | Q(b__f1='foo'))
        # Now the join to a is created as LOUTER.
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        qs = qs.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
    def test_disjunction_promotion6(self):
        qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('JOIN'), 0)
        qs = BaseA.objects.filter(Q(a__f1='foo') & Q(b__f1='foo'))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)

        qs = BaseA.objects.filter(Q(a__f1='foo') & Q(b__f1='foo'))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)
        self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
        qs = qs.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 2)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 0)

    def test_disjunction_promotion7(self):
        qs = BaseA.objects.filter(Q(a=1) | Q(a=2))
        self.assertEqual(str(qs.query).count('JOIN'), 0)
        qs = BaseA.objects.filter(Q(a__f1='foo') | (Q(b__f1='foo') & Q(a__f1='bar')))
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
        qs = BaseA.objects.filter(
            (Q(a__f1='foo') | Q(b__f1='foo')) & (Q(a__f1='bar') | Q(c__f1='foo'))
        )
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
        self.assertEqual(str(qs.query).count('INNER JOIN'), 0)
        qs = BaseA.objects.filter(
            Q(a__f1='foo') | Q(a__f1='bar') & (Q(b__f1='bar') | Q(c__f1='foo'))
        )
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)

    def test_disjunction_promotion_fexpression(self):
        qs = BaseA.objects.filter(Q(a__f1=F('b__f1')) | Q(b__f1='foo'))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 1)
        self.assertEqual(str(qs.query).count('INNER JOIN'), 1)
        qs = BaseA.objects.filter(Q(a__f1=F('c__f1')) | Q(b__f1='foo'))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
        qs = BaseA.objects.filter(Q(a__f1=F('b__f1')) | Q(a__f2=F('b__f2')) | Q(c__f1='foo'))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 3)
        qs = BaseA.objects.filter(Q(a__f1=F('c__f1')) | (Q(pk=1) & Q(pk=2)))
        self.assertEqual(str(qs.query).count('LEFT OUTER JOIN'), 2)
        self.assertEqual(str(qs.query).count('INNER JOIN'), 0)
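
# The rule of thumb behind the promotions above: a join can stay INNER only
# when every row the query keeps is guaranteed to need a match through that
# join. An ORed condition touching two different joins can be satisfied by
# either branch alone, so both joins must be promoted to LEFT OUTER, while a
# later ANDed filter on the same relation demotes it back to INNER, since
# rows without a match could never satisfy that filter anyway.
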
class ManyToManyExcludeTest(TestCase):
    def test_exclude_many_to_many(self):
        Identifier.objects.create(name='extra')
        program = Program.objects.create(identifier=Identifier.objects.create(name='program'))
        channel = Channel.objects.create(identifier=Identifier.objects.create(name='channel'))
        channel.programs.add(program)

        # channel contains 'program', so all Identifiers except that one
        # should be returned.
        self.assertQuerysetEqual(
            Identifier.objects.exclude(program__channel=channel).order_by('name'),
            ['<Identifier: channel>', '<Identifier: extra>']
        )
        self.assertQuerysetEqual(
            Identifier.objects.exclude(program__channel=None).order_by('name'),
            ['<Identifier: program>']
        )

    def test_ticket_12823(self):
        pg3 = Page.objects.create(text='pg3')
        pg2 = Page.objects.create(text='pg2')
        pg1 = Page.objects.create(text='pg1')
        pa1 = Paragraph.objects.create(text='pa1')
        pa1.page.set([pg1, pg2])
        pa2 = Paragraph.objects.create(text='pa2')
        pa2.page.set([pg2, pg3])
        pa3 = Paragraph.objects.create(text='pa3')
        ch1 = Chapter.objects.create(title='ch1', paragraph=pa1)
        ch2 = Chapter.objects.create(title='ch2', paragraph=pa2)
        ch3 = Chapter.objects.create(title='ch3', paragraph=pa3)
        b1 = Book.objects.create(title='b1', chapter=ch1)
        b2 = Book.objects.create(title='b2', chapter=ch2)
        b3 = Book.objects.create(title='b3', chapter=ch3)
        q = Book.objects.exclude(chapter__paragraph__page__text='pg1')
        self.assertNotIn('IS NOT NULL', str(q.query))
        self.assertEqual(len(q), 2)
        self.assertNotIn(b1, q)
        self.assertIn(b2, q)
        self.assertIn(b3, q)


class RelabelCloneTest(TestCase):
    def test_ticket_19964(self):
        my1 = MyObject.objects.create(data='foo')
        my1.parent = my1
        my1.save()
        my2 = MyObject.objects.create(data='bar', parent=my1)
        parents = MyObject.objects.filter(parent=F('id'))
        children = MyObject.objects.filter(parent__in=parents).exclude(parent=F('id'))
        self.assertEqual(list(parents), [my1])
        # Evaluating the children query (which has parents as part of it) does
        # not change results for the parents query.
        self.assertEqual(list(children), [my2])
        self.assertEqual(list(parents), [my1])


class Ticket20101Tests(TestCase):
    def test_ticket_20101(self):
        """
        Tests combining QuerySets with | (OR) when an exclude() subquery
        is involved.
        """
        t = Tag.objects.create(name='foo')
        a1 = Annotation.objects.create(tag=t, name='a1')
        a2 = Annotation.objects.create(tag=t, name='a2')
        a3 = Annotation.objects.create(tag=t, name='a3')
        n = Note.objects.create(note='foo', misc='bar')
        qs1 = Note.objects.exclude(annotation__in=[a1, a2])
        qs2 = Note.objects.filter(annotation__in=[a3])
        self.assertIn(n, qs1)
        self.assertNotIn(n, qs2)
        self.assertIn(n, (qs1 | qs2))


class EmptyStringPromotionTests(SimpleTestCase):
    def test_empty_string_promotion(self):
        qs = RelatedObject.objects.filter(single__name='')
        if connection.features.interprets_empty_strings_as_nulls:
            self.assertIn('LEFT OUTER JOIN', str(qs.query))
        else:
            self.assertNotIn('LEFT OUTER JOIN', str(qs.query))


class ValuesSubqueryTests(TestCase):
    def test_values_in_subquery(self):
        # If a values() queryset is used, then the given values
        # will be used instead of forcing use of the relation's field.
        o1 = Order.objects.create(id=-2)
        o2 = Order.objects.create(id=-1)
        oi1 = OrderItem.objects.create(order=o1, status=0)
        oi1.status = oi1.pk
        oi1.save()
        OrderItem.objects.create(order=o2, status=0)

        # The query below should match o1 as it has related order_item
        # with id == status.
        self.assertSequenceEqual(Order.objects.filter(items__in=OrderItem.objects.values_list('status')), [o1])
class DoubleInSubqueryTests(TestCase):
    def test_double_subquery_in(self):
        lfa1 = LeafA.objects.create(data='foo')
        lfa2 = LeafA.objects.create(data='bar')
        lfb1 = LeafB.objects.create(data='lfb1')
        lfb2 = LeafB.objects.create(data='lfb2')
        Join.objects.create(a=lfa1, b=lfb1)
        Join.objects.create(a=lfa2, b=lfb2)
        leaf_as = LeafA.objects.filter(data='foo').values_list('pk', flat=True)
        joins = Join.objects.filter(a__in=leaf_as).values_list('b__id', flat=True)
        qs = LeafB.objects.filter(pk__in=joins)
        self.assertSequenceEqual(qs, [lfb1])


class Ticket18785Tests(SimpleTestCase):
    def test_ticket_18785(self):
        # Test join trimming from ticket #18785.
        qs = Item.objects.exclude(
            note__isnull=False
        ).filter(
            name='something', creator__extra__isnull=True
        ).order_by()
        self.assertEqual(1, str(qs.query).count('INNER JOIN'))
        self.assertEqual(0, str(qs.query).count('OUTER JOIN'))


class Ticket20788Tests(TestCase):
    def test_ticket_20788(self):
        Paragraph.objects.create()
        paragraph = Paragraph.objects.create()
        page = paragraph.page.create()
        chapter = Chapter.objects.create(paragraph=paragraph)
        Book.objects.create(chapter=chapter)

        paragraph2 = Paragraph.objects.create()
        Page.objects.create()
        chapter2 = Chapter.objects.create(paragraph=paragraph2)
        book2 = Book.objects.create(chapter=chapter2)

        sentences_not_in_pub = Book.objects.exclude(chapter__paragraph__page=page)
        self.assertSequenceEqual(sentences_not_in_pub, [book2])


class Ticket12807Tests(TestCase):
    def test_ticket_12807(self):
        p1 = Paragraph.objects.create()
        p2 = Paragraph.objects.create()
        # The ORed condition below should have no effect on the query - the
        # ~Q(pk__in=[]) will always be True.
        qs = Paragraph.objects.filter((Q(pk=p2.pk) | ~Q(pk__in=[])) & Q(pk=p1.pk))
        self.assertSequenceEqual(qs, [p1])


class RelatedLookupTypeTests(TestCase):
    error = 'Cannot query "%s": Must be "%s" instance.'

    @classmethod
    def setUpTestData(cls):
        cls.oa = ObjectA.objects.create(name="oa")
        cls.poa = ProxyObjectA.objects.get(name="oa")
        cls.coa = ChildObjectA.objects.create(name="coa")
        cls.wrong_type = Order.objects.create(id=cls.oa.pk)
        cls.ob = ObjectB.objects.create(name="ob", objecta=cls.oa, num=1)
        ProxyObjectB.objects.create(name="pob", objecta=cls.oa, num=2)
        cls.pob = ProxyObjectB.objects.all()
        ObjectC.objects.create(childobjecta=cls.coa)

    def test_wrong_type_lookup(self):
        """
        A ValueError is raised when the incorrect object type is passed to a
        query lookup.
        """
        # Passing an incorrect object type.
        with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)):
            ObjectB.objects.get(objecta=self.wrong_type)

        with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)):
            ObjectB.objects.filter(objecta__in=[self.wrong_type])

        with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectA._meta.object_name)):
            ObjectB.objects.filter(objecta=self.wrong_type)

        with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)):
            ObjectA.objects.filter(objectb__in=[self.wrong_type, self.ob])

        # Passing an object of the class on which the query is done.
        with self.assertRaisesMessage(ValueError, self.error % (self.ob, ObjectA._meta.object_name)):
            ObjectB.objects.filter(objecta__in=[self.poa, self.ob])

        with self.assertRaisesMessage(ValueError, self.error % (self.ob, ChildObjectA._meta.object_name)):
            ObjectC.objects.exclude(childobjecta__in=[self.coa, self.ob])
    def test_wrong_backward_lookup(self):
        """
        A ValueError is raised when the incorrect object type is passed to a
        query lookup for backward relations.
        """
        with self.assertRaisesMessage(ValueError, self.error % (self.oa, ObjectB._meta.object_name)):
            ObjectA.objects.filter(objectb__in=[self.oa, self.ob])

        with self.assertRaisesMessage(ValueError, self.error % (self.oa, ObjectB._meta.object_name)):
            ObjectA.objects.exclude(objectb=self.oa)

        with self.assertRaisesMessage(ValueError, self.error % (self.wrong_type, ObjectB._meta.object_name)):
            ObjectA.objects.get(objectb=self.wrong_type)

    def test_correct_lookup(self):
        """
        When passing proxy model objects, child objects, or parent objects,
        lookups work fine.
        """
        out_a = ['<ObjectA: oa>']
        out_b = ['<ObjectB: ob>', '<ObjectB: pob>']
        out_c = ['<ObjectC: >']

        # proxy model objects
        self.assertQuerysetEqual(ObjectB.objects.filter(objecta=self.poa).order_by('name'), out_b)
        self.assertQuerysetEqual(ObjectA.objects.filter(objectb__in=self.pob).order_by('pk'), out_a * 2)

        # child objects
        self.assertQuerysetEqual(ObjectB.objects.filter(objecta__in=[self.coa]), [])
        self.assertQuerysetEqual(ObjectB.objects.filter(objecta__in=[self.poa, self.coa]).order_by('name'), out_b)
        self.assertQuerysetEqual(
            ObjectB.objects.filter(objecta__in=iter([self.poa, self.coa])).order_by('name'),
            out_b
        )

        # parent objects
        self.assertQuerysetEqual(ObjectC.objects.exclude(childobjecta=self.oa), out_c)

        # QuerySet related object type checking shouldn't issue queries
        # (the querysets aren't evaluated here, hence zero queries) (#23266).
        with self.assertNumQueries(0):
            ObjectB.objects.filter(objecta__in=ObjectA.objects.all())

    def test_values_queryset_lookup(self):
        """
        #23396 - Ensure ValuesQuerySets are not checked for compatibility
        with the lookup field.
        """
        # Make sure the num and objecta field values match.
        ob = ObjectB.objects.get(name='ob')
        ob.num = ob.objecta.pk
        ob.save()
        pob = ObjectB.objects.get(name='pob')
        pob.num = pob.objecta.pk
        pob.save()
        self.assertQuerysetEqual(ObjectB.objects.filter(
            objecta__in=ObjectB.objects.all().values_list('num')
        ).order_by('pk'), ['<ObjectB: ob>', '<ObjectB: pob>'])


class Ticket14056Tests(TestCase):
    def test_ticket_14056(self):
        s1 = SharedConnection.objects.create(data='s1')
        s2 = SharedConnection.objects.create(data='s2')
        s3 = SharedConnection.objects.create(data='s3')
        PointerA.objects.create(connection=s2)
        expected_ordering = (
            [s1, s3, s2] if connection.features.nulls_order_largest
            else [s2, s1, s3]
        )
        self.assertSequenceEqual(SharedConnection.objects.order_by('-pointera__connection', 'pk'), expected_ordering)


class Ticket20955Tests(TestCase):
    def test_ticket_20955(self):
        jack = Staff.objects.create(name='jackstaff')
        jackstaff = StaffUser.objects.create(staff=jack)
        jill = Staff.objects.create(name='jillstaff')
        jillstaff = StaffUser.objects.create(staff=jill)
        task = Task.objects.create(creator=jackstaff, owner=jillstaff, title="task")
        task_get = Task.objects.get(pk=task.pk)
        # Load data so that assertNumQueries doesn't complain about the get
        # version's queries.
        task_get.creator.staffuser.staff
        task_get.owner.staffuser.staff
        qs = Task.objects.select_related(
            'creator__staffuser__staff', 'owner__staffuser__staff')
        self.assertEqual(str(qs.query).count(' JOIN '), 6)
        task_select_related = qs.get(pk=task.pk)
        with self.assertNumQueries(0):
            self.assertEqual(task_select_related.creator.staffuser.staff,
                             task_get.creator.staffuser.staff)
            self.assertEqual(task_select_related.owner.staffuser.staff,
                             task_get.owner.staffuser.staff)
class Ticket21203Tests(TestCase):
    def test_ticket_21203(self):
        p = Ticket21203Parent.objects.create(parent_bool=True)
        c = Ticket21203Child.objects.create(parent=p)
        qs = Ticket21203Child.objects.select_related('parent').defer('parent__created')
        self.assertSequenceEqual(qs, [c])
        self.assertIs(qs[0].parent.parent_bool, True)


class ValuesJoinPromotionTests(TestCase):
    def test_values_no_promotion_for_existing(self):
        qs = Node.objects.filter(parent__parent__isnull=False)
        self.assertIn(' INNER JOIN ', str(qs.query))
        qs = qs.values('parent__parent__id')
        self.assertIn(' INNER JOIN ', str(qs.query))
        # Make sure there is a left outer join without the filter.
        qs = Node.objects.values('parent__parent__id')
        self.assertIn(' LEFT OUTER JOIN ', str(qs.query))

    def test_non_nullable_fk_not_promoted(self):
        qs = ObjectB.objects.values('objecta__name')
        self.assertIn(' INNER JOIN ', str(qs.query))

    def test_ticket_21376(self):
        a = ObjectA.objects.create()
        ObjectC.objects.create(objecta=a)
        qs = ObjectC.objects.filter(
            Q(objecta=a) |
            Q(objectb__objecta=a),
        )
        qs = qs.filter(
            Q(objectb=1) |
            Q(objecta=a),
        )
        self.assertEqual(qs.count(), 1)
        tblname = connection.ops.quote_name(ObjectB._meta.db_table)
        self.assertIn(' LEFT OUTER JOIN %s' % tblname, str(qs.query))


class ForeignKeyToBaseExcludeTests(TestCase):
    def test_ticket_21787(self):
        sc1 = SpecialCategory.objects.create(special_name='sc1', name='sc1')
        sc2 = SpecialCategory.objects.create(special_name='sc2', name='sc2')
        sc3 = SpecialCategory.objects.create(special_name='sc3', name='sc3')
        c1 = CategoryItem.objects.create(category=sc1)
        CategoryItem.objects.create(category=sc2)
        self.assertSequenceEqual(SpecialCategory.objects.exclude(categoryitem__id=c1.pk).order_by('name'), [sc2, sc3])
        self.assertSequenceEqual(SpecialCategory.objects.filter(categoryitem__id=c1.pk), [sc1])


class ReverseM2MCustomPkTests(TestCase):
    def test_ticket_21879(self):
        cpt1 = CustomPkTag.objects.create(id='cpt1', tag='cpt1')
        cp1 = CustomPk.objects.create(name='cp1', extra='extra')
        cp1.custompktag_set.add(cpt1)
        self.assertSequenceEqual(CustomPk.objects.filter(custompktag=cpt1), [cp1])
        self.assertSequenceEqual(CustomPkTag.objects.filter(custom_pk=cp1), [cpt1])


class Ticket22429Tests(TestCase):
    def test_ticket_22429(self):
        sc1 = School.objects.create()
        st1 = Student.objects.create(school=sc1)

        sc2 = School.objects.create()
        st2 = Student.objects.create(school=sc2)

        cr = Classroom.objects.create(school=sc1)
        cr.students.add(st1)

        queryset = Student.objects.filter(~Q(classroom__school=F('school')))
        self.assertSequenceEqual(queryset, [st2])


class Ticket23605Tests(TestCase):
    def test_ticket_23605(self):
        # Test filtering on a complicated q-object from ticket's report.
        # The query structure is such that we have multiple nested subqueries.
        # The original problem was that the inner queries weren't relabeled
        # correctly.
        # See also #24090.
        a1 = Ticket23605A.objects.create()
        a2 = Ticket23605A.objects.create()
        c1 = Ticket23605C.objects.create(field_c0=10000.0)
        Ticket23605B.objects.create(
            field_b0=10000.0, field_b1=True, modelc_fk=c1, modela_fk=a1)
        complex_q = Q(pk__in=Ticket23605A.objects.filter(
            Q(
                # True for a1 as field_b0 = 10000, field_c0=10000
                # False for a2 as no ticket23605b found
                ticket23605b__field_b0__gte=1000000 / F("ticket23605b__modelc_fk__field_c0")
            ) &
            # True for a1 (field_b1=True)
            Q(ticket23605b__field_b1=True) &
            ~Q(ticket23605b__pk__in=Ticket23605B.objects.filter(
                ~(
                    # Same filters as the commented filters above, but
                    # double-negated (one for Q() above, one for
                    # parentheses). So, again a1 match, a2 not.
                    Q(field_b1=True) &
                    Q(field_b0__gte=1000000 / F("modelc_fk__field_c0"))
                )
            ))).filter(ticket23605b__field_b1=True))
        qs1 = Ticket23605A.objects.filter(complex_q)
        self.assertSequenceEqual(qs1, [a1])
        qs2 = Ticket23605A.objects.exclude(complex_q)
        self.assertSequenceEqual(qs2, [a2])


class TestTicket24279(TestCase):
    def test_ticket_24278(self):
        School.objects.create()
        qs = School.objects.filter(Q(pk__in=()) | Q())
        self.assertQuerysetEqual(qs, [])


class TestInvalidValuesRelation(SimpleTestCase):
    def test_invalid_values(self):
        msg = "Field 'id' expected a number but got 'abc'."
        with self.assertRaisesMessage(ValueError, msg):
            Annotation.objects.filter(tag='abc')
        with self.assertRaisesMessage(ValueError, msg):
            Annotation.objects.filter(tag__in=[123, 'abc'])


class TestTicket24605(TestCase):
    def test_ticket_24605(self):
        """
        Subquery table names should be quoted.
        """
        i1 = Individual.objects.create(alive=True)
        RelatedIndividual.objects.create(related=i1)
        i2 = Individual.objects.create(alive=False)
        RelatedIndividual.objects.create(related=i2)
        i3 = Individual.objects.create(alive=True)
        i4 = Individual.objects.create(alive=False)

        self.assertSequenceEqual(Individual.objects.filter(Q(alive=False), Q(related_individual__isnull=True)), [i4])
        self.assertSequenceEqual(
            Individual.objects.exclude(Q(alive=False), Q(related_individual__isnull=True)).order_by('pk'),
            [i1, i2, i3]
        )


class Ticket23622Tests(TestCase):
    @skipUnlessDBFeature('can_distinct_on_fields')
    def test_ticket_23622(self):
        """
        Make sure __pk__in and __in work the same for related fields when
        using a distinct on subquery.
        """
""" a1 = Ticket23605A.objects.create() a2 = Ticket23605A.objects.create() c1 = Ticket23605C.objects.create(field_c0=0.0) Ticket23605B.objects.create( modela_fk=a1, field_b0=123, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a1, field_b0=23, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a1, field_b0=234, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a1, field_b0=12, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a2, field_b0=567, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a2, field_b0=76, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a2, field_b0=7, field_b1=True, modelc_fk=c1, ) Ticket23605B.objects.create( modela_fk=a2, field_b0=56, field_b1=True, modelc_fk=c1, ) qx = ( Q(ticket23605b__pk__in=Ticket23605B.objects.order_by('modela_fk', '-field_b1').distinct('modela_fk')) & Q(ticket23605b__field_b0__gte=300) ) qy = ( Q(ticket23605b__in=Ticket23605B.objects.order_by('modela_fk', '-field_b1').distinct('modela_fk')) & Q(ticket23605b__field_b0__gte=300) ) self.assertEqual( set(Ticket23605A.objects.filter(qx).values_list('pk', flat=True)), set(Ticket23605A.objects.filter(qy).values_list('pk', flat=True)) ) self.assertSequenceEqual(Ticket23605A.objects.filter(qx), [a2])
""" Various complex queries that have been problematic in the past. """ from django.db import models from django.db.models.functions import Now class DumbCategory(models.Model): pass class ProxyCategory(DumbCategory): class Meta: proxy = True class NamedCategory(DumbCategory): name = models.CharField(max_length=10) def __str__(self): return self.name class Tag(models.Model): name = models.CharField(max_length=10) parent = models.ForeignKey( 'self', models.SET_NULL, blank=True, null=True, related_name='children', ) category = models.ForeignKey(NamedCategory, models.SET_NULL, null=True, default=None) class Meta: ordering = ['name'] def __str__(self): return self.name class Note(models.Model): note = models.CharField(max_length=100) misc = models.CharField(max_length=10) tag = models.ForeignKey(Tag, models.SET_NULL, blank=True, null=True) class Meta: ordering = ['note'] def __str__(self): return self.note class Annotation(models.Model): name = models.CharField(max_length=10) tag = models.ForeignKey(Tag, models.CASCADE) notes = models.ManyToManyField(Note) def __str__(self): return self.name class DateTimePK(models.Model): date = models.DateTimeField(primary_key=True, auto_now_add=True) class ExtraInfo(models.Model): info = models.CharField(max_length=100) note = models.ForeignKey(Note, models.CASCADE, null=True) value = models.IntegerField(null=True) date = models.ForeignKey(DateTimePK, models.SET_NULL, null=True) filterable = models.BooleanField(default=True) class Meta: ordering = ['info'] def __str__(self): return self.info class Author(models.Model): name = models.CharField(max_length=10) num = models.IntegerField(unique=True) extra = models.ForeignKey(ExtraInfo, models.CASCADE) class Meta: ordering = ['name'] def __str__(self): return self.name class Item(models.Model): name = models.CharField(max_length=10) created = models.DateTimeField() modified = models.DateTimeField(blank=True, null=True) tags = models.ManyToManyField(Tag, blank=True) creator = models.ForeignKey(Author, models.CASCADE) note = models.ForeignKey(Note, models.CASCADE) class Meta: ordering = ['-note', 'name'] def __str__(self): return self.name class Report(models.Model): name = models.CharField(max_length=10) creator = models.ForeignKey(Author, models.SET_NULL, to_field='num', null=True) def __str__(self): return self.name class ReportComment(models.Model): report = models.ForeignKey(Report, models.CASCADE) class Ranking(models.Model): rank = models.IntegerField() author = models.ForeignKey(Author, models.CASCADE) class Meta: # A complex ordering specification. Should stress the system a bit. ordering = ('author__extra__note', 'author__name', 'rank') def __str__(self): return '%d: %s' % (self.rank, self.author.name) class Cover(models.Model): title = models.CharField(max_length=50) item = models.ForeignKey(Item, models.CASCADE) class Meta: ordering = ['item'] def __str__(self): return self.title class Number(models.Model): num = models.IntegerField() other_num = models.IntegerField(null=True) another_num = models.IntegerField(null=True) def __str__(self): return str(self.num) # Symmetrical m2m field with a normal field using the reverse accessor name # ("valid"). class Valid(models.Model): valid = models.CharField(max_length=10) parent = models.ManyToManyField('self') class Meta: ordering = ['valid'] # Some funky cross-linked models for testing a couple of infinite recursion # cases. 
class X(models.Model): y = models.ForeignKey('Y', models.CASCADE) class Y(models.Model): x1 = models.ForeignKey(X, models.CASCADE, related_name='y1') # Some models with a cycle in the default ordering. This would be bad if we # didn't catch the infinite loop. class LoopX(models.Model): y = models.ForeignKey('LoopY', models.CASCADE) class Meta: ordering = ['y'] class LoopY(models.Model): x = models.ForeignKey(LoopX, models.CASCADE) class Meta: ordering = ['x'] class LoopZ(models.Model): z = models.ForeignKey('self', models.CASCADE) class Meta: ordering = ['z'] # A model and custom default manager combination. class CustomManager(models.Manager): def get_queryset(self): qs = super().get_queryset() return qs.filter(public=True, tag__name='t1') class ManagedModel(models.Model): data = models.CharField(max_length=10) tag = models.ForeignKey(Tag, models.CASCADE) public = models.BooleanField(default=True) objects = CustomManager() normal_manager = models.Manager() def __str__(self): return self.data # An inter-related setup with multiple paths from Child to Detail. class Detail(models.Model): data = models.CharField(max_length=10) class MemberManager(models.Manager): def get_queryset(self): return super().get_queryset().select_related("details") class Member(models.Model): name = models.CharField(max_length=10) details = models.OneToOneField(Detail, models.CASCADE, primary_key=True) objects = MemberManager() class Child(models.Model): person = models.OneToOneField(Member, models.CASCADE, primary_key=True) parent = models.ForeignKey(Member, models.CASCADE, related_name="children") # Custom primary keys interfered with ordering in the past. class CustomPk(models.Model): name = models.CharField(max_length=10, primary_key=True) extra = models.CharField(max_length=10) class Meta: ordering = ['name', 'extra'] class Related(models.Model): custom = models.ForeignKey(CustomPk, models.CASCADE, null=True) class CustomPkTag(models.Model): id = models.CharField(max_length=20, primary_key=True) custom_pk = models.ManyToManyField(CustomPk) tag = models.CharField(max_length=20) # An inter-related setup with a model subclass that has a nullable # path to another model, and a return path from that model. class Celebrity(models.Model): name = models.CharField("Name", max_length=20) greatest_fan = models.ForeignKey("Fan", models.SET_NULL, null=True, unique=True) def __str__(self): return self.name class TvChef(Celebrity): pass class Fan(models.Model): fan_of = models.ForeignKey(Celebrity, models.CASCADE) # Multiple foreign keys class LeafA(models.Model): data = models.CharField(max_length=10) def __str__(self): return self.data class LeafB(models.Model): data = models.CharField(max_length=10) class Join(models.Model): a = models.ForeignKey(LeafA, models.CASCADE) b = models.ForeignKey(LeafB, models.CASCADE) class ReservedName(models.Model): name = models.CharField(max_length=20) order = models.IntegerField() def __str__(self): return self.name # A simpler shared-foreign-key setup that can expose some problems. 
class SharedConnection(models.Model): data = models.CharField(max_length=10) def __str__(self): return self.data class PointerA(models.Model): connection = models.ForeignKey(SharedConnection, models.CASCADE) class PointerB(models.Model): connection = models.ForeignKey(SharedConnection, models.CASCADE) # Multi-layer ordering class SingleObject(models.Model): name = models.CharField(max_length=10) class Meta: ordering = ['name'] def __str__(self): return self.name class RelatedObject(models.Model): single = models.ForeignKey(SingleObject, models.SET_NULL, null=True) f = models.IntegerField(null=True) class Meta: ordering = ['single'] class Plaything(models.Model): name = models.CharField(max_length=10) others = models.ForeignKey(RelatedObject, models.SET_NULL, null=True) class Meta: ordering = ['others'] def __str__(self): return self.name class Article(models.Model): name = models.CharField(max_length=20) created = models.DateTimeField() def __str__(self): return self.name class Food(models.Model): name = models.CharField(max_length=20, unique=True) def __str__(self): return self.name class Eaten(models.Model): food = models.ForeignKey(Food, models.SET_NULL, to_field="name", null=True) meal = models.CharField(max_length=20) def __str__(self): return "%s at %s" % (self.food, self.meal) class Node(models.Model): num = models.IntegerField(unique=True) parent = models.ForeignKey("self", models.SET_NULL, to_field="num", null=True) def __str__(self): return str(self.num) # Bug #12252 class ObjectA(models.Model): name = models.CharField(max_length=50) def __str__(self): return self.name def __iter__(self): # Ticket #23721 assert False, 'type checking should happen without calling model __iter__' class ProxyObjectA(ObjectA): class Meta: proxy = True class ChildObjectA(ObjectA): pass class ObjectB(models.Model): name = models.CharField(max_length=50) objecta = models.ForeignKey(ObjectA, models.CASCADE) num = models.PositiveSmallIntegerField() def __str__(self): return self.name class ProxyObjectB(ObjectB): class Meta: proxy = True class ObjectC(models.Model): name = models.CharField(max_length=50) objecta = models.ForeignKey(ObjectA, models.SET_NULL, null=True) objectb = models.ForeignKey(ObjectB, models.SET_NULL, null=True) childobjecta = models.ForeignKey(ChildObjectA, models.SET_NULL, null=True, related_name='ca_pk') def __str__(self): return self.name class SimpleCategory(models.Model): name = models.CharField(max_length=15) def __str__(self): return self.name class SpecialCategory(SimpleCategory): special_name = models.CharField(max_length=15) def __str__(self): return self.name + " " + self.special_name class CategoryItem(models.Model): category = models.ForeignKey(SimpleCategory, models.CASCADE) def __str__(self): return "category item: " + str(self.category) class MixedCaseFieldCategoryItem(models.Model): CaTeGoRy = models.ForeignKey(SimpleCategory, models.CASCADE) class MixedCaseDbColumnCategoryItem(models.Model): category = models.ForeignKey(SimpleCategory, models.CASCADE, db_column='CaTeGoRy_Id') class OneToOneCategory(models.Model): new_name = models.CharField(max_length=15) category = models.OneToOneField(SimpleCategory, models.CASCADE) def __str__(self): return "one2one " + self.new_name class CategoryRelationship(models.Model): first = models.ForeignKey(SimpleCategory, models.CASCADE, related_name='first_rel') second = models.ForeignKey(SimpleCategory, models.CASCADE, related_name='second_rel') class CommonMixedCaseForeignKeys(models.Model): category = models.ForeignKey(CategoryItem, 
models.CASCADE) mixed_case_field_category = models.ForeignKey(MixedCaseFieldCategoryItem, models.CASCADE) mixed_case_db_column_category = models.ForeignKey(MixedCaseDbColumnCategoryItem, models.CASCADE) class NullableName(models.Model): name = models.CharField(max_length=20, null=True) class Meta: ordering = ['id'] class ModelD(models.Model): name = models.TextField() class ModelC(models.Model): name = models.TextField() class ModelB(models.Model): name = models.TextField() c = models.ForeignKey(ModelC, models.CASCADE) class ModelA(models.Model): name = models.TextField() b = models.ForeignKey(ModelB, models.SET_NULL, null=True) d = models.ForeignKey(ModelD, models.CASCADE) class Job(models.Model): name = models.CharField(max_length=20, unique=True) def __str__(self): return self.name class JobResponsibilities(models.Model): job = models.ForeignKey(Job, models.CASCADE, to_field='name') responsibility = models.ForeignKey('Responsibility', models.CASCADE, to_field='description') class Responsibility(models.Model): description = models.CharField(max_length=20, unique=True) jobs = models.ManyToManyField(Job, through=JobResponsibilities, related_name='responsibilities') def __str__(self): return self.description # Models for disjunction join promotion low level testing. class FK1(models.Model): f1 = models.TextField() f2 = models.TextField() class FK2(models.Model): f1 = models.TextField() f2 = models.TextField() class FK3(models.Model): f1 = models.TextField() f2 = models.TextField() class BaseA(models.Model): a = models.ForeignKey(FK1, models.SET_NULL, null=True) b = models.ForeignKey(FK2, models.SET_NULL, null=True) c = models.ForeignKey(FK3, models.SET_NULL, null=True) class Identifier(models.Model): name = models.CharField(max_length=100) def __str__(self): return self.name class Program(models.Model): identifier = models.OneToOneField(Identifier, models.CASCADE) class Channel(models.Model): programs = models.ManyToManyField(Program) identifier = models.OneToOneField(Identifier, models.CASCADE) class Book(models.Model): title = models.TextField() chapter = models.ForeignKey('Chapter', models.CASCADE) class Chapter(models.Model): title = models.TextField() paragraph = models.ForeignKey('Paragraph', models.CASCADE) class Paragraph(models.Model): text = models.TextField() page = models.ManyToManyField('Page') class Page(models.Model): text = models.TextField() class MyObject(models.Model): parent = models.ForeignKey('self', models.SET_NULL, null=True, blank=True, related_name='children') data = models.CharField(max_length=100) created_at = models.DateTimeField(auto_now_add=True) # Models for #17600 regressions class Order(models.Model): id = models.IntegerField(primary_key=True) name = models.CharField(max_length=12, null=True, default='') class Meta: ordering = ('pk',) def __str__(self): return str(self.pk) class OrderItem(models.Model): order = models.ForeignKey(Order, models.CASCADE, related_name='items') status = models.IntegerField() class Meta: ordering = ('pk',) def __str__(self): return str(self.pk) class BaseUser(models.Model): pass class Task(models.Model): title = models.CharField(max_length=10) owner = models.ForeignKey(BaseUser, models.CASCADE, related_name='owner') creator = models.ForeignKey(BaseUser, models.CASCADE, related_name='creator') def __str__(self): return self.title class Staff(models.Model): name = models.CharField(max_length=10) def __str__(self): return self.name class StaffUser(BaseUser): staff = models.OneToOneField(Staff, models.CASCADE, related_name='user') 
def __str__(self): return self.staff class Ticket21203Parent(models.Model): parentid = models.AutoField(primary_key=True) parent_bool = models.BooleanField(default=True) created = models.DateTimeField(auto_now=True) class Ticket21203Child(models.Model): childid = models.AutoField(primary_key=True) parent = models.ForeignKey(Ticket21203Parent, models.CASCADE) class Person(models.Model): name = models.CharField(max_length=128) class Company(models.Model): name = models.CharField(max_length=128) employees = models.ManyToManyField(Person, related_name='employers', through='Employment') def __str__(self): return self.name class Employment(models.Model): employer = models.ForeignKey(Company, models.CASCADE) employee = models.ForeignKey(Person, models.CASCADE) title = models.CharField(max_length=128) class School(models.Model): pass class Student(models.Model): school = models.ForeignKey(School, models.CASCADE) class Classroom(models.Model): name = models.CharField(max_length=20) has_blackboard = models.BooleanField(null=True) school = models.ForeignKey(School, models.CASCADE) students = models.ManyToManyField(Student, related_name='classroom') class Teacher(models.Model): schools = models.ManyToManyField(School) friends = models.ManyToManyField('self') class Ticket23605AParent(models.Model): pass class Ticket23605A(Ticket23605AParent): pass class Ticket23605B(models.Model): modela_fk = models.ForeignKey(Ticket23605A, models.CASCADE) modelc_fk = models.ForeignKey("Ticket23605C", models.CASCADE) field_b0 = models.IntegerField(null=True) field_b1 = models.BooleanField(default=False) class Ticket23605C(models.Model): field_c0 = models.FloatField() # db_table names have capital letters to ensure they are quoted in queries. class Individual(models.Model): alive = models.BooleanField() class Meta: db_table = 'Individual' class RelatedIndividual(models.Model): related = models.ForeignKey(Individual, models.CASCADE, related_name='related_individual') class Meta: db_table = 'RelatedIndividual' class CustomDbColumn(models.Model): custom_column = models.IntegerField(db_column='custom_name', null=True) ip_address = models.GenericIPAddressField(null=True) class CreatedField(models.DateTimeField): db_returning = True def __init__(self, *args, **kwargs): kwargs.setdefault('default', Now) super().__init__(*args, **kwargs) class ReturningModel(models.Model): created = CreatedField(editable=False) class NonIntegerPKReturningModel(models.Model): created = CreatedField(editable=False, primary_key=True) class JSONFieldNullable(models.Model): json_field = models.JSONField(blank=True, null=True) class Meta: required_db_features = {'supports_json_field'}
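# A minimal sketch (not part of the models above) of what CreatedField's
# db_returning = True provides: on backends that can return columns from an
# INSERT (e.g. PostgreSQL's RETURNING clause, per the
# can_return_columns_from_insert feature), the database-computed Now()
# default is populated on the instance immediately after create(), with no
# extra refresh_from_db() round trip:
#
#   obj = ReturningModel.objects.create()
#   obj.created  # already a datetime fetched back from the database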
import unittest from django.core.checks import Error, Warning from django.core.checks.model_checks import _check_lazy_references from django.db import connection, connections, models from django.db.models.functions import Lower from django.db.models.signals import post_init from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature from django.test.utils import isolate_apps, override_settings, register_lookup class EmptyRouter: pass def get_max_column_name_length(): allowed_len = None db_alias = None for db in ('default', 'other'): connection = connections[db] max_name_length = connection.ops.max_name_length() if max_name_length is not None and not connection.features.truncates_names: if allowed_len is None or max_name_length < allowed_len: allowed_len = max_name_length db_alias = db return (allowed_len, db_alias) @isolate_apps('invalid_models_tests') class IndexTogetherTests(SimpleTestCase): def test_non_iterable(self): class Model(models.Model): class Meta: index_together = 42 self.assertEqual(Model.check(), [ Error( "'index_together' must be a list or tuple.", obj=Model, id='models.E008', ), ]) def test_non_list(self): class Model(models.Model): class Meta: index_together = 'not-a-list' self.assertEqual(Model.check(), [ Error( "'index_together' must be a list or tuple.", obj=Model, id='models.E008', ), ]) def test_list_containing_non_iterable(self): class Model(models.Model): class Meta: index_together = [('a', 'b'), 42] self.assertEqual(Model.check(), [ Error( "All 'index_together' elements must be lists or tuples.", obj=Model, id='models.E009', ), ]) def test_pointing_to_missing_field(self): class Model(models.Model): class Meta: index_together = [['missing_field']] self.assertEqual(Model.check(), [ Error( "'index_together' refers to the nonexistent field 'missing_field'.", obj=Model, id='models.E012', ), ]) def test_pointing_to_non_local_field(self): class Foo(models.Model): field1 = models.IntegerField() class Bar(Foo): field2 = models.IntegerField() class Meta: index_together = [['field2', 'field1']] self.assertEqual(Bar.check(), [ Error( "'index_together' refers to field 'field1' which is not " "local to model 'Bar'.", hint='This issue may be caused by multi-table inheritance.', obj=Bar, id='models.E016', ), ]) def test_pointing_to_m2m_field(self): class Model(models.Model): m2m = models.ManyToManyField('self') class Meta: index_together = [['m2m']] self.assertEqual(Model.check(), [ Error( "'index_together' refers to a ManyToManyField 'm2m', but " "ManyToManyFields are not permitted in 'index_together'.", obj=Model, id='models.E013', ), ]) def test_pointing_to_fk(self): class Foo(models.Model): pass class Bar(models.Model): foo_1 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_1') foo_2 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_2') class Meta: index_together = [['foo_1_id', 'foo_2']] self.assertEqual(Bar.check(), []) # unique_together tests are very similar to index_together tests. 
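# For orientation, a minimal sketch (hypothetical model, not part of this
# module) of how the Meta option checks exercised above and below surface
# outside a test:
#
#   class Book(models.Model):
#       class Meta:
#           index_together = 42         # -> Error id 'models.E008'
#           unique_together = 'a-b'     # -> Error id 'models.E010'
#
#   Book.check()  # returns the list of django.core.checks.Error objects
#                 # that these tests assert against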
@isolate_apps('invalid_models_tests') class UniqueTogetherTests(SimpleTestCase): def test_non_iterable(self): class Model(models.Model): class Meta: unique_together = 42 self.assertEqual(Model.check(), [ Error( "'unique_together' must be a list or tuple.", obj=Model, id='models.E010', ), ]) def test_list_containing_non_iterable(self): class Model(models.Model): one = models.IntegerField() two = models.IntegerField() class Meta: unique_together = [('a', 'b'), 42] self.assertEqual(Model.check(), [ Error( "All 'unique_together' elements must be lists or tuples.", obj=Model, id='models.E011', ), ]) def test_non_list(self): class Model(models.Model): class Meta: unique_together = 'not-a-list' self.assertEqual(Model.check(), [ Error( "'unique_together' must be a list or tuple.", obj=Model, id='models.E010', ), ]) def test_valid_model(self): class Model(models.Model): one = models.IntegerField() two = models.IntegerField() class Meta: # unique_together can be a simple tuple unique_together = ('one', 'two') self.assertEqual(Model.check(), []) def test_pointing_to_missing_field(self): class Model(models.Model): class Meta: unique_together = [['missing_field']] self.assertEqual(Model.check(), [ Error( "'unique_together' refers to the nonexistent field 'missing_field'.", obj=Model, id='models.E012', ), ]) def test_pointing_to_m2m(self): class Model(models.Model): m2m = models.ManyToManyField('self') class Meta: unique_together = [['m2m']] self.assertEqual(Model.check(), [ Error( "'unique_together' refers to a ManyToManyField 'm2m', but " "ManyToManyFields are not permitted in 'unique_together'.", obj=Model, id='models.E013', ), ]) def test_pointing_to_fk(self): class Foo(models.Model): pass class Bar(models.Model): foo_1 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_1') foo_2 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_2') class Meta: unique_together = [['foo_1_id', 'foo_2']] self.assertEqual(Bar.check(), []) @isolate_apps('invalid_models_tests') class IndexesTests(TestCase): def test_pointing_to_missing_field(self): class Model(models.Model): class Meta: indexes = [models.Index(fields=['missing_field'], name='name')] self.assertEqual(Model.check(), [ Error( "'indexes' refers to the nonexistent field 'missing_field'.", obj=Model, id='models.E012', ), ]) def test_pointing_to_m2m_field(self): class Model(models.Model): m2m = models.ManyToManyField('self') class Meta: indexes = [models.Index(fields=['m2m'], name='name')] self.assertEqual(Model.check(), [ Error( "'indexes' refers to a ManyToManyField 'm2m', but " "ManyToManyFields are not permitted in 'indexes'.", obj=Model, id='models.E013', ), ]) def test_pointing_to_non_local_field(self): class Foo(models.Model): field1 = models.IntegerField() class Bar(Foo): field2 = models.IntegerField() class Meta: indexes = [models.Index(fields=['field2', 'field1'], name='name')] self.assertEqual(Bar.check(), [ Error( "'indexes' refers to field 'field1' which is not local to " "model 'Bar'.", hint='This issue may be caused by multi-table inheritance.', obj=Bar, id='models.E016', ), ]) def test_pointing_to_fk(self): class Foo(models.Model): pass class Bar(models.Model): foo_1 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_1') foo_2 = models.ForeignKey(Foo, on_delete=models.CASCADE, related_name='bar_2') class Meta: indexes = [models.Index(fields=['foo_1_id', 'foo_2'], name='index_name')] self.assertEqual(Bar.check(), []) def test_name_constraints(self): class Model(models.Model): class Meta: 
indexes = [ models.Index(fields=['id'], name='_index_name'), models.Index(fields=['id'], name='5index_name'), ] self.assertEqual(Model.check(), [ Error( "The index name '%sindex_name' cannot start with an " "underscore or a number." % prefix, obj=Model, id='models.E033', ) for prefix in ('_', '5') ]) def test_max_name_length(self): index_name = 'x' * 31 class Model(models.Model): class Meta: indexes = [models.Index(fields=['id'], name=index_name)] self.assertEqual(Model.check(), [ Error( "The index name '%s' cannot be longer than 30 characters." % index_name, obj=Model, id='models.E034', ), ]) def test_index_with_condition(self): class Model(models.Model): age = models.IntegerField() class Meta: indexes = [ models.Index( fields=['age'], name='index_age_gte_10', condition=models.Q(age__gte=10), ), ] errors = Model.check(databases=self.databases) expected = [] if connection.features.supports_partial_indexes else [ Warning( '%s does not support indexes with conditions.' % connection.display_name, hint=( "Conditions will be ignored. Silence this warning if you " "don't care about it." ), obj=Model, id='models.W037', ) ] self.assertEqual(errors, expected) def test_index_with_condition_required_db_features(self): class Model(models.Model): age = models.IntegerField() class Meta: required_db_features = {'supports_partial_indexes'} indexes = [ models.Index( fields=['age'], name='index_age_gte_10', condition=models.Q(age__gte=10), ), ] self.assertEqual(Model.check(databases=self.databases), []) def test_index_with_include(self): class Model(models.Model): age = models.IntegerField() class Meta: indexes = [ models.Index( fields=['age'], name='index_age_include_id', include=['id'], ), ] errors = Model.check(databases=self.databases) expected = [] if connection.features.supports_covering_indexes else [ Warning( '%s does not support indexes with non-key columns.' % connection.display_name, hint=( "Non-key columns will be ignored. Silence this warning if " "you don't care about it." 
), obj=Model, id='models.W040', ) ] self.assertEqual(errors, expected) def test_index_with_include_required_db_features(self): class Model(models.Model): age = models.IntegerField() class Meta: required_db_features = {'supports_covering_indexes'} indexes = [ models.Index( fields=['age'], name='index_age_include_id', include=['id'], ), ] self.assertEqual(Model.check(databases=self.databases), []) @skipUnlessDBFeature('supports_covering_indexes') def test_index_include_pointing_to_missing_field(self): class Model(models.Model): class Meta: indexes = [ models.Index(fields=['id'], include=['missing_field'], name='name'), ] self.assertEqual(Model.check(databases=self.databases), [ Error( "'indexes' refers to the nonexistent field 'missing_field'.", obj=Model, id='models.E012', ), ]) @skipUnlessDBFeature('supports_covering_indexes') def test_index_include_pointing_to_m2m_field(self): class Model(models.Model): m2m = models.ManyToManyField('self') class Meta: indexes = [models.Index(fields=['id'], include=['m2m'], name='name')] self.assertEqual(Model.check(databases=self.databases), [ Error( "'indexes' refers to a ManyToManyField 'm2m', but " "ManyToManyFields are not permitted in 'indexes'.", obj=Model, id='models.E013', ), ]) @skipUnlessDBFeature('supports_covering_indexes') def test_index_include_pointing_to_non_local_field(self): class Parent(models.Model): field1 = models.IntegerField() class Child(Parent): field2 = models.IntegerField() class Meta: indexes = [ models.Index(fields=['field2'], include=['field1'], name='name'), ] self.assertEqual(Child.check(databases=self.databases), [ Error( "'indexes' refers to field 'field1' which is not local to " "model 'Child'.", hint='This issue may be caused by multi-table inheritance.', obj=Child, id='models.E016', ), ]) @skipUnlessDBFeature('supports_covering_indexes') def test_index_include_pointing_to_fk(self): class Target(models.Model): pass class Model(models.Model): fk_1 = models.ForeignKey(Target, models.CASCADE, related_name='target_1') fk_2 = models.ForeignKey(Target, models.CASCADE, related_name='target_2') class Meta: constraints = [ models.Index( fields=['id'], include=['fk_1_id', 'fk_2'], name='name', ), ] self.assertEqual(Model.check(databases=self.databases), []) @isolate_apps('invalid_models_tests') class FieldNamesTests(TestCase): databases = {'default', 'other'} def test_ending_with_underscore(self): class Model(models.Model): field_ = models.CharField(max_length=10) m2m_ = models.ManyToManyField('self') self.assertEqual(Model.check(), [ Error( 'Field names must not end with an underscore.', obj=Model._meta.get_field('field_'), id='fields.E001', ), Error( 'Field names must not end with an underscore.', obj=Model._meta.get_field('m2m_'), id='fields.E001', ), ]) max_column_name_length, column_limit_db_alias = get_max_column_name_length() @unittest.skipIf(max_column_name_length is None, "The database doesn't have a column name length limit.") def test_M2M_long_column_name(self): """ #13711 -- Model check for long M2M column names when database has column name length limits. """ # A model with very long name which will be used to set relations to. class VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz(models.Model): title = models.CharField(max_length=11) # Main model for which checks will be performed. 
class ModelWithLongField(models.Model): m2m_field = models.ManyToManyField( VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz, related_name='rn1', ) m2m_field2 = models.ManyToManyField( VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz, related_name='rn2', through='m2msimple', ) m2m_field3 = models.ManyToManyField( VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz, related_name='rn3', through='m2mcomplex', ) fk = models.ForeignKey( VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz, models.CASCADE, related_name='rn4', ) # Models used for setting `through` in M2M field. class m2msimple(models.Model): id2 = models.ForeignKey(ModelWithLongField, models.CASCADE) class m2mcomplex(models.Model): id2 = models.ForeignKey(ModelWithLongField, models.CASCADE) long_field_name = 'a' * (self.max_column_name_length + 1) models.ForeignKey( VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz, models.CASCADE, ).contribute_to_class(m2msimple, long_field_name) models.ForeignKey( VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz, models.CASCADE, db_column=long_field_name ).contribute_to_class(m2mcomplex, long_field_name) errors = ModelWithLongField.check(databases=('default', 'other')) # First error because of M2M field set on the model with long name. m2m_long_name = "verylongmodelnamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz_id" if self.max_column_name_length > len(m2m_long_name): # Some databases support names longer than the test name. expected = [] else: expected = [ Error( 'Autogenerated column name too long for M2M field "%s". ' 'Maximum length is "%s" for database "%s".' % (m2m_long_name, self.max_column_name_length, self.column_limit_db_alias), hint="Use 'through' to create a separate model for " "M2M and then set column_name using 'db_column'.", obj=ModelWithLongField, id='models.E019', ) ] # Second error because the FK specified in the `through` model # `m2msimple` has auto-generated name longer than allowed. # There will be no check errors in the other M2M because it # specifies db_column for the FK in `through` model even if the actual # name is longer than the limits of the database. expected.append( Error( 'Autogenerated column name too long for M2M field "%s_id". ' 'Maximum length is "%s" for database "%s".' % (long_field_name, self.max_column_name_length, self.column_limit_db_alias), hint="Use 'through' to create a separate model for " "M2M and then set column_name using 'db_column'.", obj=ModelWithLongField, id='models.E019', ) ) self.assertEqual(errors, expected) # Check for long column names is called only for specified database # aliases. self.assertEqual(ModelWithLongField.check(databases=None), []) @unittest.skipIf(max_column_name_length is None, "The database doesn't have a column name length limit.") def test_local_field_long_column_name(self): """ #13711 -- Model check for long column names when database does not support long names. 
""" class ModelWithLongField(models.Model): title = models.CharField(max_length=11) long_field_name = 'a' * (self.max_column_name_length + 1) long_field_name2 = 'b' * (self.max_column_name_length + 1) models.CharField(max_length=11).contribute_to_class(ModelWithLongField, long_field_name) models.CharField(max_length=11, db_column='vlmn').contribute_to_class(ModelWithLongField, long_field_name2) self.assertEqual(ModelWithLongField.check(databases=('default', 'other')), [ Error( 'Autogenerated column name too long for field "%s". ' 'Maximum length is "%s" for database "%s".' % (long_field_name, self.max_column_name_length, self.column_limit_db_alias), hint="Set the column name manually using 'db_column'.", obj=ModelWithLongField, id='models.E018', ) ]) # Check for long column names is called only for specified database # aliases. self.assertEqual(ModelWithLongField.check(databases=None), []) def test_including_separator(self): class Model(models.Model): some__field = models.IntegerField() self.assertEqual(Model.check(), [ Error( 'Field names must not contain "__".', obj=Model._meta.get_field('some__field'), id='fields.E002', ) ]) def test_pk(self): class Model(models.Model): pk = models.IntegerField() self.assertEqual(Model.check(), [ Error( "'pk' is a reserved word that cannot be used as a field name.", obj=Model._meta.get_field('pk'), id='fields.E003', ) ]) def test_db_column_clash(self): class Model(models.Model): foo = models.IntegerField() bar = models.IntegerField(db_column='foo') self.assertEqual(Model.check(), [ Error( "Field 'bar' has column name 'foo' that is used by " "another field.", hint="Specify a 'db_column' for the field.", obj=Model, id='models.E007', ) ]) @isolate_apps('invalid_models_tests') class ShadowingFieldsTests(SimpleTestCase): def test_field_name_clash_with_child_accessor(self): class Parent(models.Model): pass class Child(Parent): child = models.CharField(max_length=100) self.assertEqual(Child.check(), [ Error( "The field 'child' clashes with the field " "'child' from model 'invalid_models_tests.parent'.", obj=Child._meta.get_field('child'), id='models.E006', ) ]) def test_multiinheritance_clash(self): class Mother(models.Model): clash = models.IntegerField() class Father(models.Model): clash = models.IntegerField() class Child(Mother, Father): # Here we have two clashed: id (automatic field) and clash, because # both parents define these fields. pass self.assertEqual(Child.check(), [ Error( "The field 'id' from parent model " "'invalid_models_tests.mother' clashes with the field 'id' " "from parent model 'invalid_models_tests.father'.", obj=Child, id='models.E005', ), Error( "The field 'clash' from parent model " "'invalid_models_tests.mother' clashes with the field 'clash' " "from parent model 'invalid_models_tests.father'.", obj=Child, id='models.E005', ) ]) def test_inheritance_clash(self): class Parent(models.Model): f_id = models.IntegerField() class Target(models.Model): # This field doesn't result in a clash. f_id = models.IntegerField() class Child(Parent): # This field clashes with parent "f_id" field. 
f = models.ForeignKey(Target, models.CASCADE) self.assertEqual(Child.check(), [ Error( "The field 'f' clashes with the field 'f_id' " "from model 'invalid_models_tests.parent'.", obj=Child._meta.get_field('f'), id='models.E006', ) ]) def test_multigeneration_inheritance(self): class GrandParent(models.Model): clash = models.IntegerField() class Parent(GrandParent): pass class Child(Parent): pass class GrandChild(Child): clash = models.IntegerField() self.assertEqual(GrandChild.check(), [ Error( "The field 'clash' clashes with the field 'clash' " "from model 'invalid_models_tests.grandparent'.", obj=GrandChild._meta.get_field('clash'), id='models.E006', ) ]) def test_id_clash(self): class Target(models.Model): pass class Model(models.Model): fk = models.ForeignKey(Target, models.CASCADE) fk_id = models.IntegerField() self.assertEqual(Model.check(), [ Error( "The field 'fk_id' clashes with the field 'fk' from model " "'invalid_models_tests.model'.", obj=Model._meta.get_field('fk_id'), id='models.E006', ) ]) @isolate_apps('invalid_models_tests') class OtherModelTests(SimpleTestCase): def test_unique_primary_key(self): invalid_id = models.IntegerField(primary_key=False) class Model(models.Model): id = invalid_id self.assertEqual(Model.check(), [ Error( "'id' can only be used as a field name if the field also sets " "'primary_key=True'.", obj=Model, id='models.E004', ), ]) def test_ordering_non_iterable(self): class Model(models.Model): class Meta: ordering = 'missing_field' self.assertEqual(Model.check(), [ Error( "'ordering' must be a tuple or list " "(even if you want to order by only one field).", obj=Model, id='models.E014', ), ]) def test_just_ordering_no_errors(self): class Model(models.Model): order = models.PositiveIntegerField() class Meta: ordering = ['order'] self.assertEqual(Model.check(), []) def test_just_order_with_respect_to_no_errors(self): class Question(models.Model): pass class Answer(models.Model): question = models.ForeignKey(Question, models.CASCADE) class Meta: order_with_respect_to = 'question' self.assertEqual(Answer.check(), []) def test_ordering_with_order_with_respect_to(self): class Question(models.Model): pass class Answer(models.Model): question = models.ForeignKey(Question, models.CASCADE) order = models.IntegerField() class Meta: order_with_respect_to = 'question' ordering = ['order'] self.assertEqual(Answer.check(), [ Error( "'ordering' and 'order_with_respect_to' cannot be used together.", obj=Answer, id='models.E021', ), ]) def test_non_valid(self): class RelationModel(models.Model): pass class Model(models.Model): relation = models.ManyToManyField(RelationModel) class Meta: ordering = ['relation'] self.assertEqual(Model.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'relation'.", obj=Model, id='models.E015', ), ]) def test_ordering_pointing_to_missing_field(self): class Model(models.Model): class Meta: ordering = ('missing_field',) self.assertEqual(Model.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'missing_field'.", obj=Model, id='models.E015', ) ]) def test_ordering_pointing_to_missing_foreignkey_field(self): class Model(models.Model): missing_fk_field = models.IntegerField() class Meta: ordering = ('missing_fk_field_id',) self.assertEqual(Model.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'missing_fk_field_id'.", obj=Model, id='models.E015', ) ]) def test_ordering_pointing_to_missing_related_field(self): class 
Model(models.Model): test = models.IntegerField() class Meta: ordering = ('missing_related__id',) self.assertEqual(Model.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'missing_related__id'.", obj=Model, id='models.E015', ) ]) def test_ordering_pointing_to_missing_related_model_field(self): class Parent(models.Model): pass class Child(models.Model): parent = models.ForeignKey(Parent, models.CASCADE) class Meta: ordering = ('parent__missing_field',) self.assertEqual(Child.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'parent__missing_field'.", obj=Child, id='models.E015', ) ]) def test_ordering_pointing_to_non_related_field(self): class Child(models.Model): parent = models.IntegerField() class Meta: ordering = ('parent__missing_field',) self.assertEqual(Child.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'parent__missing_field'.", obj=Child, id='models.E015', ) ]) def test_ordering_pointing_to_two_related_model_field(self): class Parent2(models.Model): pass class Parent1(models.Model): parent2 = models.ForeignKey(Parent2, models.CASCADE) class Child(models.Model): parent1 = models.ForeignKey(Parent1, models.CASCADE) class Meta: ordering = ('parent1__parent2__missing_field',) self.assertEqual(Child.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'parent1__parent2__missing_field'.", obj=Child, id='models.E015', ) ]) def test_ordering_pointing_multiple_times_to_model_fields(self): class Parent(models.Model): field1 = models.CharField(max_length=100) field2 = models.CharField(max_length=100) class Child(models.Model): parent = models.ForeignKey(Parent, models.CASCADE) class Meta: ordering = ('parent__field1__field2',) self.assertEqual(Child.check(), [ Error( "'ordering' refers to the nonexistent field, related field, " "or lookup 'parent__field1__field2'.", obj=Child, id='models.E015', ) ]) def test_ordering_allows_registered_lookups(self): class Model(models.Model): test = models.CharField(max_length=100) class Meta: ordering = ('test__lower',) with register_lookup(models.CharField, Lower): self.assertEqual(Model.check(), []) def test_ordering_pointing_to_lookup_not_transform(self): class Model(models.Model): test = models.CharField(max_length=100) class Meta: ordering = ('test__isnull',) self.assertEqual(Model.check(), []) def test_ordering_pointing_to_related_model_pk(self): class Parent(models.Model): pass class Child(models.Model): parent = models.ForeignKey(Parent, models.CASCADE) class Meta: ordering = ('parent__pk',) self.assertEqual(Child.check(), []) def test_ordering_pointing_to_foreignkey_field(self): class Parent(models.Model): pass class Child(models.Model): parent = models.ForeignKey(Parent, models.CASCADE) class Meta: ordering = ('parent_id',) self.assertFalse(Child.check()) def test_name_beginning_with_underscore(self): class _Model(models.Model): pass self.assertEqual(_Model.check(), [ Error( "The model name '_Model' cannot start or end with an underscore " "as it collides with the query lookup syntax.", obj=_Model, id='models.E023', ) ]) def test_name_ending_with_underscore(self): class Model_(models.Model): pass self.assertEqual(Model_.check(), [ Error( "The model name 'Model_' cannot start or end with an underscore " "as it collides with the query lookup syntax.", obj=Model_, id='models.E023', ) ]) def test_name_contains_double_underscores(self): class Test__Model(models.Model): pass 
self.assertEqual(Test__Model.check(), [ Error( "The model name 'Test__Model' cannot contain double underscores " "as it collides with the query lookup syntax.", obj=Test__Model, id='models.E024', ) ]) def test_property_and_related_field_accessor_clash(self): class Model(models.Model): fk = models.ForeignKey('self', models.CASCADE) @property def fk_id(self): pass self.assertEqual(Model.check(), [ Error( "The property 'fk_id' clashes with a related field accessor.", obj=Model, id='models.E025', ) ]) def test_single_primary_key(self): class Model(models.Model): foo = models.IntegerField(primary_key=True) bar = models.IntegerField(primary_key=True) self.assertEqual(Model.check(), [ Error( "The model cannot have more than one field with 'primary_key=True'.", obj=Model, id='models.E026', ) ]) @override_settings(TEST_SWAPPED_MODEL_BAD_VALUE='not-a-model') def test_swappable_missing_app_name(self): class Model(models.Model): class Meta: swappable = 'TEST_SWAPPED_MODEL_BAD_VALUE' self.assertEqual(Model.check(), [ Error( "'TEST_SWAPPED_MODEL_BAD_VALUE' is not of the form 'app_label.app_name'.", id='models.E001', ), ]) @override_settings(TEST_SWAPPED_MODEL_BAD_MODEL='not_an_app.Target') def test_swappable_missing_app(self): class Model(models.Model): class Meta: swappable = 'TEST_SWAPPED_MODEL_BAD_MODEL' self.assertEqual(Model.check(), [ Error( "'TEST_SWAPPED_MODEL_BAD_MODEL' references 'not_an_app.Target', " 'which has not been installed, or is abstract.', id='models.E002', ), ]) def test_two_m2m_through_same_relationship(self): class Person(models.Model): pass class Group(models.Model): primary = models.ManyToManyField(Person, through='Membership', related_name='primary') secondary = models.ManyToManyField(Person, through='Membership', related_name='secondary') class Membership(models.Model): person = models.ForeignKey(Person, models.CASCADE) group = models.ForeignKey(Group, models.CASCADE) self.assertEqual(Group.check(), [ Error( "The model has two identical many-to-many relations through " "the intermediate model 'invalid_models_tests.Membership'.", obj=Group, id='models.E003', ) ]) def test_two_m2m_through_same_model_with_different_through_fields(self): class Country(models.Model): pass class ShippingMethod(models.Model): to_countries = models.ManyToManyField( Country, through='ShippingMethodPrice', through_fields=('method', 'to_country'), ) from_countries = models.ManyToManyField( Country, through='ShippingMethodPrice', through_fields=('method', 'from_country'), related_name='+', ) class ShippingMethodPrice(models.Model): method = models.ForeignKey(ShippingMethod, models.CASCADE) to_country = models.ForeignKey(Country, models.CASCADE) from_country = models.ForeignKey(Country, models.CASCADE) self.assertEqual(ShippingMethod.check(), []) def test_onetoone_with_parent_model(self): class Place(models.Model): pass class ParkingLot(Place): other_place = models.OneToOneField(Place, models.CASCADE, related_name='other_parking') self.assertEqual(ParkingLot.check(), []) def test_onetoone_with_explicit_parent_link_parent_model(self): class Place(models.Model): pass class ParkingLot(Place): place = models.OneToOneField(Place, models.CASCADE, parent_link=True, primary_key=True) other_place = models.OneToOneField(Place, models.CASCADE, related_name='other_parking') self.assertEqual(ParkingLot.check(), []) def test_m2m_table_name_clash(self): class Foo(models.Model): bar = models.ManyToManyField('Bar', db_table='myapp_bar') class Meta: db_table = 'myapp_foo' class Bar(models.Model): class Meta: db_table = 
'myapp_bar' self.assertEqual(Foo.check(), [ Error( "The field's intermediary table 'myapp_bar' clashes with the " "table name of 'invalid_models_tests.Bar'.", obj=Foo._meta.get_field('bar'), id='fields.E340', ) ]) @override_settings(DATABASE_ROUTERS=['invalid_models_tests.test_models.EmptyRouter']) def test_m2m_table_name_clash_database_routers_installed(self): class Foo(models.Model): bar = models.ManyToManyField('Bar', db_table='myapp_bar') class Meta: db_table = 'myapp_foo' class Bar(models.Model): class Meta: db_table = 'myapp_bar' self.assertEqual(Foo.check(), [ Warning( "The field's intermediary table 'myapp_bar' clashes with the " "table name of 'invalid_models_tests.Bar'.", obj=Foo._meta.get_field('bar'), hint=( "You have configured settings.DATABASE_ROUTERS. Verify " "that the table of 'invalid_models_tests.Bar' is " "correctly routed to a separate database." ), id='fields.W344', ), ]) def test_m2m_field_table_name_clash(self): class Foo(models.Model): pass class Bar(models.Model): foos = models.ManyToManyField(Foo, db_table='clash') class Baz(models.Model): foos = models.ManyToManyField(Foo, db_table='clash') self.assertEqual(Bar.check() + Baz.check(), [ Error( "The field's intermediary table 'clash' clashes with the " "table name of 'invalid_models_tests.Baz.foos'.", obj=Bar._meta.get_field('foos'), id='fields.E340', ), Error( "The field's intermediary table 'clash' clashes with the " "table name of 'invalid_models_tests.Bar.foos'.", obj=Baz._meta.get_field('foos'), id='fields.E340', ) ]) @override_settings(DATABASE_ROUTERS=['invalid_models_tests.test_models.EmptyRouter']) def test_m2m_field_table_name_clash_database_routers_installed(self): class Foo(models.Model): pass class Bar(models.Model): foos = models.ManyToManyField(Foo, db_table='clash') class Baz(models.Model): foos = models.ManyToManyField(Foo, db_table='clash') self.assertEqual(Bar.check() + Baz.check(), [ Warning( "The field's intermediary table 'clash' clashes with the " "table name of 'invalid_models_tests.%s.foos'." % clashing_model, obj=model_cls._meta.get_field('foos'), hint=( "You have configured settings.DATABASE_ROUTERS. Verify " "that the table of 'invalid_models_tests.%s.foos' is " "correctly routed to a separate database." % clashing_model ), id='fields.W344', ) for model_cls, clashing_model in [(Bar, 'Baz'), (Baz, 'Bar')] ]) def test_m2m_autogenerated_table_name_clash(self): class Foo(models.Model): class Meta: db_table = 'bar_foos' class Bar(models.Model): # The autogenerated `db_table` will be bar_foos. foos = models.ManyToManyField(Foo) class Meta: db_table = 'bar' self.assertEqual(Bar.check(), [ Error( "The field's intermediary table 'bar_foos' clashes with the " "table name of 'invalid_models_tests.Foo'.", obj=Bar._meta.get_field('foos'), id='fields.E340', ) ]) @override_settings(DATABASE_ROUTERS=['invalid_models_tests.test_models.EmptyRouter']) def test_m2m_autogenerated_table_name_clash_database_routers_installed(self): class Foo(models.Model): class Meta: db_table = 'bar_foos' class Bar(models.Model): # The autogenerated db_table is bar_foos. foos = models.ManyToManyField(Foo) class Meta: db_table = 'bar' self.assertEqual(Bar.check(), [ Warning( "The field's intermediary table 'bar_foos' clashes with the " "table name of 'invalid_models_tests.Foo'.", obj=Bar._meta.get_field('foos'), hint=( "You have configured settings.DATABASE_ROUTERS. Verify " "that the table of 'invalid_models_tests.Foo' is " "correctly routed to a separate database." 
), id='fields.W344', ), ]) def test_m2m_unmanaged_shadow_models_not_checked(self): class A1(models.Model): pass class C1(models.Model): mm_a = models.ManyToManyField(A1, db_table='d1') # Unmanaged models that shadow the above models. Reused table names # shouldn't be flagged by any checks. class A2(models.Model): class Meta: managed = False class C2(models.Model): mm_a = models.ManyToManyField(A2, through='Intermediate') class Meta: managed = False class Intermediate(models.Model): a2 = models.ForeignKey(A2, models.CASCADE, db_column='a1_id') c2 = models.ForeignKey(C2, models.CASCADE, db_column='c1_id') class Meta: db_table = 'd1' managed = False self.assertEqual(C1.check(), []) self.assertEqual(C2.check(), []) def test_m2m_to_concrete_and_proxy_allowed(self): class A(models.Model): pass class Through(models.Model): a = models.ForeignKey('A', models.CASCADE) c = models.ForeignKey('C', models.CASCADE) class ThroughProxy(Through): class Meta: proxy = True class C(models.Model): mm_a = models.ManyToManyField(A, through=Through) mm_aproxy = models.ManyToManyField(A, through=ThroughProxy, related_name='proxied_m2m') self.assertEqual(C.check(), []) @isolate_apps('django.contrib.auth', kwarg_name='apps') def test_lazy_reference_checks(self, apps): class DummyModel(models.Model): author = models.ForeignKey('Author', models.CASCADE) class Meta: app_label = 'invalid_models_tests' class DummyClass: def __call__(self, **kwargs): pass def dummy_method(self): pass def dummy_function(*args, **kwargs): pass apps.lazy_model_operation(dummy_function, ('auth', 'imaginarymodel')) apps.lazy_model_operation(dummy_function, ('fanciful_app', 'imaginarymodel')) post_init.connect(dummy_function, sender='missing-app.Model', apps=apps) post_init.connect(DummyClass(), sender='missing-app.Model', apps=apps) post_init.connect(DummyClass().dummy_method, sender='missing-app.Model', apps=apps) self.assertEqual(_check_lazy_references(apps), [ Error( "%r contains a lazy reference to auth.imaginarymodel, " "but app 'auth' doesn't provide model 'imaginarymodel'." % dummy_function, obj=dummy_function, id='models.E022', ), Error( "%r contains a lazy reference to fanciful_app.imaginarymodel, " "but app 'fanciful_app' isn't installed." 
% dummy_function, obj=dummy_function, id='models.E022', ), Error( "An instance of class 'DummyClass' was connected to " "the 'post_init' signal with a lazy reference to the sender " "'missing-app.model', but app 'missing-app' isn't installed.", hint=None, obj='invalid_models_tests.test_models', id='signals.E001', ), Error( "Bound method 'DummyClass.dummy_method' was connected to the " "'post_init' signal with a lazy reference to the sender " "'missing-app.model', but app 'missing-app' isn't installed.", hint=None, obj='invalid_models_tests.test_models', id='signals.E001', ), Error( "The field invalid_models_tests.DummyModel.author was declared " "with a lazy reference to 'invalid_models_tests.author', but app " "'invalid_models_tests' isn't installed.", hint=None, obj=DummyModel.author.field, id='fields.E307', ), Error( "The function 'dummy_function' was connected to the 'post_init' " "signal with a lazy reference to the sender " "'missing-app.model', but app 'missing-app' isn't installed.", hint=None, obj='invalid_models_tests.test_models', id='signals.E001', ), ]) @isolate_apps('invalid_models_tests') class JSONFieldTests(TestCase): @skipUnlessDBFeature('supports_json_field') def test_ordering_pointing_to_json_field_value(self): class Model(models.Model): field = models.JSONField() class Meta: ordering = ['field__value'] self.assertEqual(Model.check(databases=self.databases), []) def test_check_jsonfield(self): class Model(models.Model): field = models.JSONField() error = Error( '%s does not support JSONFields.' % connection.display_name, obj=Model, id='fields.E180', ) expected = [] if connection.features.supports_json_field else [error] self.assertEqual(Model.check(databases=self.databases), expected) def test_check_jsonfield_required_db_features(self): class Model(models.Model): field = models.JSONField() class Meta: required_db_features = {'supports_json_field'} self.assertEqual(Model.check(databases=self.databases), []) @isolate_apps('invalid_models_tests') class ConstraintsTests(TestCase): def test_check_constraints(self): class Model(models.Model): age = models.IntegerField() class Meta: constraints = [models.CheckConstraint(check=models.Q(age__gte=18), name='is_adult')] errors = Model.check(databases=self.databases) warn = Warning( '%s does not support check constraints.' % connection.display_name, hint=( "A constraint won't be created. Silence this warning if you " "don't care about it." 
), obj=Model, id='models.W027', ) expected = [] if connection.features.supports_table_check_constraints else [warn] self.assertCountEqual(errors, expected) def test_check_constraints_required_db_features(self): class Model(models.Model): age = models.IntegerField() class Meta: required_db_features = {'supports_table_check_constraints'} constraints = [models.CheckConstraint(check=models.Q(age__gte=18), name='is_adult')] self.assertEqual(Model.check(databases=self.databases), []) def test_check_constraint_pointing_to_missing_field(self): class Model(models.Model): class Meta: required_db_features = {'supports_table_check_constraints'} constraints = [ models.CheckConstraint( name='name', check=models.Q(missing_field=2), ), ] self.assertEqual(Model.check(databases=self.databases), [ Error( "'constraints' refers to the nonexistent field " "'missing_field'.", obj=Model, id='models.E012', ), ] if connection.features.supports_table_check_constraints else []) @skipUnlessDBFeature('supports_table_check_constraints') def test_check_constraint_pointing_to_reverse_fk(self): class Model(models.Model): parent = models.ForeignKey('self', models.CASCADE, related_name='parents') class Meta: constraints = [ models.CheckConstraint(name='name', check=models.Q(parents=3)), ] self.assertEqual(Model.check(databases=self.databases), [ Error( "'constraints' refers to the nonexistent field 'parents'.", obj=Model, id='models.E012', ), ]) @skipUnlessDBFeature('supports_table_check_constraints') def test_check_constraint_pointing_to_m2m_field(self): class Model(models.Model): m2m = models.ManyToManyField('self') class Meta: constraints = [ models.CheckConstraint(name='name', check=models.Q(m2m=2)), ] self.assertEqual(Model.check(databases=self.databases), [ Error( "'constraints' refers to a ManyToManyField 'm2m', but " "ManyToManyFields are not permitted in 'constraints'.", obj=Model, id='models.E013', ), ]) @skipUnlessDBFeature('supports_table_check_constraints') def test_check_constraint_pointing_to_fk(self): class Target(models.Model): pass class Model(models.Model): fk_1 = models.ForeignKey(Target, models.CASCADE, related_name='target_1') fk_2 = models.ForeignKey(Target, models.CASCADE, related_name='target_2') class Meta: constraints = [ models.CheckConstraint( name='name', check=models.Q(fk_1_id=2) | models.Q(fk_2=2), ), ] self.assertEqual(Model.check(databases=self.databases), []) @skipUnlessDBFeature('supports_table_check_constraints') def test_check_constraint_pointing_to_pk(self): class Model(models.Model): age = models.SmallIntegerField() class Meta: constraints = [ models.CheckConstraint( name='name', check=models.Q(pk__gt=5) & models.Q(age__gt=models.F('pk')), ), ] self.assertEqual(Model.check(databases=self.databases), []) @skipUnlessDBFeature('supports_table_check_constraints') def test_check_constraint_pointing_to_non_local_field(self): class Parent(models.Model): field1 = models.IntegerField() class Child(Parent): pass class Meta: constraints = [ models.CheckConstraint(name='name', check=models.Q(field1=1)), ] self.assertEqual(Child.check(databases=self.databases), [ Error( "'constraints' refers to field 'field1' which is not local to " "model 'Child'.", hint='This issue may be caused by multi-table inheritance.', obj=Child, id='models.E016', ), ]) @skipUnlessDBFeature('supports_table_check_constraints') def test_check_constraint_pointing_to_joined_fields(self): class Model(models.Model): name = models.CharField(max_length=10) field1 = models.PositiveSmallIntegerField() field2 = 
models.PositiveSmallIntegerField() field3 = models.PositiveSmallIntegerField() parent = models.ForeignKey('self', models.CASCADE) class Meta: constraints = [ models.CheckConstraint( name='name1', check=models.Q( field1__lt=models.F('parent__field1') + models.F('parent__field2') ) ), models.CheckConstraint( name='name2', check=models.Q(name=Lower('parent__name')) ), models.CheckConstraint( name='name3', check=models.Q(parent__field3=models.F('field1')) ), ] joined_fields = ['parent__field1', 'parent__field2', 'parent__field3', 'parent__name'] errors = Model.check(databases=self.databases) expected_errors = [ Error( "'constraints' refers to the joined field '%s'." % field_name, obj=Model, id='models.E041', ) for field_name in joined_fields ] self.assertCountEqual(errors, expected_errors) @skipUnlessDBFeature('supports_table_check_constraints') def test_check_constraint_pointing_to_joined_fields_complex_check(self): class Model(models.Model): name = models.PositiveSmallIntegerField() field1 = models.PositiveSmallIntegerField() field2 = models.PositiveSmallIntegerField() parent = models.ForeignKey('self', models.CASCADE) class Meta: constraints = [ models.CheckConstraint( name='name', check=models.Q( ( models.Q(name='test') & models.Q(field1__lt=models.F('parent__field1')) ) | ( models.Q(name__startswith=Lower('parent__name')) & models.Q(field1__gte=( models.F('parent__field1') + models.F('parent__field2') )) ) ) | (models.Q(name='test1')) ), ] joined_fields = ['parent__field1', 'parent__field2', 'parent__name'] errors = Model.check(databases=self.databases) expected_errors = [ Error( "'constraints' refers to the joined field '%s'." % field_name, obj=Model, id='models.E041', ) for field_name in joined_fields ] self.assertCountEqual(errors, expected_errors) def test_unique_constraint_with_condition(self): class Model(models.Model): age = models.IntegerField() class Meta: constraints = [ models.UniqueConstraint( fields=['age'], name='unique_age_gte_100', condition=models.Q(age__gte=100), ), ] errors = Model.check(databases=self.databases) expected = [] if connection.features.supports_partial_indexes else [ Warning( '%s does not support unique constraints with conditions.' % connection.display_name, hint=( "A constraint won't be created. Silence this warning if " "you don't care about it." 
), obj=Model, id='models.W036', ), ] self.assertEqual(errors, expected) def test_unique_constraint_with_condition_required_db_features(self): class Model(models.Model): age = models.IntegerField() class Meta: required_db_features = {'supports_partial_indexes'} constraints = [ models.UniqueConstraint( fields=['age'], name='unique_age_gte_100', condition=models.Q(age__gte=100), ), ] self.assertEqual(Model.check(databases=self.databases), []) def test_unique_constraint_condition_pointing_to_missing_field(self): class Model(models.Model): age = models.SmallIntegerField() class Meta: required_db_features = {'supports_partial_indexes'} constraints = [ models.UniqueConstraint( name='name', fields=['age'], condition=models.Q(missing_field=2), ), ] self.assertEqual(Model.check(databases=self.databases), [ Error( "'constraints' refers to the nonexistent field " "'missing_field'.", obj=Model, id='models.E012', ), ] if connection.features.supports_partial_indexes else []) def test_unique_constraint_condition_pointing_to_joined_fields(self): class Model(models.Model): age = models.SmallIntegerField() parent = models.ForeignKey('self', models.CASCADE) class Meta: required_db_features = {'supports_partial_indexes'} constraints = [ models.UniqueConstraint( name='name', fields=['age'], condition=models.Q(parent__age__lt=2), ), ] self.assertEqual(Model.check(databases=self.databases), [ Error( "'constraints' refers to the joined field 'parent__age__lt'.", obj=Model, id='models.E041', ) ] if connection.features.supports_partial_indexes else []) def test_deferrable_unique_constraint(self): class Model(models.Model): age = models.IntegerField() class Meta: constraints = [ models.UniqueConstraint( fields=['age'], name='unique_age_deferrable', deferrable=models.Deferrable.DEFERRED, ), ] errors = Model.check(databases=self.databases) expected = [] if connection.features.supports_deferrable_unique_constraints else [ Warning( '%s does not support deferrable unique constraints.' % connection.display_name, hint=( "A constraint won't be created. Silence this warning if " "you don't care about it." 
), obj=Model, id='models.W038', ), ] self.assertEqual(errors, expected) def test_deferrable_unique_constraint_required_db_features(self): class Model(models.Model): age = models.IntegerField() class Meta: required_db_features = {'supports_deferrable_unique_constraints'} constraints = [ models.UniqueConstraint( fields=['age'], name='unique_age_deferrable', deferrable=models.Deferrable.IMMEDIATE, ), ] self.assertEqual(Model.check(databases=self.databases), []) def test_unique_constraint_pointing_to_missing_field(self): class Model(models.Model): class Meta: constraints = [models.UniqueConstraint(fields=['missing_field'], name='name')] self.assertEqual(Model.check(databases=self.databases), [ Error( "'constraints' refers to the nonexistent field " "'missing_field'.", obj=Model, id='models.E012', ), ]) def test_unique_constraint_pointing_to_m2m_field(self): class Model(models.Model): m2m = models.ManyToManyField('self') class Meta: constraints = [models.UniqueConstraint(fields=['m2m'], name='name')] self.assertEqual(Model.check(databases=self.databases), [ Error( "'constraints' refers to a ManyToManyField 'm2m', but " "ManyToManyFields are not permitted in 'constraints'.", obj=Model, id='models.E013', ), ]) def test_unique_constraint_pointing_to_non_local_field(self): class Parent(models.Model): field1 = models.IntegerField() class Child(Parent): field2 = models.IntegerField() class Meta: constraints = [ models.UniqueConstraint(fields=['field2', 'field1'], name='name'), ] self.assertEqual(Child.check(databases=self.databases), [ Error( "'constraints' refers to field 'field1' which is not local to " "model 'Child'.", hint='This issue may be caused by multi-table inheritance.', obj=Child, id='models.E016', ), ]) def test_unique_constraint_pointing_to_fk(self): class Target(models.Model): pass class Model(models.Model): fk_1 = models.ForeignKey(Target, models.CASCADE, related_name='target_1') fk_2 = models.ForeignKey(Target, models.CASCADE, related_name='target_2') class Meta: constraints = [ models.UniqueConstraint(fields=['fk_1_id', 'fk_2'], name='name'), ] self.assertEqual(Model.check(databases=self.databases), []) def test_unique_constraint_with_include(self): class Model(models.Model): age = models.IntegerField() class Meta: constraints = [ models.UniqueConstraint( fields=['age'], name='unique_age_include_id', include=['id'], ), ] errors = Model.check(databases=self.databases) expected = [] if connection.features.supports_covering_indexes else [ Warning( '%s does not support unique constraints with non-key columns.' % connection.display_name, hint=( "A constraint won't be created. Silence this warning if " "you don't care about it." 
), obj=Model, id='models.W039', ), ] self.assertEqual(errors, expected) def test_unique_constraint_with_include_required_db_features(self): class Model(models.Model): age = models.IntegerField() class Meta: required_db_features = {'supports_covering_indexes'} constraints = [ models.UniqueConstraint( fields=['age'], name='unique_age_include_id', include=['id'], ), ] self.assertEqual(Model.check(databases=self.databases), []) @skipUnlessDBFeature('supports_covering_indexes') def test_unique_constraint_include_pointing_to_missing_field(self): class Model(models.Model): class Meta: constraints = [ models.UniqueConstraint( fields=['id'], include=['missing_field'], name='name', ), ] self.assertEqual(Model.check(databases=self.databases), [ Error( "'constraints' refers to the nonexistent field " "'missing_field'.", obj=Model, id='models.E012', ), ]) @skipUnlessDBFeature('supports_covering_indexes') def test_unique_constraint_include_pointing_to_m2m_field(self): class Model(models.Model): m2m = models.ManyToManyField('self') class Meta: constraints = [ models.UniqueConstraint( fields=['id'], include=['m2m'], name='name', ), ] self.assertEqual(Model.check(databases=self.databases), [ Error( "'constraints' refers to a ManyToManyField 'm2m', but " "ManyToManyFields are not permitted in 'constraints'.", obj=Model, id='models.E013', ), ]) @skipUnlessDBFeature('supports_covering_indexes') def test_unique_constraint_include_pointing_to_non_local_field(self): class Parent(models.Model): field1 = models.IntegerField() class Child(Parent): field2 = models.IntegerField() class Meta: constraints = [ models.UniqueConstraint( fields=['field2'], include=['field1'], name='name', ), ] self.assertEqual(Child.check(databases=self.databases), [ Error( "'constraints' refers to field 'field1' which is not local to " "model 'Child'.", hint='This issue may be caused by multi-table inheritance.', obj=Child, id='models.E016', ), ]) @skipUnlessDBFeature('supports_covering_indexes') def test_unique_constraint_include_pointing_to_fk(self): class Target(models.Model): pass class Model(models.Model): fk_1 = models.ForeignKey(Target, models.CASCADE, related_name='target_1') fk_2 = models.ForeignKey(Target, models.CASCADE, related_name='target_2') class Meta: constraints = [ models.UniqueConstraint( fields=['id'], include=['fk_1_id', 'fk_2'], name='name', ), ] self.assertEqual(Model.check(databases=self.databases), [])
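# A minimal usage sketch (not part of the suite above; the Person model is
# hypothetical): the checks exercised here are what `manage.py check` runs.
# Given a model like
#
#     class Person(models.Model):
#         age = models.IntegerField()
#
#         class Meta:
#             constraints = [
#                 models.CheckConstraint(check=models.Q(age__gte=18), name='is_adult'),
#             ]
#
# Person.check(databases=['default']) returns [] on backends that support
# table check constraints and a models.W027 warning otherwise.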
import datetime import pickle import unittest import uuid from copy import deepcopy from unittest import mock from django.core.exceptions import FieldError from django.db import DatabaseError, NotSupportedError, connection from django.db.models import ( Avg, BooleanField, Case, CharField, Count, DateField, DateTimeField, DurationField, Exists, Expression, ExpressionList, ExpressionWrapper, F, Func, IntegerField, Max, Min, Model, OrderBy, OuterRef, Q, StdDev, Subquery, Sum, TimeField, UUIDField, Value, Variance, When, ) from django.db.models.expressions import Col, Combinable, Random, RawSQL, Ref from django.db.models.functions import ( Coalesce, Concat, Left, Length, Lower, Substr, Upper, ) from django.db.models.sql import constants from django.db.models.sql.datastructures import Join from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature from django.test.utils import Approximate, isolate_apps from django.utils.functional import SimpleLazyObject from .models import ( UUID, UUIDPK, Company, Employee, Experiment, Manager, Number, RemoteEmployee, Result, SimulationRun, Time, ) class BasicExpressionsTests(TestCase): @classmethod def setUpTestData(cls): cls.example_inc = Company.objects.create( name="Example Inc.", num_employees=2300, num_chairs=5, ceo=Employee.objects.create(firstname="Joe", lastname="Smith", salary=10) ) cls.foobar_ltd = Company.objects.create( name="Foobar Ltd.", num_employees=3, num_chairs=4, based_in_eu=True, ceo=Employee.objects.create(firstname="Frank", lastname="Meyer", salary=20) ) cls.max = Employee.objects.create(firstname='Max', lastname='Mustermann', salary=30) cls.gmbh = Company.objects.create(name='Test GmbH', num_employees=32, num_chairs=1, ceo=cls.max) def setUp(self): self.company_query = Company.objects.values( "name", "num_employees", "num_chairs" ).order_by( "name", "num_employees", "num_chairs" ) def test_annotate_values_aggregate(self): companies = Company.objects.annotate( salaries=F('ceo__salary'), ).values('num_employees', 'salaries').aggregate( result=Sum( F('salaries') + F('num_employees'), output_field=IntegerField() ), ) self.assertEqual(companies['result'], 2395) def test_annotate_values_filter(self): companies = Company.objects.annotate( foo=RawSQL('%s', ['value']), ).filter(foo='value').order_by('name') self.assertQuerysetEqual( companies, ['<Company: Example Inc.>', '<Company: Foobar Ltd.>', '<Company: Test GmbH>'], ) def test_annotate_values_count(self): companies = Company.objects.annotate(foo=RawSQL('%s', ['value'])) self.assertEqual(companies.count(), 3) @skipUnlessDBFeature('supports_boolean_expr_in_select_clause') def test_filtering_on_annotate_that_uses_q(self): self.assertEqual( Company.objects.annotate( num_employees_check=ExpressionWrapper(Q(num_employees__gt=3), output_field=BooleanField()) ).filter(num_employees_check=True).count(), 2, ) def test_filtering_on_q_that_is_boolean(self): self.assertEqual( Company.objects.filter( ExpressionWrapper(Q(num_employees__gt=3), output_field=BooleanField()) ).count(), 2, ) def test_filtering_on_rawsql_that_is_boolean(self): self.assertEqual( Company.objects.filter( RawSQL('num_employees > %s', (3,), output_field=BooleanField()), ).count(), 2, ) def test_filter_inter_attribute(self): # We can filter on attribute relationships on same model obj, e.g. # find companies where the number of employees is greater # than the number of chairs. 
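# Sketch of the construct under test: the filter below compiles to a direct
# column comparison, roughly
#     SELECT ... WHERE num_employees > num_chairs
# so no rows are fetched and compared in Python.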
self.assertSequenceEqual( self.company_query.filter(num_employees__gt=F("num_chairs")), [ { "num_chairs": 5, "name": "Example Inc.", "num_employees": 2300, }, { "num_chairs": 1, "name": "Test GmbH", "num_employees": 32 }, ], ) def test_update(self): # We can set one field to have the value of another field # Make sure we have enough chairs self.company_query.update(num_chairs=F("num_employees")) self.assertSequenceEqual( self.company_query, [ { "num_chairs": 2300, "name": "Example Inc.", "num_employees": 2300 }, { "num_chairs": 3, "name": "Foobar Ltd.", "num_employees": 3 }, { "num_chairs": 32, "name": "Test GmbH", "num_employees": 32 } ], ) def test_arithmetic(self): # We can perform arithmetic operations in expressions # Make sure we have 2 spare chairs self.company_query.update(num_chairs=F("num_employees") + 2) self.assertSequenceEqual( self.company_query, [ { 'num_chairs': 2302, 'name': 'Example Inc.', 'num_employees': 2300 }, { 'num_chairs': 5, 'name': 'Foobar Ltd.', 'num_employees': 3 }, { 'num_chairs': 34, 'name': 'Test GmbH', 'num_employees': 32 } ], ) def test_order_of_operations(self): # Law of order of operations is followed self.company_query.update(num_chairs=F('num_employees') + 2 * F('num_employees')) self.assertSequenceEqual( self.company_query, [ { 'num_chairs': 6900, 'name': 'Example Inc.', 'num_employees': 2300 }, { 'num_chairs': 9, 'name': 'Foobar Ltd.', 'num_employees': 3 }, { 'num_chairs': 96, 'name': 'Test GmbH', 'num_employees': 32 } ], ) def test_parenthesis_priority(self): # Law of order of operations can be overridden by parentheses self.company_query.update(num_chairs=(F('num_employees') + 2) * F('num_employees')) self.assertSequenceEqual( self.company_query, [ { 'num_chairs': 5294600, 'name': 'Example Inc.', 'num_employees': 2300 }, { 'num_chairs': 15, 'name': 'Foobar Ltd.', 'num_employees': 3 }, { 'num_chairs': 1088, 'name': 'Test GmbH', 'num_employees': 32 } ], ) def test_update_with_fk(self): # ForeignKey can become updated with the value of another ForeignKey. 
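# Sketch of the statement verified below:
#     Company.objects.update(point_of_contact=F('ceo'))
# copies ceo_id into point_of_contact_id in a single UPDATE and returns the
# number of rows affected (3 here).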
self.assertEqual(Company.objects.update(point_of_contact=F('ceo')), 3) self.assertQuerysetEqual( Company.objects.all(), ['Joe Smith', 'Frank Meyer', 'Max Mustermann'], lambda c: str(c.point_of_contact), ordered=False ) def test_update_with_none(self): Number.objects.create(integer=1, float=1.0) Number.objects.create(integer=2) Number.objects.filter(float__isnull=False).update(float=Value(None)) self.assertQuerysetEqual( Number.objects.all(), [None, None], lambda n: n.float, ordered=False ) def test_filter_with_join(self): # F Expressions can also span joins Company.objects.update(point_of_contact=F('ceo')) c = Company.objects.first() c.point_of_contact = Employee.objects.create(firstname="Guido", lastname="van Rossum") c.save() self.assertQuerysetEqual( Company.objects.filter(ceo__firstname=F('point_of_contact__firstname')), ['Foobar Ltd.', 'Test GmbH'], lambda c: c.name, ordered=False ) Company.objects.exclude( ceo__firstname=F("point_of_contact__firstname") ).update(name="foo") self.assertEqual( Company.objects.exclude( ceo__firstname=F('point_of_contact__firstname') ).get().name, "foo", ) msg = "Joined field references are not permitted in this query" with self.assertRaisesMessage(FieldError, msg): Company.objects.exclude( ceo__firstname=F('point_of_contact__firstname') ).update(name=F('point_of_contact__lastname')) def test_object_update(self): # F expressions can be used to update attributes on single objects self.gmbh.num_employees = F('num_employees') + 4 self.gmbh.save() self.gmbh.refresh_from_db() self.assertEqual(self.gmbh.num_employees, 36) def test_new_object_save(self): # We should be able to use Funcs when inserting new data test_co = Company(name=Lower(Value('UPPER')), num_employees=32, num_chairs=1, ceo=self.max) test_co.save() test_co.refresh_from_db() self.assertEqual(test_co.name, "upper") def test_new_object_create(self): test_co = Company.objects.create(name=Lower(Value('UPPER')), num_employees=32, num_chairs=1, ceo=self.max) test_co.refresh_from_db() self.assertEqual(test_co.name, "upper") def test_object_create_with_aggregate(self): # Aggregates are not allowed when inserting new data msg = 'Aggregate functions are not allowed in this query (num_employees=Max(Value(1))).' with self.assertRaisesMessage(FieldError, msg): Company.objects.create( name='Company', num_employees=Max(Value(1)), num_chairs=1, ceo=Employee.objects.create(firstname="Just", lastname="Doit", salary=30), ) def test_object_update_fk(self): # F expressions cannot be used to update attributes which are foreign # keys, or attributes which involve joins. test_gmbh = Company.objects.get(pk=self.gmbh.pk) msg = 'F(ceo)": "Company.point_of_contact" must be a "Employee" instance.' 
with self.assertRaisesMessage(ValueError, msg): test_gmbh.point_of_contact = F('ceo') test_gmbh.point_of_contact = self.gmbh.ceo test_gmbh.save() test_gmbh.name = F('ceo__lastname') msg = 'Joined field references are not permitted in this query' with self.assertRaisesMessage(FieldError, msg): test_gmbh.save() def test_update_inherited_field_value(self): msg = 'Joined field references are not permitted in this query' with self.assertRaisesMessage(FieldError, msg): RemoteEmployee.objects.update(adjusted_salary=F('salary') * 5) def test_object_update_unsaved_objects(self): # F expressions cannot be used to update attributes on objects which do # not yet exist in the database acme = Company(name='The Acme Widget Co.', num_employees=12, num_chairs=5, ceo=self.max) acme.num_employees = F("num_employees") + 16 msg = ( 'Failed to insert expression "Col(expressions_company, ' 'expressions.Company.num_employees) + Value(16)" on ' 'expressions.Company.num_employees. F() expressions can only be ' 'used to update, not to insert.' ) with self.assertRaisesMessage(ValueError, msg): acme.save() acme.num_employees = 12 acme.name = Lower(F('name')) msg = ( 'Failed to insert expression "Lower(Col(expressions_company, ' 'expressions.Company.name))" on expressions.Company.name. F() ' 'expressions can only be used to update, not to insert.' ) with self.assertRaisesMessage(ValueError, msg): acme.save() def test_ticket_11722_iexact_lookup(self): Employee.objects.create(firstname="John", lastname="Doe") Employee.objects.create(firstname="Test", lastname="test") queryset = Employee.objects.filter(firstname__iexact=F('lastname')) self.assertQuerysetEqual(queryset, ["<Employee: Test test>"]) def test_ticket_16731_startswith_lookup(self): Employee.objects.create(firstname="John", lastname="Doe") e2 = Employee.objects.create(firstname="Jack", lastname="Jackson") e3 = Employee.objects.create(firstname="Jack", lastname="jackson") self.assertSequenceEqual( Employee.objects.filter(lastname__startswith=F('firstname')), [e2, e3] if connection.features.has_case_insensitive_like else [e2] ) qs = Employee.objects.filter(lastname__istartswith=F('firstname')).order_by('pk') self.assertSequenceEqual(qs, [e2, e3]) def test_ticket_18375_join_reuse(self): # Reverse multijoin F() references and the lookup target the same join. # Pre #18375 the F() join was generated first and the lookup couldn't # reuse that join. qs = Employee.objects.filter(company_ceo_set__num_chairs=F('company_ceo_set__num_employees')) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_ticket_18375_kwarg_ordering(self): # The next query was dict-randomization dependent - if the "gte=1" # was seen first, then the F() will reuse the join generated by the # gte lookup, if F() was seen first, then it generated a join the # other lookups could not reuse. qs = Employee.objects.filter( company_ceo_set__num_chairs=F('company_ceo_set__num_employees'), company_ceo_set__num_chairs__gte=1, ) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_ticket_18375_kwarg_ordering_2(self): # Another similar case for F() than above. Now we have the same join # in two filter kwargs, one in the lhs lookup, one in F. Here pre # #18375 the amount of joins generated was random if dict # randomization was enabled, that is the generated query dependent # on which clause was seen first. 
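# Put differently: both kwargs below reference the same reverse relation,
# once on the lookup side and once inside F(), so regardless of which is
# processed first the compiled SQL should contain exactly one JOIN.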
qs = Employee.objects.filter( company_ceo_set__num_employees=F('pk'), pk=F('company_ceo_set__num_employees') ) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_ticket_18375_chained_filters(self): # F() expressions do not reuse joins from previous filter. qs = Employee.objects.filter( company_ceo_set__num_employees=F('pk') ).filter( company_ceo_set__num_employees=F('company_ceo_set__num_employees') ) self.assertEqual(str(qs.query).count('JOIN'), 2) def test_order_by_exists(self): mary = Employee.objects.create(firstname='Mary', lastname='Mustermann', salary=20) mustermanns_by_seniority = Employee.objects.filter(lastname='Mustermann').order_by( # Order by whether the employee is the CEO of a company Exists(Company.objects.filter(ceo=OuterRef('pk'))).desc() ) self.assertSequenceEqual(mustermanns_by_seniority, [self.max, mary]) def test_order_by_multiline_sql(self): raw_order_by = ( RawSQL(''' CASE WHEN num_employees > 1000 THEN num_chairs ELSE 0 END ''', []).desc(), RawSQL(''' CASE WHEN num_chairs > 1 THEN 1 ELSE 0 END ''', []).asc() ) for qs in ( Company.objects.all(), Company.objects.distinct(), ): with self.subTest(qs=qs): self.assertSequenceEqual( qs.order_by(*raw_order_by), [self.example_inc, self.gmbh, self.foobar_ltd], ) def test_outerref(self): inner = Company.objects.filter(point_of_contact=OuterRef('pk')) msg = ( 'This queryset contains a reference to an outer query and may only ' 'be used in a subquery.' ) with self.assertRaisesMessage(ValueError, msg): inner.exists() outer = Employee.objects.annotate(is_point_of_contact=Exists(inner)) self.assertIs(outer.exists(), True) def test_exist_single_field_output_field(self): queryset = Company.objects.values('pk') self.assertIsInstance(Exists(queryset).output_field, BooleanField) def test_subquery(self): Company.objects.filter(name='Example Inc.').update( point_of_contact=Employee.objects.get(firstname='Joe', lastname='Smith'), ceo=self.max, ) Employee.objects.create(firstname='Bob', lastname='Brown', salary=40) qs = Employee.objects.annotate( is_point_of_contact=Exists(Company.objects.filter(point_of_contact=OuterRef('pk'))), is_not_point_of_contact=~Exists(Company.objects.filter(point_of_contact=OuterRef('pk'))), is_ceo_of_small_company=Exists(Company.objects.filter(num_employees__lt=200, ceo=OuterRef('pk'))), is_ceo_small_2=~~Exists(Company.objects.filter(num_employees__lt=200, ceo=OuterRef('pk'))), largest_company=Subquery(Company.objects.order_by('-num_employees').filter( Q(ceo=OuterRef('pk')) | Q(point_of_contact=OuterRef('pk')) ).values('name')[:1], output_field=CharField()) ).values( 'firstname', 'is_point_of_contact', 'is_not_point_of_contact', 'is_ceo_of_small_company', 'is_ceo_small_2', 'largest_company', ).order_by('firstname') results = list(qs) # Could use Coalesce(subq, Value('')) instead except for the bug in # cx_Oracle mentioned in #23843. 
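# The Coalesce spelling would look roughly like (sketch, not used here
# because of the cx_Oracle issue referenced above):
#     largest_company=Coalesce(Subquery(...), Value(''))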
bob = results[0] if bob['largest_company'] == '' and connection.features.interprets_empty_strings_as_nulls: bob['largest_company'] = None self.assertEqual(results, [ { 'firstname': 'Bob', 'is_point_of_contact': False, 'is_not_point_of_contact': True, 'is_ceo_of_small_company': False, 'is_ceo_small_2': False, 'largest_company': None, }, { 'firstname': 'Frank', 'is_point_of_contact': False, 'is_not_point_of_contact': True, 'is_ceo_of_small_company': True, 'is_ceo_small_2': True, 'largest_company': 'Foobar Ltd.', }, { 'firstname': 'Joe', 'is_point_of_contact': True, 'is_not_point_of_contact': False, 'is_ceo_of_small_company': False, 'is_ceo_small_2': False, 'largest_company': 'Example Inc.', }, { 'firstname': 'Max', 'is_point_of_contact': False, 'is_not_point_of_contact': True, 'is_ceo_of_small_company': True, 'is_ceo_small_2': True, 'largest_company': 'Example Inc.' } ]) # A less elegant way to write the same query: this uses a LEFT OUTER # JOIN and an IS NULL, inside a WHERE NOT IN which is probably less # efficient than EXISTS. self.assertCountEqual( qs.filter(is_point_of_contact=True).values('pk'), Employee.objects.exclude(company_point_of_contact_set=None).values('pk') ) def test_subquery_eq(self): qs = Employee.objects.annotate( is_ceo=Exists(Company.objects.filter(ceo=OuterRef('pk'))), is_point_of_contact=Exists( Company.objects.filter(point_of_contact=OuterRef('pk')), ), small_company=Exists( queryset=Company.objects.filter(num_employees__lt=200), ), ).filter(is_ceo=True, is_point_of_contact=False, small_company=True) self.assertNotEqual( qs.query.annotations['is_ceo'], qs.query.annotations['is_point_of_contact'], ) self.assertNotEqual( qs.query.annotations['is_ceo'], qs.query.annotations['small_company'], ) def test_in_subquery(self): # This is a contrived test (and you really wouldn't write this query), # but it is a succinct way to test the __in=Subquery() construct. 
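# Shape of the construct (sketch):
#     small = Company.objects.filter(num_employees__lt=200).values('pk')
#     Company.objects.filter(pk__in=Subquery(small))
# Passing the queryset directly to __in would also work; the explicit
# Subquery() wrapper is what these assertions exercise.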
small_companies = Company.objects.filter(num_employees__lt=200).values('pk') subquery_test = Company.objects.filter(pk__in=Subquery(small_companies)) self.assertCountEqual(subquery_test, [self.foobar_ltd, self.gmbh]) subquery_test2 = Company.objects.filter(pk=Subquery(small_companies.filter(num_employees=3))) self.assertCountEqual(subquery_test2, [self.foobar_ltd]) def test_uuid_pk_subquery(self): u = UUIDPK.objects.create() UUID.objects.create(uuid_fk=u) qs = UUIDPK.objects.filter(id__in=Subquery(UUID.objects.values('uuid_fk__id'))) self.assertCountEqual(qs, [u]) def test_nested_subquery(self): inner = Company.objects.filter(point_of_contact=OuterRef('pk')) outer = Employee.objects.annotate(is_point_of_contact=Exists(inner)) contrived = Employee.objects.annotate( is_point_of_contact=Subquery( outer.filter(pk=OuterRef('pk')).values('is_point_of_contact'), output_field=BooleanField(), ), ) self.assertCountEqual(contrived.values_list(), outer.values_list()) def test_nested_subquery_join_outer_ref(self): inner = Employee.objects.filter(pk=OuterRef('ceo__pk')).values('pk') qs = Employee.objects.annotate( ceo_company=Subquery( Company.objects.filter( ceo__in=inner, ceo__pk=OuterRef('pk'), ).values('pk'), ), ) self.assertSequenceEqual( qs.values_list('ceo_company', flat=True), [self.example_inc.pk, self.foobar_ltd.pk, self.gmbh.pk], ) def test_nested_subquery_outer_ref_2(self): first = Time.objects.create(time='09:00') second = Time.objects.create(time='17:00') third = Time.objects.create(time='21:00') SimulationRun.objects.bulk_create([ SimulationRun(start=first, end=second, midpoint='12:00'), SimulationRun(start=first, end=third, midpoint='15:00'), SimulationRun(start=second, end=first, midpoint='00:00'), ]) inner = Time.objects.filter(time=OuterRef(OuterRef('time')), pk=OuterRef('start')).values('time') middle = SimulationRun.objects.annotate(other=Subquery(inner)).values('other')[:1] outer = Time.objects.annotate(other=Subquery(middle, output_field=TimeField())) # This is a contrived example. It exercises the double OuterRef form. self.assertCountEqual(outer, [first, second, third]) def test_nested_subquery_outer_ref_with_autofield(self): first = Time.objects.create(time='09:00') second = Time.objects.create(time='17:00') SimulationRun.objects.create(start=first, end=second, midpoint='12:00') inner = SimulationRun.objects.filter(start=OuterRef(OuterRef('pk'))).values('start') middle = Time.objects.annotate(other=Subquery(inner)).values('other')[:1] outer = Time.objects.annotate(other=Subquery(middle, output_field=IntegerField())) # This exercises the double OuterRef form with AutoField as pk. 
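# Informally: OuterRef(OuterRef('pk')) is resolved one level out by
# `middle` and then again by `outer`, so the innermost filter ends up
# comparing SimulationRun.start against each outer Time row's pk.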
self.assertCountEqual(outer, [first, second]) def test_annotations_within_subquery(self): Company.objects.filter(num_employees__lt=50).update(ceo=Employee.objects.get(firstname='Frank')) inner = Company.objects.filter( ceo=OuterRef('pk') ).values('ceo').annotate(total_employees=Sum('num_employees')).values('total_employees') outer = Employee.objects.annotate(total_employees=Subquery(inner)).filter(salary__lte=Subquery(inner)) self.assertSequenceEqual( outer.order_by('-total_employees').values('salary', 'total_employees'), [{'salary': 10, 'total_employees': 2300}, {'salary': 20, 'total_employees': 35}], ) def test_subquery_references_joined_table_twice(self): inner = Company.objects.filter( num_chairs__gte=OuterRef('ceo__salary'), num_employees__gte=OuterRef('point_of_contact__salary'), ) # Another contrived example (there is no need to have a subquery here) outer = Company.objects.filter(pk__in=Subquery(inner.values('pk'))) self.assertFalse(outer.exists()) def test_subquery_filter_by_aggregate(self): Number.objects.create(integer=1000, float=1.2) Employee.objects.create(salary=1000) qs = Number.objects.annotate( min_valuable_count=Subquery( Employee.objects.filter( salary=OuterRef('integer'), ).annotate(cnt=Count('salary')).filter(cnt__gt=0).values('cnt')[:1] ), ) self.assertEqual(qs.get().float, 1.2) def test_subquery_filter_by_lazy(self): self.max.manager = Manager.objects.create(name='Manager') self.max.save() max_manager = SimpleLazyObject( lambda: Manager.objects.get(pk=self.max.manager.pk) ) qs = Company.objects.annotate( ceo_manager=Subquery( Employee.objects.filter( lastname=OuterRef('ceo__lastname'), ).values('manager'), ), ).filter(ceo_manager=max_manager) self.assertEqual(qs.get(), self.gmbh) def test_aggregate_subquery_annotation(self): with self.assertNumQueries(1) as ctx: aggregate = Company.objects.annotate( ceo_salary=Subquery( Employee.objects.filter( id=OuterRef('ceo_id'), ).values('salary') ), ).aggregate( ceo_salary_gt_20=Count('pk', filter=Q(ceo_salary__gt=20)), ) self.assertEqual(aggregate, {'ceo_salary_gt_20': 1}) # Aggregation over a subquery annotation doesn't annotate the subquery # twice in the inner query. sql = ctx.captured_queries[0]['sql'] self.assertLessEqual(sql.count('SELECT'), 3) # GROUP BY isn't required to aggregate over a query that doesn't # contain nested aggregates. 
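# i.e. every selected expression in the aggregate query is itself an
# aggregate, so the backend can evaluate it without grouping.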
self.assertNotIn('GROUP BY', sql) def test_explicit_output_field(self): class FuncA(Func): output_field = CharField() class FuncB(Func): pass expr = FuncB(FuncA()) self.assertEqual(expr.output_field, FuncA.output_field) def test_outerref_mixed_case_table_name(self): inner = Result.objects.filter(result_time__gte=OuterRef('experiment__assigned')) outer = Result.objects.filter(pk__in=Subquery(inner.values('pk'))) self.assertFalse(outer.exists()) def test_outerref_with_operator(self): inner = Company.objects.filter(num_employees=OuterRef('ceo__salary') + 2) outer = Company.objects.filter(pk__in=Subquery(inner.values('pk'))) self.assertEqual(outer.get().name, 'Test GmbH') def test_nested_outerref_with_function(self): self.gmbh.point_of_contact = Employee.objects.get(lastname='Meyer') self.gmbh.save() inner = Employee.objects.filter( lastname__startswith=Left(OuterRef(OuterRef('lastname')), 1), ) qs = Employee.objects.annotate( ceo_company=Subquery( Company.objects.filter( point_of_contact__in=inner, ceo__pk=OuterRef('pk'), ).values('name'), ), ).filter(ceo_company__isnull=False) self.assertEqual(qs.get().ceo_company, 'Test GmbH') def test_annotation_with_outerref(self): gmbh_salary = Company.objects.annotate( max_ceo_salary_raise=Subquery( Company.objects.annotate( salary_raise=OuterRef('num_employees') + F('num_employees'), ).order_by('-salary_raise').values('salary_raise')[:1], output_field=IntegerField(), ), ).get(pk=self.gmbh.pk) self.assertEqual(gmbh_salary.max_ceo_salary_raise, 2332) def test_annotation_with_nested_outerref(self): self.gmbh.point_of_contact = Employee.objects.get(lastname='Meyer') self.gmbh.save() inner = Employee.objects.annotate( outer_lastname=OuterRef(OuterRef('lastname')), ).filter(lastname__startswith=Left('outer_lastname', 1)) qs = Employee.objects.annotate( ceo_company=Subquery( Company.objects.filter( point_of_contact__in=inner, ceo__pk=OuterRef('pk'), ).values('name'), ), ).filter(ceo_company__isnull=False) self.assertEqual(qs.get().ceo_company, 'Test GmbH') def test_pickle_expression(self): expr = Value(1, output_field=IntegerField()) expr.convert_value # populate cached property self.assertEqual(pickle.loads(pickle.dumps(expr)), expr) def test_incorrect_field_in_F_expression(self): with self.assertRaisesMessage(FieldError, "Cannot resolve keyword 'nope' into field."): list(Employee.objects.filter(firstname=F('nope'))) def test_incorrect_joined_field_in_F_expression(self): with self.assertRaisesMessage(FieldError, "Cannot resolve keyword 'nope' into field."): list(Company.objects.filter(ceo__pk=F('point_of_contact__nope'))) def test_exists_in_filter(self): inner = Company.objects.filter(ceo=OuterRef('pk')).values('pk') qs1 = Employee.objects.filter(Exists(inner)) qs2 = Employee.objects.annotate(found=Exists(inner)).filter(found=True) self.assertCountEqual(qs1, qs2) self.assertFalse(Employee.objects.exclude(Exists(inner)).exists()) self.assertCountEqual(qs2, Employee.objects.exclude(~Exists(inner))) def test_subquery_in_filter(self): inner = Company.objects.filter(ceo=OuterRef('pk')).values('based_in_eu') self.assertSequenceEqual( Employee.objects.filter(Subquery(inner)), [self.foobar_ltd.ceo], ) def test_subquery_group_by_outerref_in_filter(self): inner = Company.objects.annotate( employee=OuterRef('pk'), ).values('employee').annotate( min_num_chairs=Min('num_chairs'), ).values('ceo') self.assertIs(Employee.objects.filter(pk__in=Subquery(inner)).exists(), True) def test_case_in_filter_if_boolean_output_field(self): is_ceo = 
Company.objects.filter(ceo=OuterRef('pk')) is_poc = Company.objects.filter(point_of_contact=OuterRef('pk')) qs = Employee.objects.filter( Case( When(Exists(is_ceo), then=True), When(Exists(is_poc), then=True), default=False, output_field=BooleanField(), ), ) self.assertSequenceEqual(qs, [self.example_inc.ceo, self.foobar_ltd.ceo, self.max]) def test_boolean_expression_combined(self): is_ceo = Company.objects.filter(ceo=OuterRef('pk')) is_poc = Company.objects.filter(point_of_contact=OuterRef('pk')) self.gmbh.point_of_contact = self.max self.gmbh.save() self.assertSequenceEqual( Employee.objects.filter(Exists(is_ceo) | Exists(is_poc)), [self.example_inc.ceo, self.foobar_ltd.ceo, self.max], ) self.assertSequenceEqual( Employee.objects.filter(Exists(is_ceo) & Exists(is_poc)), [self.max], ) self.assertSequenceEqual( Employee.objects.filter(Exists(is_ceo) & Q(salary__gte=30)), [self.max], ) self.assertSequenceEqual( Employee.objects.filter(Exists(is_poc) | Q(salary__lt=15)), [self.example_inc.ceo, self.max], ) class IterableLookupInnerExpressionsTests(TestCase): @classmethod def setUpTestData(cls): ceo = Employee.objects.create(firstname='Just', lastname='Doit', salary=30) # MySQL requires that the values calculated for expressions don't pass # outside of the field's range, so it's inconvenient to use the values # in the more general tests. Company.objects.create(name='5020 Ltd', num_employees=50, num_chairs=20, ceo=ceo) Company.objects.create(name='5040 Ltd', num_employees=50, num_chairs=40, ceo=ceo) Company.objects.create(name='5050 Ltd', num_employees=50, num_chairs=50, ceo=ceo) Company.objects.create(name='5060 Ltd', num_employees=50, num_chairs=60, ceo=ceo) Company.objects.create(name='99300 Ltd', num_employees=99, num_chairs=300, ceo=ceo) def test_in_lookup_allows_F_expressions_and_expressions_for_integers(self): # __in lookups can use F() expressions for integers. 
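# Roughly, the first filter below compiles to
#     WHERE num_employees IN ((num_chairs - 10))
# i.e. members of the __in list may themselves be column expressions.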
queryset = Company.objects.filter(num_employees__in=([F('num_chairs') - 10])) self.assertQuerysetEqual(queryset, ['<Company: 5060 Ltd>'], ordered=False) self.assertQuerysetEqual( Company.objects.filter(num_employees__in=([F('num_chairs') - 10, F('num_chairs') + 10])), ['<Company: 5040 Ltd>', '<Company: 5060 Ltd>'], ordered=False ) self.assertQuerysetEqual( Company.objects.filter( num_employees__in=([F('num_chairs') - 10, F('num_chairs'), F('num_chairs') + 10]) ), ['<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'], ordered=False ) def test_expressions_in_lookups_join_choice(self): midpoint = datetime.time(13, 0) t1 = Time.objects.create(time=datetime.time(12, 0)) t2 = Time.objects.create(time=datetime.time(14, 0)) SimulationRun.objects.create(start=t1, end=t2, midpoint=midpoint) SimulationRun.objects.create(start=t1, end=None, midpoint=midpoint) SimulationRun.objects.create(start=None, end=t2, midpoint=midpoint) SimulationRun.objects.create(start=None, end=None, midpoint=midpoint) queryset = SimulationRun.objects.filter(midpoint__range=[F('start__time'), F('end__time')]) self.assertQuerysetEqual( queryset, ['<SimulationRun: 13:00:00 (12:00:00 to 14:00:00)>'], ordered=False ) for alias in queryset.query.alias_map.values(): if isinstance(alias, Join): self.assertEqual(alias.join_type, constants.INNER) queryset = SimulationRun.objects.exclude(midpoint__range=[F('start__time'), F('end__time')]) self.assertQuerysetEqual(queryset, [], ordered=False) for alias in queryset.query.alias_map.values(): if isinstance(alias, Join): self.assertEqual(alias.join_type, constants.LOUTER) def test_range_lookup_allows_F_expressions_and_expressions_for_integers(self): # Range lookups can use F() expressions for integers. Company.objects.filter(num_employees__exact=F("num_chairs")) self.assertQuerysetEqual( Company.objects.filter(num_employees__range=(F('num_chairs'), 100)), ['<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>'], ordered=False ) self.assertQuerysetEqual( Company.objects.filter(num_employees__range=(F('num_chairs') - 10, F('num_chairs') + 10)), ['<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'], ordered=False ) self.assertQuerysetEqual( Company.objects.filter(num_employees__range=(F('num_chairs') - 10, 100)), ['<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'], ordered=False ) self.assertQuerysetEqual( Company.objects.filter(num_employees__range=(1, 100)), [ '<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>', '<Company: 99300 Ltd>', ], ordered=False ) @unittest.skipUnless(connection.vendor == 'sqlite', "This defensive test only works on databases that don't validate parameter types") def test_complex_expressions_do_not_introduce_sql_injection_via_untrusted_string_inclusion(self): """ This tests that SQL injection isn't possible using compilation of expressions in iterable filters, as their compilation happens before the main query compilation. It's limited to SQLite, as PostgreSQL, Oracle and other vendors have defense in depth against this by type checking. Testing against SQLite (the most permissive of the built-in databases) demonstrates that the problem doesn't exist while keeping the test simple. 
""" queryset = Company.objects.filter(name__in=[F('num_chairs') + '1)) OR ((1==1']) self.assertQuerysetEqual(queryset, [], ordered=False) def test_in_lookup_allows_F_expressions_and_expressions_for_datetimes(self): start = datetime.datetime(2016, 2, 3, 15, 0, 0) end = datetime.datetime(2016, 2, 5, 15, 0, 0) experiment_1 = Experiment.objects.create( name='Integrity testing', assigned=start.date(), start=start, end=end, completed=end.date(), estimated_time=end - start, ) experiment_2 = Experiment.objects.create( name='Taste testing', assigned=start.date(), start=start, end=end, completed=end.date(), estimated_time=end - start, ) Result.objects.create( experiment=experiment_1, result_time=datetime.datetime(2016, 2, 4, 15, 0, 0), ) Result.objects.create( experiment=experiment_1, result_time=datetime.datetime(2016, 3, 10, 2, 0, 0), ) Result.objects.create( experiment=experiment_2, result_time=datetime.datetime(2016, 1, 8, 5, 0, 0), ) within_experiment_time = [F('experiment__start'), F('experiment__end')] queryset = Result.objects.filter(result_time__range=within_experiment_time) self.assertQuerysetEqual(queryset, ["<Result: Result at 2016-02-04 15:00:00>"]) within_experiment_time = [F('experiment__start'), F('experiment__end')] queryset = Result.objects.filter(result_time__range=within_experiment_time) self.assertQuerysetEqual(queryset, ["<Result: Result at 2016-02-04 15:00:00>"]) class FTests(SimpleTestCase): def test_deepcopy(self): f = F("foo") g = deepcopy(f) self.assertEqual(f.name, g.name) def test_deconstruct(self): f = F('name') path, args, kwargs = f.deconstruct() self.assertEqual(path, 'django.db.models.expressions.F') self.assertEqual(args, (f.name,)) self.assertEqual(kwargs, {}) def test_equal(self): f = F('name') same_f = F('name') other_f = F('username') self.assertEqual(f, same_f) self.assertNotEqual(f, other_f) def test_hash(self): d = {F('name'): 'Bob'} self.assertIn(F('name'), d) self.assertEqual(d[F('name')], 'Bob') def test_not_equal_Value(self): f = F('name') value = Value('name') self.assertNotEqual(f, value) self.assertNotEqual(value, f) class ExpressionsTests(TestCase): def test_F_reuse(self): f = F('id') n = Number.objects.create(integer=-1) c = Company.objects.create( name="Example Inc.", num_employees=2300, num_chairs=5, ceo=Employee.objects.create(firstname="Joe", lastname="Smith") ) c_qs = Company.objects.filter(id=f) self.assertEqual(c_qs.get(), c) # Reuse the same F-object for another queryset n_qs = Number.objects.filter(id=f) self.assertEqual(n_qs.get(), n) # The original query still works correctly self.assertEqual(c_qs.get(), c) def test_patterns_escape(self): r""" Special characters (e.g. 
%, _ and \) stored in database are properly escaped when using a pattern lookup with an expression refs #16731 """ Employee.objects.bulk_create([ Employee(firstname="%Joh\\nny", lastname="%Joh\\n"), Employee(firstname="Johnny", lastname="%John"), Employee(firstname="Jean-Claude", lastname="Claud_"), Employee(firstname="Jean-Claude", lastname="Claude"), Employee(firstname="Jean-Claude", lastname="Claude%"), Employee(firstname="Johnny", lastname="Joh\\n"), Employee(firstname="Johnny", lastname="John"), Employee(firstname="Johnny", lastname="_ohn"), ]) self.assertQuerysetEqual( Employee.objects.filter(firstname__contains=F('lastname')), ["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Jean-Claude Claude>", "<Employee: Johnny John>"], ordered=False, ) self.assertQuerysetEqual( Employee.objects.filter(firstname__startswith=F('lastname')), ["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Johnny John>"], ordered=False, ) self.assertQuerysetEqual( Employee.objects.filter(firstname__endswith=F('lastname')), ["<Employee: Jean-Claude Claude>"], ordered=False, ) def test_insensitive_patterns_escape(self): r""" Special characters (e.g. %, _ and \) stored in database are properly escaped when using a case insensitive pattern lookup with an expression -- refs #16731 """ Employee.objects.bulk_create([ Employee(firstname="%Joh\\nny", lastname="%joh\\n"), Employee(firstname="Johnny", lastname="%john"), Employee(firstname="Jean-Claude", lastname="claud_"), Employee(firstname="Jean-Claude", lastname="claude"), Employee(firstname="Jean-Claude", lastname="claude%"), Employee(firstname="Johnny", lastname="joh\\n"), Employee(firstname="Johnny", lastname="john"), Employee(firstname="Johnny", lastname="_ohn"), ]) self.assertQuerysetEqual( Employee.objects.filter(firstname__icontains=F('lastname')), ["<Employee: %Joh\\nny %joh\\n>", "<Employee: Jean-Claude claude>", "<Employee: Johnny john>"], ordered=False, ) self.assertQuerysetEqual( Employee.objects.filter(firstname__istartswith=F('lastname')), ["<Employee: %Joh\\nny %joh\\n>", "<Employee: Johnny john>"], ordered=False, ) self.assertQuerysetEqual( Employee.objects.filter(firstname__iendswith=F('lastname')), ["<Employee: Jean-Claude claude>"], ordered=False, ) @isolate_apps('expressions') class SimpleExpressionTests(SimpleTestCase): def test_equal(self): self.assertEqual(Expression(), Expression()) self.assertEqual( Expression(IntegerField()), Expression(output_field=IntegerField()) ) self.assertEqual(Expression(IntegerField()), mock.ANY) self.assertNotEqual( Expression(IntegerField()), Expression(CharField()) ) class TestModel(Model): field = IntegerField() other_field = IntegerField() self.assertNotEqual( Expression(TestModel._meta.get_field('field')), Expression(TestModel._meta.get_field('other_field')), ) def test_hash(self): self.assertEqual(hash(Expression()), hash(Expression())) self.assertEqual( hash(Expression(IntegerField())), hash(Expression(output_field=IntegerField())) ) self.assertNotEqual( hash(Expression(IntegerField())), hash(Expression(CharField())), ) class TestModel(Model): field = IntegerField() other_field = IntegerField() self.assertNotEqual( hash(Expression(TestModel._meta.get_field('field'))), hash(Expression(TestModel._meta.get_field('other_field'))), ) class ExpressionsNumericTests(TestCase): @classmethod def setUpTestData(cls): Number(integer=-1).save() Number(integer=42).save() Number(integer=1337).save() Number.objects.update(float=F('integer')) def test_fill_with_value_from_same_object(self): """ We can fill a value in all objects with an 
other value of the same object. """ self.assertQuerysetEqual( Number.objects.all(), ['<Number: -1, -1.000>', '<Number: 42, 42.000>', '<Number: 1337, 1337.000>'], ordered=False ) def test_increment_value(self): """ We can increment a value of all objects in a query set. """ self.assertEqual(Number.objects.filter(integer__gt=0).update(integer=F('integer') + 1), 2) self.assertQuerysetEqual( Number.objects.all(), ['<Number: -1, -1.000>', '<Number: 43, 42.000>', '<Number: 1338, 1337.000>'], ordered=False ) def test_filter_not_equals_other_field(self): """ We can filter for objects, where a value is not equals the value of an other field. """ self.assertEqual(Number.objects.filter(integer__gt=0).update(integer=F('integer') + 1), 2) self.assertQuerysetEqual( Number.objects.exclude(float=F('integer')), ['<Number: 43, 42.000>', '<Number: 1338, 1337.000>'], ordered=False ) def test_complex_expressions(self): """ Complex expressions of different connection types are possible. """ n = Number.objects.create(integer=10, float=123.45) self.assertEqual(Number.objects.filter(pk=n.pk).update( float=F('integer') + F('float') * 2), 1) self.assertEqual(Number.objects.get(pk=n.pk).integer, 10) self.assertEqual(Number.objects.get(pk=n.pk).float, Approximate(256.900, places=3)) class ExpressionOperatorTests(TestCase): @classmethod def setUpTestData(cls): cls.n = Number.objects.create(integer=42, float=15.5) cls.n1 = Number.objects.create(integer=-42, float=-15.5) def test_lefthand_addition(self): # LH Addition of floats and integers Number.objects.filter(pk=self.n.pk).update( integer=F('integer') + 15, float=F('float') + 42.7 ) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3)) def test_lefthand_subtraction(self): # LH Subtraction of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') - 15, float=F('float') - 42.7) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(-27.200, places=3)) def test_lefthand_multiplication(self): # Multiplication of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') * 15, float=F('float') * 42.7) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3)) def test_lefthand_division(self): # LH Division of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') / 2, float=F('float') / 42.7) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 21) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(0.363, places=3)) def test_lefthand_modulo(self): # LH Modulo arithmetic on integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') % 20) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 2) def test_lefthand_bitwise_and(self): # LH Bitwise ands on integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitand(56)) Number.objects.filter(pk=self.n1.pk).update(integer=F('integer').bitand(-56)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 40) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -64) def test_lefthand_bitwise_left_shift_operator(self): Number.objects.update(integer=F('integer').bitleftshift(2)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 168) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -168) def 
test_lefthand_bitwise_right_shift_operator(self): Number.objects.update(integer=F('integer').bitrightshift(2)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 10) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -11) def test_lefthand_bitwise_or(self): # LH Bitwise or on integers Number.objects.update(integer=F('integer').bitor(48)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 58) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -10) def test_lefthand_power(self): # LH Power arithmetic operation on floats and integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') ** 2, float=F('float') ** 1.5) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 1764) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(61.02, places=2)) @unittest.skipIf(connection.vendor == 'oracle', "Oracle doesn't support bitwise XOR.") def test_lefthand_bitwise_xor(self): Number.objects.update(integer=F('integer').bitxor(48)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 26) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -26) @unittest.skipIf(connection.vendor == 'oracle', "Oracle doesn't support bitwise XOR.") def test_lefthand_bitwise_xor_null(self): employee = Employee.objects.create(firstname='John', lastname='Doe') Employee.objects.update(salary=F('salary').bitxor(48)) employee.refresh_from_db() self.assertIsNone(employee.salary) @unittest.skipUnless(connection.vendor == 'oracle', "Oracle doesn't support bitwise XOR.") def test_lefthand_bitwise_xor_not_supported(self): msg = 'Bitwise XOR is not supported in Oracle.' with self.assertRaisesMessage(NotSupportedError, msg): Number.objects.update(integer=F('integer').bitxor(48)) def test_right_hand_addition(self): # Right hand operators Number.objects.filter(pk=self.n.pk).update(integer=15 + F('integer'), float=42.7 + F('float')) # RH Addition of floats and integers self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3)) def test_right_hand_subtraction(self): Number.objects.filter(pk=self.n.pk).update(integer=15 - F('integer'), float=42.7 - F('float')) # RH Subtraction of floats and integers self.assertEqual(Number.objects.get(pk=self.n.pk).integer, -27) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(27.200, places=3)) def test_right_hand_multiplication(self): # RH Multiplication of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=15 * F('integer'), float=42.7 * F('float')) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3)) def test_right_hand_division(self): # RH Division of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=640 / F('integer'), float=42.7 / F('float')) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 15) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(2.755, places=3)) def test_right_hand_modulo(self): # RH Modulo arithmetic on integers Number.objects.filter(pk=self.n.pk).update(integer=69 % F('integer')) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27) def test_righthand_power(self): # RH Power arithmetic operation on floats and integers Number.objects.filter(pk=self.n.pk).update(integer=2 ** F('integer'), float=1.5 ** F('float')) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 4398046511104) 
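# Worked values for the right-hand power update above: 2 ** 42 ==
# 4398046511104, and 1.5 ** 15.5 ≈ 536.308, which the float assertion
# below checks approximately.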
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(536.308, places=3)) class FTimeDeltaTests(TestCase): @classmethod def setUpTestData(cls): cls.sday = sday = datetime.date(2010, 6, 25) cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000) midnight = datetime.time(0) delta0 = datetime.timedelta(0) delta1 = datetime.timedelta(microseconds=253000) delta2 = datetime.timedelta(seconds=44) delta3 = datetime.timedelta(hours=21, minutes=8) delta4 = datetime.timedelta(days=10) delta5 = datetime.timedelta(days=90) # Test data is set so that deltas and delays will be # strictly increasing. cls.deltas = [] cls.delays = [] cls.days_long = [] # e0: started same day as assigned, zero duration end = stime + delta0 cls.e0 = Experiment.objects.create( name='e0', assigned=sday, start=stime, end=end, completed=end.date(), estimated_time=delta0, ) cls.deltas.append(delta0) cls.delays.append(cls.e0.start - datetime.datetime.combine(cls.e0.assigned, midnight)) cls.days_long.append(cls.e0.completed - cls.e0.assigned) # e1: started one day after assigned, tiny duration, data # set so that end time has no fractional seconds, which # tests an edge case on sqlite. delay = datetime.timedelta(1) end = stime + delay + delta1 e1 = Experiment.objects.create( name='e1', assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta1, ) cls.deltas.append(delta1) cls.delays.append(e1.start - datetime.datetime.combine(e1.assigned, midnight)) cls.days_long.append(e1.completed - e1.assigned) # e2: started three days after assigned, small duration end = stime + delta2 e2 = Experiment.objects.create( name='e2', assigned=sday - datetime.timedelta(3), start=stime, end=end, completed=end.date(), estimated_time=datetime.timedelta(hours=1), ) cls.deltas.append(delta2) cls.delays.append(e2.start - datetime.datetime.combine(e2.assigned, midnight)) cls.days_long.append(e2.completed - e2.assigned) # e3: started four days after assigned, medium duration delay = datetime.timedelta(4) end = stime + delay + delta3 e3 = Experiment.objects.create( name='e3', assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta3, ) cls.deltas.append(delta3) cls.delays.append(e3.start - datetime.datetime.combine(e3.assigned, midnight)) cls.days_long.append(e3.completed - e3.assigned) # e4: started 10 days after assignment, long duration end = stime + delta4 e4 = Experiment.objects.create( name='e4', assigned=sday - datetime.timedelta(10), start=stime, end=end, completed=end.date(), estimated_time=delta4 - datetime.timedelta(1), ) cls.deltas.append(delta4) cls.delays.append(e4.start - datetime.datetime.combine(e4.assigned, midnight)) cls.days_long.append(e4.completed - e4.assigned) # e5: started a month after assignment, very long duration delay = datetime.timedelta(30) end = stime + delay + delta5 e5 = Experiment.objects.create( name='e5', assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta5, ) cls.deltas.append(delta5) cls.delays.append(e5.start - datetime.datetime.combine(e5.assigned, midnight)) cls.days_long.append(e5.completed - e5.assigned) cls.expnames = [e.name for e in Experiment.objects.all()] def test_multiple_query_compilation(self): # Ticket #21643 queryset = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1)) q1 = str(queryset.query) q2 = str(queryset.query) self.assertEqual(q1, q2) def test_query_clone(self): # Ticket #21643 - Crash when compiling query more than once qs = 
Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1)) qs2 = qs.all() list(qs) list(qs2) # Intentionally no assert def test_delta_add(self): for i, delta in enumerate(self.deltas): test_set = [e.name for e in Experiment.objects.filter(end__lt=F('start') + delta)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(end__lt=delta + F('start'))] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(end__lte=F('start') + delta)] self.assertEqual(test_set, self.expnames[:i + 1]) def test_delta_subtract(self): for i, delta in enumerate(self.deltas): test_set = [e.name for e in Experiment.objects.filter(start__gt=F('end') - delta)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(start__gte=F('end') - delta)] self.assertEqual(test_set, self.expnames[:i + 1]) def test_exclude(self): for i, delta in enumerate(self.deltas): test_set = [e.name for e in Experiment.objects.exclude(end__lt=F('start') + delta)] self.assertEqual(test_set, self.expnames[i:]) test_set = [e.name for e in Experiment.objects.exclude(end__lte=F('start') + delta)] self.assertEqual(test_set, self.expnames[i + 1:]) def test_date_comparison(self): for i, days in enumerate(self.days_long): test_set = [e.name for e in Experiment.objects.filter(completed__lt=F('assigned') + days)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(completed__lte=F('assigned') + days)] self.assertEqual(test_set, self.expnames[:i + 1]) @skipUnlessDBFeature("supports_mixed_date_datetime_comparisons") def test_mixed_comparisons1(self): for i, delay in enumerate(self.delays): test_set = [e.name for e in Experiment.objects.filter(assigned__gt=F('start') - delay)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(assigned__gte=F('start') - delay)] self.assertEqual(test_set, self.expnames[:i + 1]) def test_mixed_comparisons2(self): for i, delay in enumerate(self.delays): delay = datetime.timedelta(delay.days) test_set = [e.name for e in Experiment.objects.filter(start__lt=F('assigned') + delay)] self.assertEqual(test_set, self.expnames[:i]) test_set = [ e.name for e in Experiment.objects.filter(start__lte=F('assigned') + delay + datetime.timedelta(1)) ] self.assertEqual(test_set, self.expnames[:i + 1]) def test_delta_update(self): for delta in self.deltas: exps = Experiment.objects.all() expected_durations = [e.duration() for e in exps] expected_starts = [e.start + delta for e in exps] expected_ends = [e.end + delta for e in exps] Experiment.objects.update(start=F('start') + delta, end=F('end') + delta) exps = Experiment.objects.all() new_starts = [e.start for e in exps] new_ends = [e.end for e in exps] new_durations = [e.duration() for e in exps] self.assertEqual(expected_starts, new_starts) self.assertEqual(expected_ends, new_ends) self.assertEqual(expected_durations, new_durations) def test_invalid_operator(self): with self.assertRaises(DatabaseError): list(Experiment.objects.filter(start=F('start') * datetime.timedelta(0))) def test_durationfield_add(self): zeros = [e.name for e in Experiment.objects.filter(start=F('start') + F('estimated_time'))] self.assertEqual(zeros, ['e0']) end_less = [e.name for e in Experiment.objects.filter(end__lt=F('start') + F('estimated_time'))] self.assertEqual(end_less, ['e2']) delta_math = [ e.name for e in Experiment.objects.filter(end__gte=F('start') + 
F('estimated_time') + datetime.timedelta(hours=1)) ] self.assertEqual(delta_math, ['e4']) queryset = Experiment.objects.annotate(shifted=ExpressionWrapper( F('start') + Value(None, output_field=DurationField()), output_field=DateTimeField(), )) self.assertIsNone(queryset.first().shifted) def test_duration_expressions(self): for delta in self.deltas: qs = Experiment.objects.annotate(duration=F('estimated_time') + delta) for obj in qs: self.assertEqual(obj.duration, obj.estimated_time + delta) @skipUnlessDBFeature('supports_temporal_subtraction') def test_date_subtraction(self): queryset = Experiment.objects.annotate( completion_duration=F('completed') - F('assigned'), ) at_least_5_days = {e.name for e in queryset.filter(completion_duration__gte=datetime.timedelta(days=5))} self.assertEqual(at_least_5_days, {'e3', 'e4', 'e5'}) at_least_120_days = {e.name for e in queryset.filter(completion_duration__gte=datetime.timedelta(days=120))} self.assertEqual(at_least_120_days, {'e5'}) less_than_5_days = {e.name for e in queryset.filter(completion_duration__lt=datetime.timedelta(days=5))} self.assertEqual(less_than_5_days, {'e0', 'e1', 'e2'}) queryset = Experiment.objects.annotate( difference=F('completed') - Value(None, output_field=DateField()), ) self.assertIsNone(queryset.first().difference) queryset = Experiment.objects.annotate(shifted=ExpressionWrapper( F('completed') - Value(None, output_field=DurationField()), output_field=DateField(), )) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature('supports_temporal_subtraction') def test_date_subquery_subtraction(self): subquery = Experiment.objects.filter(pk=OuterRef('pk')).values('completed') queryset = Experiment.objects.annotate( difference=subquery - F('completed'), ).filter(difference=datetime.timedelta()) self.assertTrue(queryset.exists()) @skipUnlessDBFeature('supports_temporal_subtraction') def test_date_case_subtraction(self): queryset = Experiment.objects.annotate( date_case=Case( When(Q(name='e0'), then=F('completed')), output_field=DateField(), ), completed_value=Value( self.e0.completed, output_field=DateField(), ), difference=F('date_case') - F('completed_value'), ).filter(difference=datetime.timedelta()) self.assertEqual(queryset.get(), self.e0) @skipUnlessDBFeature('supports_temporal_subtraction') def test_time_subtraction(self): Time.objects.create(time=datetime.time(12, 30, 15, 2345)) queryset = Time.objects.annotate( difference=F('time') - Value(datetime.time(11, 15, 0), output_field=TimeField()), ) self.assertEqual( queryset.get().difference, datetime.timedelta(hours=1, minutes=15, seconds=15, microseconds=2345) ) queryset = Time.objects.annotate( difference=F('time') - Value(None, output_field=TimeField()), ) self.assertIsNone(queryset.first().difference) queryset = Time.objects.annotate(shifted=ExpressionWrapper( F('time') - Value(None, output_field=DurationField()), output_field=TimeField(), )) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature('supports_temporal_subtraction') def test_time_subquery_subtraction(self): Time.objects.create(time=datetime.time(12, 30, 15, 2345)) subquery = Time.objects.filter(pk=OuterRef('pk')).values('time') queryset = Time.objects.annotate( difference=subquery - F('time'), ).filter(difference=datetime.timedelta()) self.assertTrue(queryset.exists()) @skipUnlessDBFeature('supports_temporal_subtraction') def test_datetime_subtraction(self): under_estimate = [ e.name for e in Experiment.objects.filter(estimated_time__gt=F('end') - F('start')) ] 
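        # Only e2's actual duration (44 seconds) came in under its estimate
        # (1 hour), so it is the sole match for estimated_time__gt.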
self.assertEqual(under_estimate, ['e2']) over_estimate = [ e.name for e in Experiment.objects.filter(estimated_time__lt=F('end') - F('start')) ] self.assertEqual(over_estimate, ['e4']) queryset = Experiment.objects.annotate( difference=F('start') - Value(None, output_field=DateTimeField()), ) self.assertIsNone(queryset.first().difference) queryset = Experiment.objects.annotate(shifted=ExpressionWrapper( F('start') - Value(None, output_field=DurationField()), output_field=DateTimeField(), )) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature('supports_temporal_subtraction') def test_datetime_subquery_subtraction(self): subquery = Experiment.objects.filter(pk=OuterRef('pk')).values('start') queryset = Experiment.objects.annotate( difference=subquery - F('start'), ).filter(difference=datetime.timedelta()) self.assertTrue(queryset.exists()) @skipUnlessDBFeature('supports_temporal_subtraction') def test_datetime_subtraction_microseconds(self): delta = datetime.timedelta(microseconds=8999999999999999) Experiment.objects.update(end=F('start') + delta) qs = Experiment.objects.annotate(delta=F('end') - F('start')) for e in qs: self.assertEqual(e.delta, delta) def test_duration_with_datetime(self): # Exclude e1 which has very high precision so we can test this on all # backends regardless of whether or not it supports # microsecond_precision. over_estimate = Experiment.objects.exclude(name='e1').filter( completed__gt=self.stime + F('estimated_time'), ).order_by('name') self.assertQuerysetEqual(over_estimate, ['e3', 'e4', 'e5'], lambda e: e.name) def test_duration_with_datetime_microseconds(self): delta = datetime.timedelta(microseconds=8999999999999999) qs = Experiment.objects.annotate(dt=ExpressionWrapper( F('start') + delta, output_field=DateTimeField(), )) for e in qs: self.assertEqual(e.dt, e.start + delta) def test_date_minus_duration(self): more_than_4_days = Experiment.objects.filter( assigned__lt=F('completed') - Value(datetime.timedelta(days=4), output_field=DurationField()) ) self.assertQuerysetEqual(more_than_4_days, ['e3', 'e4', 'e5'], lambda e: e.name) def test_negative_timedelta_update(self): # subtract 30 seconds, 30 minutes, 2 hours and 2 days experiments = Experiment.objects.filter(name='e0').annotate( start_sub_seconds=F('start') + datetime.timedelta(seconds=-30), ).annotate( start_sub_minutes=F('start_sub_seconds') + datetime.timedelta(minutes=-30), ).annotate( start_sub_hours=F('start_sub_minutes') + datetime.timedelta(hours=-2), ).annotate( new_start=F('start_sub_hours') + datetime.timedelta(days=-2), ) expected_start = datetime.datetime(2010, 6, 23, 9, 45, 0) # subtract 30 microseconds experiments = experiments.annotate(new_start=F('new_start') + datetime.timedelta(microseconds=-30)) expected_start += datetime.timedelta(microseconds=+746970) experiments.update(start=F('new_start')) e0 = Experiment.objects.get(name='e0') self.assertEqual(e0.start, expected_start) class ValueTests(TestCase): def test_update_TimeField_using_Value(self): Time.objects.create() Time.objects.update(time=Value(datetime.time(1), output_field=TimeField())) self.assertEqual(Time.objects.get().time, datetime.time(1)) def test_update_UUIDField_using_Value(self): UUID.objects.create() UUID.objects.update(uuid=Value(uuid.UUID('12345678901234567890123456789012'), output_field=UUIDField())) self.assertEqual(UUID.objects.get().uuid, uuid.UUID('12345678901234567890123456789012')) def test_deconstruct(self): value = Value('name') path, args, kwargs = value.deconstruct() self.assertEqual(path, 
'django.db.models.expressions.Value') self.assertEqual(args, (value.value,)) self.assertEqual(kwargs, {}) def test_deconstruct_output_field(self): value = Value('name', output_field=CharField()) path, args, kwargs = value.deconstruct() self.assertEqual(path, 'django.db.models.expressions.Value') self.assertEqual(args, (value.value,)) self.assertEqual(len(kwargs), 1) self.assertEqual(kwargs['output_field'].deconstruct(), CharField().deconstruct()) def test_equal(self): value = Value('name') self.assertEqual(value, Value('name')) self.assertNotEqual(value, Value('username')) def test_hash(self): d = {Value('name'): 'Bob'} self.assertIn(Value('name'), d) self.assertEqual(d[Value('name')], 'Bob') def test_equal_output_field(self): value = Value('name', output_field=CharField()) same_value = Value('name', output_field=CharField()) other_value = Value('name', output_field=TimeField()) no_output_field = Value('name') self.assertEqual(value, same_value) self.assertNotEqual(value, other_value) self.assertNotEqual(value, no_output_field) def test_raise_empty_expressionlist(self): msg = 'ExpressionList requires at least one expression' with self.assertRaisesMessage(ValueError, msg): ExpressionList() class FieldTransformTests(TestCase): @classmethod def setUpTestData(cls): cls.sday = sday = datetime.date(2010, 6, 25) cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000) cls.ex1 = Experiment.objects.create( name='Experiment 1', assigned=sday, completed=sday + datetime.timedelta(2), estimated_time=datetime.timedelta(2), start=stime, end=stime + datetime.timedelta(2), ) def test_month_aggregation(self): self.assertEqual( Experiment.objects.aggregate(month_count=Count('assigned__month')), {'month_count': 1} ) def test_transform_in_values(self): self.assertQuerysetEqual( Experiment.objects.values('assigned__month'), ["{'assigned__month': 6}"] ) def test_multiple_transforms_in_values(self): self.assertQuerysetEqual( Experiment.objects.values('end__date__month'), ["{'end__date__month': 6}"] ) class ReprTests(SimpleTestCase): def test_expressions(self): self.assertEqual( repr(Case(When(a=1))), "<Case: CASE WHEN <Q: (AND: ('a', 1))> THEN Value(None), ELSE Value(None)>" ) self.assertEqual( repr(When(Q(age__gte=18), then=Value('legal'))), "<When: WHEN <Q: (AND: ('age__gte', 18))> THEN Value(legal)>" ) self.assertEqual(repr(Col('alias', 'field')), "Col(alias, field)") self.assertEqual(repr(F('published')), "F(published)") self.assertEqual(repr(F('cost') + F('tax')), "<CombinedExpression: F(cost) + F(tax)>") self.assertEqual( repr(ExpressionWrapper(F('cost') + F('tax'), IntegerField())), "ExpressionWrapper(F(cost) + F(tax))" ) self.assertEqual(repr(Func('published', function='TO_CHAR')), "Func(F(published), function=TO_CHAR)") self.assertEqual(repr(OrderBy(Value(1))), 'OrderBy(Value(1), descending=False)') self.assertEqual(repr(Random()), "Random()") self.assertEqual(repr(RawSQL('table.col', [])), "RawSQL(table.col, [])") self.assertEqual(repr(Ref('sum_cost', Sum('cost'))), "Ref(sum_cost, Sum(F(cost)))") self.assertEqual(repr(Value(1)), "Value(1)") self.assertEqual( repr(ExpressionList(F('col'), F('anothercol'))), 'ExpressionList(F(col), F(anothercol))' ) self.assertEqual( repr(ExpressionList(OrderBy(F('col'), descending=False))), 'ExpressionList(OrderBy(F(col), descending=False))' ) def test_functions(self): self.assertEqual(repr(Coalesce('a', 'b')), "Coalesce(F(a), F(b))") self.assertEqual(repr(Concat('a', 'b')), "Concat(ConcatPair(F(a), F(b)))") self.assertEqual(repr(Length('a')), 
"Length(F(a))") self.assertEqual(repr(Lower('a')), "Lower(F(a))") self.assertEqual(repr(Substr('a', 1, 3)), "Substr(F(a), Value(1), Value(3))") self.assertEqual(repr(Upper('a')), "Upper(F(a))") def test_aggregates(self): self.assertEqual(repr(Avg('a')), "Avg(F(a))") self.assertEqual(repr(Count('a')), "Count(F(a))") self.assertEqual(repr(Count('*')), "Count('*')") self.assertEqual(repr(Max('a')), "Max(F(a))") self.assertEqual(repr(Min('a')), "Min(F(a))") self.assertEqual(repr(StdDev('a')), "StdDev(F(a), sample=False)") self.assertEqual(repr(Sum('a')), "Sum(F(a))") self.assertEqual(repr(Variance('a', sample=True)), "Variance(F(a), sample=True)") def test_distinct_aggregates(self): self.assertEqual(repr(Count('a', distinct=True)), "Count(F(a), distinct=True)") self.assertEqual(repr(Count('*', distinct=True)), "Count('*', distinct=True)") def test_filtered_aggregates(self): filter = Q(a=1) self.assertEqual(repr(Avg('a', filter=filter)), "Avg(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(Count('a', filter=filter)), "Count(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(Max('a', filter=filter)), "Max(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(Min('a', filter=filter)), "Min(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(StdDev('a', filter=filter)), "StdDev(F(a), filter=(AND: ('a', 1)), sample=False)") self.assertEqual(repr(Sum('a', filter=filter)), "Sum(F(a), filter=(AND: ('a', 1)))") self.assertEqual( repr(Variance('a', sample=True, filter=filter)), "Variance(F(a), filter=(AND: ('a', 1)), sample=True)" ) self.assertEqual( repr(Count('a', filter=filter, distinct=True)), "Count(F(a), distinct=True, filter=(AND: ('a', 1)))" ) class CombinableTests(SimpleTestCase): bitwise_msg = 'Use .bitand() and .bitor() for bitwise logical operations.' def test_negation(self): c = Combinable() self.assertEqual(-c, c * -1) def test_and(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): Combinable() & Combinable() def test_or(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): Combinable() | Combinable() def test_reversed_and(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): object() & Combinable() def test_reversed_or(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): object() | Combinable() class ExpressionWrapperTests(SimpleTestCase): def test_empty_group_by(self): expr = ExpressionWrapper(Value(3), output_field=IntegerField()) self.assertEqual(expr.get_group_by_cols(alias=None), []) def test_non_empty_group_by(self): expr = ExpressionWrapper(Lower(Value('f')), output_field=IntegerField()) group_by_cols = expr.get_group_by_cols(alias=None) self.assertEqual(group_by_cols, [expr.expression]) self.assertEqual(group_by_cols[0].output_field, expr.output_field)
import ctypes import faulthandler import io import itertools import logging import multiprocessing import os import pickle import sys import textwrap import unittest from importlib import import_module from io import StringIO from django.core.management import call_command from django.db import connections from django.test import SimpleTestCase, TestCase from django.test.utils import ( setup_databases as _setup_databases, setup_test_environment, teardown_databases as _teardown_databases, teardown_test_environment, ) from django.utils.datastructures import OrderedSet from django.utils.version import PY37 try: import ipdb as pdb except ImportError: import pdb try: import tblib.pickling_support except ImportError: tblib = None class DebugSQLTextTestResult(unittest.TextTestResult): def __init__(self, stream, descriptions, verbosity): self.logger = logging.getLogger('django.db.backends') self.logger.setLevel(logging.DEBUG) super().__init__(stream, descriptions, verbosity) def startTest(self, test): self.debug_sql_stream = StringIO() self.handler = logging.StreamHandler(self.debug_sql_stream) self.logger.addHandler(self.handler) super().startTest(test) def stopTest(self, test): super().stopTest(test) self.logger.removeHandler(self.handler) if self.showAll: self.debug_sql_stream.seek(0) self.stream.write(self.debug_sql_stream.read()) self.stream.writeln(self.separator2) def addError(self, test, err): super().addError(test, err) self.debug_sql_stream.seek(0) self.errors[-1] = self.errors[-1] + (self.debug_sql_stream.read(),) def addFailure(self, test, err): super().addFailure(test, err) self.debug_sql_stream.seek(0) self.failures[-1] = self.failures[-1] + (self.debug_sql_stream.read(),) def addSubTest(self, test, subtest, err): super().addSubTest(test, subtest, err) if err is not None: self.debug_sql_stream.seek(0) errors = self.failures if issubclass(err[0], test.failureException) else self.errors errors[-1] = errors[-1] + (self.debug_sql_stream.read(),) def printErrorList(self, flavour, errors): for test, err, sql_debug in errors: self.stream.writeln(self.separator1) self.stream.writeln("%s: %s" % (flavour, self.getDescription(test))) self.stream.writeln(self.separator2) self.stream.writeln(err) self.stream.writeln(self.separator2) self.stream.writeln(sql_debug) class PDBDebugResult(unittest.TextTestResult): """ Custom result class that triggers a PDB session when an error or failure occurs. """ def addError(self, test, err): super().addError(test, err) self.debug(err) def addFailure(self, test, err): super().addFailure(test, err) self.debug(err) def debug(self, error): exc_type, exc_value, traceback = error print("\nOpening PDB: %r" % exc_value) pdb.post_mortem(traceback) class RemoteTestResult: """ Record information about which tests have succeeded and which have failed. The sole purpose of this class is to record events in the child processes so they can be replayed in the master process. As a consequence it doesn't inherit unittest.TestResult and doesn't attempt to implement all its API. The implementation matches the unpythonic coding style of unittest2. """ def __init__(self): if tblib is not None: tblib.pickling_support.install() self.events = [] self.failfast = False self.shouldStop = False self.testsRun = 0 @property def test_index(self): return self.testsRun - 1 def _confirm_picklable(self, obj): """ Confirm that obj can be pickled and unpickled as multiprocessing will need to pickle the exception in the child process and unpickle it in the parent process. 
Let the exception rise, if not. """ pickle.loads(pickle.dumps(obj)) def _print_unpicklable_subtest(self, test, subtest, pickle_exc): print(""" Subtest failed: test: {} subtest: {} Unfortunately, the subtest that failed cannot be pickled, so the parallel test runner cannot handle it cleanly. Here is the pickling error: > {} You should re-run this test with --parallel=1 to reproduce the failure with a cleaner failure message. """.format(test, subtest, pickle_exc)) def check_picklable(self, test, err): # Ensure that sys.exc_info() tuples are picklable. This displays a # clear multiprocessing.pool.RemoteTraceback generated in the child # process instead of a multiprocessing.pool.MaybeEncodingError, making # the root cause easier to figure out for users who aren't familiar # with the multiprocessing module. Since we're in a forked process, # our best chance to communicate with them is to print to stdout. try: self._confirm_picklable(err) except Exception as exc: original_exc_txt = repr(err[1]) original_exc_txt = textwrap.fill(original_exc_txt, 75, initial_indent=' ', subsequent_indent=' ') pickle_exc_txt = repr(exc) pickle_exc_txt = textwrap.fill(pickle_exc_txt, 75, initial_indent=' ', subsequent_indent=' ') if tblib is None: print(""" {} failed: {} Unfortunately, tracebacks cannot be pickled, making it impossible for the parallel test runner to handle this exception cleanly. In order to see the traceback, you should install tblib: python -m pip install tblib """.format(test, original_exc_txt)) else: print(""" {} failed: {} Unfortunately, the exception it raised cannot be pickled, making it impossible for the parallel test runner to handle it cleanly. Here's the error encountered while trying to pickle the exception: {} You should re-run this test with the --parallel=1 option to reproduce the failure and get a correct traceback. """.format(test, original_exc_txt, pickle_exc_txt)) raise def check_subtest_picklable(self, test, subtest): try: self._confirm_picklable(subtest) except Exception as exc: self._print_unpicklable_subtest(test, subtest, exc) raise def stop_if_failfast(self): if self.failfast: self.stop() def stop(self): self.shouldStop = True def startTestRun(self): self.events.append(('startTestRun',)) def stopTestRun(self): self.events.append(('stopTestRun',)) def startTest(self, test): self.testsRun += 1 self.events.append(('startTest', self.test_index)) def stopTest(self, test): self.events.append(('stopTest', self.test_index)) def addError(self, test, err): self.check_picklable(test, err) self.events.append(('addError', self.test_index, err)) self.stop_if_failfast() def addFailure(self, test, err): self.check_picklable(test, err) self.events.append(('addFailure', self.test_index, err)) self.stop_if_failfast() def addSubTest(self, test, subtest, err): # Follow Python 3.5's implementation of unittest.TestResult.addSubTest() # by not doing anything when a subtest is successful. if err is not None: # Call check_picklable() before check_subtest_picklable() since # check_picklable() performs the tblib check. self.check_picklable(test, err) self.check_subtest_picklable(test, subtest) self.events.append(('addSubTest', self.test_index, subtest, err)) self.stop_if_failfast() def addSuccess(self, test): self.events.append(('addSuccess', self.test_index)) def addSkip(self, test, reason): self.events.append(('addSkip', self.test_index, reason)) def addExpectedFailure(self, test, err): # If tblib isn't installed, pickling the traceback will always fail. 
# However we don't want tblib to be required for running the tests # when they pass or fail as expected. Drop the traceback when an # expected failure occurs. if tblib is None: err = err[0], err[1], None self.check_picklable(test, err) self.events.append(('addExpectedFailure', self.test_index, err)) def addUnexpectedSuccess(self, test): self.events.append(('addUnexpectedSuccess', self.test_index)) self.stop_if_failfast() class RemoteTestRunner: """ Run tests and record everything but don't display anything. The implementation matches the unpythonic coding style of unittest2. """ resultclass = RemoteTestResult def __init__(self, failfast=False, resultclass=None): self.failfast = failfast if resultclass is not None: self.resultclass = resultclass def run(self, test): result = self.resultclass() unittest.registerResult(result) result.failfast = self.failfast test(result) return result def default_test_processes(): """Default number of test processes when using the --parallel option.""" # The current implementation of the parallel test runner requires # multiprocessing to start subprocesses with fork(). if multiprocessing.get_start_method() != 'fork': return 1 try: return int(os.environ['DJANGO_TEST_PROCESSES']) except KeyError: return multiprocessing.cpu_count() _worker_id = 0 def _init_worker(counter): """ Switch to databases dedicated to this worker. This helper lives at module-level because of the multiprocessing module's requirements. """ global _worker_id with counter.get_lock(): counter.value += 1 _worker_id = counter.value for alias in connections: connection = connections[alias] settings_dict = connection.creation.get_test_db_clone_settings(str(_worker_id)) # connection.settings_dict must be updated in place for changes to be # reflected in django.db.connections. If the following line assigned # connection.settings_dict = settings_dict, new threads would connect # to the default database instead of the appropriate clone. connection.settings_dict.update(settings_dict) connection.close() def _run_subsuite(args): """ Run a suite of tests with a RemoteTestRunner and return a RemoteTestResult. This helper lives at module-level and its arguments are wrapped in a tuple because of the multiprocessing module's requirements. """ runner_class, subsuite_index, subsuite, failfast = args runner = runner_class(failfast=failfast) result = runner.run(subsuite) return subsuite_index, result.events class ParallelTestSuite(unittest.TestSuite): """ Run a series of tests in parallel in several processes. While the unittest module's documentation implies that orchestrating the execution of tests is the responsibility of the test runner, in practice, it appears that TestRunner classes are more concerned with formatting and displaying test results. Since there are fewer use cases for customizing TestSuite than TestRunner, implementing parallelization at the level of the TestSuite improves interoperability with existing custom test runners. A single instance of a test runner can still collect results from all tests without being aware that they have been run in parallel. """ # In case someone wants to modify these in a subclass. init_worker = _init_worker run_subsuite = _run_subsuite runner_class = RemoteTestRunner def __init__(self, suite, processes, failfast=False): self.subsuites = partition_suite_by_case(suite) self.processes = processes self.failfast = failfast super().__init__() def run(self, result): """ Distribute test cases across workers. 
Return an identifier of each test case with its result in order to use imap_unordered to show results as soon as they're available. To minimize pickling errors when getting results from workers: - pass back numeric indexes in self.subsuites instead of tests - make tracebacks picklable with tblib, if available Even with tblib, errors may still occur for dynamically created exception classes which cannot be unpickled. """ counter = multiprocessing.Value(ctypes.c_int, 0) pool = multiprocessing.Pool( processes=self.processes, initializer=self.init_worker.__func__, initargs=[counter], ) args = [ (self.runner_class, index, subsuite, self.failfast) for index, subsuite in enumerate(self.subsuites) ] test_results = pool.imap_unordered(self.run_subsuite.__func__, args) while True: if result.shouldStop: pool.terminate() break try: subsuite_index, events = test_results.next(timeout=0.1) except multiprocessing.TimeoutError: continue except StopIteration: pool.close() break tests = list(self.subsuites[subsuite_index]) for event in events: event_name = event[0] handler = getattr(result, event_name, None) if handler is None: continue test = tests[event[1]] args = event[2:] handler(test, *args) pool.join() return result def __iter__(self): return iter(self.subsuites) class DiscoverRunner: """A Django test runner that uses unittest2 test discovery.""" test_suite = unittest.TestSuite parallel_test_suite = ParallelTestSuite test_runner = unittest.TextTestRunner test_loader = unittest.defaultTestLoader reorder_by = (TestCase, SimpleTestCase) def __init__(self, pattern=None, top_level=None, verbosity=1, interactive=True, failfast=False, keepdb=False, reverse=False, debug_mode=False, debug_sql=False, parallel=0, tags=None, exclude_tags=None, test_name_patterns=None, pdb=False, buffer=False, enable_faulthandler=True, **kwargs): self.pattern = pattern self.top_level = top_level self.verbosity = verbosity self.interactive = interactive self.failfast = failfast self.keepdb = keepdb self.reverse = reverse self.debug_mode = debug_mode self.debug_sql = debug_sql self.parallel = parallel self.tags = set(tags or []) self.exclude_tags = set(exclude_tags or []) if not faulthandler.is_enabled() and enable_faulthandler: try: faulthandler.enable(file=sys.stderr.fileno()) except (AttributeError, io.UnsupportedOperation): faulthandler.enable(file=sys.__stderr__.fileno()) self.pdb = pdb if self.pdb and self.parallel > 1: raise ValueError('You cannot use --pdb with parallel tests; pass --parallel=1 to use it.') self.buffer = buffer if self.buffer and self.parallel > 1: raise ValueError( 'You cannot use -b/--buffer with parallel tests; pass ' '--parallel=1 to use it.' ) self.test_name_patterns = None if test_name_patterns: # unittest does not export the _convert_select_pattern function # that converts command-line arguments to patterns. self.test_name_patterns = { pattern if '*' in pattern else '*%s*' % pattern for pattern in test_name_patterns } @classmethod def add_arguments(cls, parser): parser.add_argument( '-t', '--top-level-directory', dest='top_level', help='Top level of project for unittest discovery.', ) parser.add_argument( '-p', '--pattern', default="test*.py", help='The test matching pattern. Defaults to test*.py.', ) parser.add_argument( '--keepdb', action='store_true', help='Preserves the test DB between runs.' 
) parser.add_argument( '-r', '--reverse', action='store_true', help='Reverses test cases order.', ) parser.add_argument( '--debug-mode', action='store_true', help='Sets settings.DEBUG to True.', ) parser.add_argument( '-d', '--debug-sql', action='store_true', help='Prints logged SQL queries on failure.', ) parser.add_argument( '--parallel', nargs='?', default=1, type=int, const=default_test_processes(), metavar='N', help='Run tests using up to N parallel processes.', ) parser.add_argument( '--tag', action='append', dest='tags', help='Run only tests with the specified tag. Can be used multiple times.', ) parser.add_argument( '--exclude-tag', action='append', dest='exclude_tags', help='Do not run tests with the specified tag. Can be used multiple times.', ) parser.add_argument( '--pdb', action='store_true', help='Runs a debugger (pdb, or ipdb if installed) on error or failure.' ) parser.add_argument( '-b', '--buffer', action='store_true', help='Discard output from passing tests.', ) parser.add_argument( '--no-faulthandler', action='store_false', dest='enable_faulthandler', help='Disables the Python faulthandler module during tests.', ) if PY37: parser.add_argument( '-k', action='append', dest='test_name_patterns', help=( 'Only run test methods and classes that match the pattern ' 'or substring. Can be used multiple times. Same as ' 'unittest -k option.' ), ) def setup_test_environment(self, **kwargs): setup_test_environment(debug=self.debug_mode) unittest.installHandler() def build_suite(self, test_labels=None, extra_tests=None, **kwargs): suite = self.test_suite() test_labels = test_labels or ['.'] extra_tests = extra_tests or [] self.test_loader.testNamePatterns = self.test_name_patterns discover_kwargs = {} if self.pattern is not None: discover_kwargs['pattern'] = self.pattern if self.top_level is not None: discover_kwargs['top_level_dir'] = self.top_level for label in test_labels: kwargs = discover_kwargs.copy() tests = None label_as_path = os.path.abspath(label) # if a module, or "module.ClassName[.method_name]", just run those if not os.path.exists(label_as_path): tests = self.test_loader.loadTestsFromName(label) elif os.path.isdir(label_as_path) and not self.top_level: # Try to be a bit smarter than unittest about finding the # default top-level for a given directory path, to avoid # breaking relative imports. (Unittest's default is to set # top-level equal to the path, which means relative imports # will result in "Attempted relative import in non-package."). # We'd be happy to skip this and require dotted module paths # (which don't cause this problem) instead of file paths (which # do), but in the case of a directory in the cwd, which would # be equally valid if considered as a top-level module or as a # directory path, unittest unfortunately prefers the latter. top_level = label_as_path while True: init_py = os.path.join(top_level, '__init__.py') if os.path.exists(init_py): try_next = os.path.dirname(top_level) if try_next == top_level: # __init__.py all the way down? give up. break top_level = try_next continue break kwargs['top_level_dir'] = top_level if not (tests and tests.countTestCases()) and is_discoverable(label): # Try discovery if path is a package or directory tests = self.test_loader.discover(start_dir=label, **kwargs) # Make unittest forget the top-level dir it calculated from this # run, to support running tests from two different top-levels. 
self.test_loader._top_level_dir = None suite.addTests(tests) for test in extra_tests: suite.addTest(test) if self.tags or self.exclude_tags: if self.verbosity >= 2: if self.tags: print('Including test tag(s): %s.' % ', '.join(sorted(self.tags))) if self.exclude_tags: print('Excluding test tag(s): %s.' % ', '.join(sorted(self.exclude_tags))) suite = filter_tests_by_tags(suite, self.tags, self.exclude_tags) suite = reorder_suite(suite, self.reorder_by, self.reverse) if self.parallel > 1: parallel_suite = self.parallel_test_suite(suite, self.parallel, self.failfast) # Since tests are distributed across processes on a per-TestCase # basis, there's no need for more processes than TestCases. parallel_units = len(parallel_suite.subsuites) self.parallel = min(self.parallel, parallel_units) # If there's only one TestCase, parallelization isn't needed. if self.parallel > 1: suite = parallel_suite return suite def setup_databases(self, **kwargs): return _setup_databases( self.verbosity, self.interactive, self.keepdb, self.debug_sql, self.parallel, **kwargs ) def get_resultclass(self): if self.debug_sql: return DebugSQLTextTestResult elif self.pdb: return PDBDebugResult def get_test_runner_kwargs(self): return { 'failfast': self.failfast, 'resultclass': self.get_resultclass(), 'verbosity': self.verbosity, 'buffer': self.buffer, } def run_checks(self, databases): # Checks are run after database creation since some checks require # database access. call_command('check', verbosity=self.verbosity, databases=databases) def run_suite(self, suite, **kwargs): kwargs = self.get_test_runner_kwargs() runner = self.test_runner(**kwargs) return runner.run(suite) def teardown_databases(self, old_config, **kwargs): """Destroy all the non-mirror databases.""" _teardown_databases( old_config, verbosity=self.verbosity, parallel=self.parallel, keepdb=self.keepdb, ) def teardown_test_environment(self, **kwargs): unittest.removeHandler() teardown_test_environment() def suite_result(self, suite, result, **kwargs): return len(result.failures) + len(result.errors) def _get_databases(self, suite): databases = set() for test in suite: if isinstance(test, unittest.TestCase): test_databases = getattr(test, 'databases', None) if test_databases == '__all__': return set(connections) if test_databases: databases.update(test_databases) else: databases.update(self._get_databases(test)) return databases def get_databases(self, suite): databases = self._get_databases(suite) if self.verbosity >= 2: unused_databases = [alias for alias in connections if alias not in databases] if unused_databases: print('Skipping setup of unused database(s): %s.' % ', '.join(sorted(unused_databases))) return databases def run_tests(self, test_labels, extra_tests=None, **kwargs): """ Run the unit tests for all the test labels in the provided list. Test labels should be dotted Python paths to test modules, test classes, or test methods. A list of 'extra' tests may also be provided; these tests will be added to the test suite. Return the number of tests that failed. """ self.setup_test_environment() suite = self.build_suite(test_labels, extra_tests) databases = self.get_databases(suite) old_config = self.setup_databases(aliases=databases) run_failed = False try: self.run_checks(databases) result = self.run_suite(suite) except Exception: run_failed = True raise finally: try: self.teardown_databases(old_config) self.teardown_test_environment() except Exception: # Silence teardown exceptions if an exception was raised during # runs to avoid shadowing it. 
                if not run_failed:
                    raise

        return self.suite_result(suite, result)


def is_discoverable(label):
    """
    Check if a test label points to a Python package or file directory.

    Relative labels like "." and ".." are seen as directories.
    """
    try:
        mod = import_module(label)
    except (ImportError, TypeError):
        pass
    else:
        return hasattr(mod, '__path__')

    return os.path.isdir(os.path.abspath(label))


def reorder_suite(suite, classes, reverse=False):
    """
    Reorder a test suite by test type.

    `classes` is a sequence of types

    All tests of type classes[0] are placed first, then tests of
    type classes[1], etc. Tests with no match in classes are placed last.

    If `reverse` is True, sort tests within classes in opposite order but
    don't reverse test classes.
    """
    class_count = len(classes)
    suite_class = type(suite)
    bins = [OrderedSet() for i in range(class_count + 1)]
    partition_suite_by_type(suite, classes, bins, reverse=reverse)
    reordered_suite = suite_class()
    for i in range(class_count + 1):
        reordered_suite.addTests(bins[i])
    return reordered_suite


def partition_suite_by_type(suite, classes, bins, reverse=False):
    """
    Partition a test suite by test type. Also prevent duplicated tests.

    classes is a sequence of types
    bins is a sequence of TestSuites, one more than classes
    reverse changes the ordering of tests within bins

    Tests of type classes[i] are added to bins[i],
    tests with no match found in classes are placed in bins[-1]
    """
    suite_class = type(suite)
    if reverse:
        suite = reversed(tuple(suite))
    for test in suite:
        if isinstance(test, suite_class):
            partition_suite_by_type(test, classes, bins, reverse=reverse)
        else:
            for i in range(len(classes)):
                if isinstance(test, classes[i]):
                    bins[i].add(test)
                    break
            else:
                bins[-1].add(test)


def partition_suite_by_case(suite):
    """Partition a test suite by test case, preserving the order of tests."""
    groups = []
    suite_class = type(suite)
    for test_type, test_group in itertools.groupby(suite, type):
        if issubclass(test_type, unittest.TestCase):
            groups.append(suite_class(test_group))
        else:
            for item in test_group:
                groups.extend(partition_suite_by_case(item))
    return groups


def filter_tests_by_tags(suite, tags, exclude_tags):
    suite_class = type(suite)
    filtered_suite = suite_class()

    for test in suite:
        if isinstance(test, suite_class):
            filtered_suite.addTests(filter_tests_by_tags(test, tags, exclude_tags))
        else:
            test_tags = set(getattr(test, 'tags', set()))
            test_fn_name = getattr(test, '_testMethodName', str(test))
            test_fn = getattr(test, test_fn_name, test)
            test_fn_tags = set(getattr(test_fn, 'tags', set()))
            all_tags = test_tags.union(test_fn_tags)
            matched_tags = all_tags.intersection(tags)
            if (matched_tags or not tags) and not all_tags.intersection(exclude_tags):
                filtered_suite.addTest(test)

    return filtered_suite
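

# Illustrative sketch (not part of this module): driving DiscoverRunner
# programmatically, e.g. from a custom CI script. This helper is
# hypothetical; it assumes DJANGO_SETTINGS_MODULE is configured and
# django.setup() has already been called, and 'myapp.tests' is a
# placeholder test label.
def _example_run_tests():
    """Run a test label and return the failure count from suite_result()."""
    runner = DiscoverRunner(verbosity=2, failfast=False, parallel=1)
    # run_tests() handles environment setup, database creation, the run
    # itself, and teardown; it returns the number of failed/errored tests.
    return runner.run_tests(['myapp.tests'])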
import asyncio import difflib import json import posixpath import sys import threading import unittest import warnings from collections import Counter from contextlib import contextmanager from copy import copy, deepcopy from difflib import get_close_matches from functools import wraps from unittest.suite import _DebugResult from unittest.util import safe_repr from urllib.parse import ( parse_qsl, unquote, urlencode, urljoin, urlparse, urlsplit, urlunparse, ) from urllib.request import url2pathname from asgiref.sync import async_to_sync from django.apps import apps from django.conf import settings from django.core import mail from django.core.exceptions import ImproperlyConfigured, ValidationError from django.core.files import locks from django.core.handlers.wsgi import WSGIHandler, get_path_info from django.core.management import call_command from django.core.management.color import no_style from django.core.management.sql import emit_post_migrate_signal from django.core.servers.basehttp import ThreadedWSGIServer, WSGIRequestHandler from django.db import DEFAULT_DB_ALIAS, connection, connections, transaction from django.forms.fields import CharField from django.http import QueryDict from django.http.request import split_domain_port, validate_host from django.test.client import AsyncClient, Client from django.test.html import HTMLParseError, parse_html from django.test.signals import setting_changed, template_rendered from django.test.utils import ( CaptureQueriesContext, ContextList, compare_xml, modify_settings, override_settings, ) from django.utils.deprecation import RemovedInDjango41Warning from django.utils.functional import classproperty from django.views.static import serve __all__ = ('TestCase', 'TransactionTestCase', 'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature') def to_list(value): """ Put value into a list if it's not already one. Return an empty list if value is None. """ if value is None: value = [] elif not isinstance(value, list): value = [value] return value def assert_and_parse_html(self, html, user_msg, msg): try: dom = parse_html(html) except HTMLParseError as e: standardMsg = '%s\n%s' % (msg, e) self.fail(self._formatMessage(user_msg, standardMsg)) return dom class _AssertNumQueriesContext(CaptureQueriesContext): def __init__(self, test_case, num, connection): self.test_case = test_case self.num = num super().__init__(connection) def __exit__(self, exc_type, exc_value, traceback): super().__exit__(exc_type, exc_value, traceback) if exc_type is not None: return executed = len(self) self.test_case.assertEqual( executed, self.num, "%d queries executed, %d expected\nCaptured queries were:\n%s" % ( executed, self.num, '\n'.join( '%d. %s' % (i, query['sql']) for i, query in enumerate(self.captured_queries, start=1) ) ) ) class _AssertTemplateUsedContext: def __init__(self, test_case, template_name): self.test_case = test_case self.template_name = template_name self.rendered_templates = [] self.rendered_template_names = [] self.context = ContextList() def on_template_render(self, sender, signal, template, context, **kwargs): self.rendered_templates.append(template) self.rendered_template_names.append(template.name) self.context.append(copy(context)) def test(self): return self.template_name in self.rendered_template_names def message(self): return '%s was not rendered.' 
% self.template_name def __enter__(self): template_rendered.connect(self.on_template_render) return self def __exit__(self, exc_type, exc_value, traceback): template_rendered.disconnect(self.on_template_render) if exc_type is not None: return if not self.test(): message = self.message() if self.rendered_templates: message += ' Following templates were rendered: %s' % ( ', '.join(self.rendered_template_names) ) else: message += ' No template was rendered.' self.test_case.fail(message) class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext): def test(self): return self.template_name not in self.rendered_template_names def message(self): return '%s was rendered.' % self.template_name class _DatabaseFailure: def __init__(self, wrapped, message): self.wrapped = wrapped self.message = message def __call__(self): raise AssertionError(self.message) class SimpleTestCase(unittest.TestCase): # The class we'll use for the test client self.client. # Can be overridden in derived classes. client_class = Client async_client_class = AsyncClient _overridden_settings = None _modified_settings = None databases = set() _disallowed_database_msg = ( 'Database %(operation)s to %(alias)r are not allowed in SimpleTestCase ' 'subclasses. Either subclass TestCase or TransactionTestCase to ensure ' 'proper test isolation or add %(alias)r to %(test)s.databases to silence ' 'this failure.' ) _disallowed_connection_methods = [ ('connect', 'connections'), ('temporary_connection', 'connections'), ('cursor', 'queries'), ('chunked_cursor', 'queries'), ] @classmethod def setUpClass(cls): super().setUpClass() if cls._overridden_settings: cls._cls_overridden_context = override_settings(**cls._overridden_settings) cls._cls_overridden_context.enable() if cls._modified_settings: cls._cls_modified_context = modify_settings(cls._modified_settings) cls._cls_modified_context.enable() cls._add_databases_failures() @classmethod def _validate_databases(cls): if cls.databases == '__all__': return frozenset(connections) for alias in cls.databases: if alias not in connections: message = '%s.%s.databases refers to %r which is not defined in settings.DATABASES.' % ( cls.__module__, cls.__qualname__, alias, ) close_matches = get_close_matches(alias, list(connections)) if close_matches: message += ' Did you mean %r?' 
% close_matches[0] raise ImproperlyConfigured(message) return frozenset(cls.databases) @classmethod def _add_databases_failures(cls): cls.databases = cls._validate_databases() for alias in connections: if alias in cls.databases: continue connection = connections[alias] for name, operation in cls._disallowed_connection_methods: message = cls._disallowed_database_msg % { 'test': '%s.%s' % (cls.__module__, cls.__qualname__), 'alias': alias, 'operation': operation, } method = getattr(connection, name) setattr(connection, name, _DatabaseFailure(method, message)) @classmethod def _remove_databases_failures(cls): for alias in connections: if alias in cls.databases: continue connection = connections[alias] for name, _ in cls._disallowed_connection_methods: method = getattr(connection, name) setattr(connection, name, method.wrapped) @classmethod def tearDownClass(cls): cls._remove_databases_failures() if hasattr(cls, '_cls_modified_context'): cls._cls_modified_context.disable() delattr(cls, '_cls_modified_context') if hasattr(cls, '_cls_overridden_context'): cls._cls_overridden_context.disable() delattr(cls, '_cls_overridden_context') super().tearDownClass() def __call__(self, result=None): """ Wrapper around default __call__ method to perform common Django test set up. This means that user-defined Test Cases aren't required to include a call to super().setUp(). """ self._setup_and_call(result) def debug(self): """Perform the same as __call__(), without catching the exception.""" debug_result = _DebugResult() self._setup_and_call(debug_result, debug=True) def _setup_and_call(self, result, debug=False): """ Perform the following in order: pre-setup, run test, post-teardown, skipping pre/post hooks if test is set to be skipped. If debug=True, reraise any errors in setup and use super().debug() instead of __call__() to run the test. """ testMethod = getattr(self, self._testMethodName) skipped = ( getattr(self.__class__, "__unittest_skip__", False) or getattr(testMethod, "__unittest_skip__", False) ) # Convert async test methods. if asyncio.iscoroutinefunction(testMethod): setattr(self, self._testMethodName, async_to_sync(testMethod)) if not skipped: try: self._pre_setup() except Exception: if debug: raise result.addError(self, sys.exc_info()) return if debug: super().debug() else: super().__call__(result) if not skipped: try: self._post_teardown() except Exception: if debug: raise result.addError(self, sys.exc_info()) return def _pre_setup(self): """ Perform pre-test setup: * Create a test client. * Clear the mail test outbox. """ self.client = self.client_class() self.async_client = self.async_client_class() mail.outbox = [] def _post_teardown(self): """Perform post-test things.""" pass def settings(self, **kwargs): """ A context manager that temporarily sets a setting and reverts to the original value when exiting the context. """ return override_settings(**kwargs) def modify_settings(self, **kwargs): """ A context manager that temporarily applies changes a list setting and reverts back to the original value when exiting the context. """ return modify_settings(**kwargs) def assertRedirects(self, response, expected_url, status_code=302, target_status_code=200, msg_prefix='', fetch_redirect_response=True): """ Assert that a response redirected to a specific URL and that the redirect URL can be loaded. Won't work for external links since it uses the test client to do a request (use fetch_redirect_response=False to check such links without fetching them). 
""" if msg_prefix: msg_prefix += ": " if hasattr(response, 'redirect_chain'): # The request was a followed redirect self.assertTrue( response.redirect_chain, msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)" % (response.status_code, status_code) ) self.assertEqual( response.redirect_chain[0][1], status_code, msg_prefix + "Initial response didn't redirect as expected: Response code was %d (expected %d)" % (response.redirect_chain[0][1], status_code) ) url, status_code = response.redirect_chain[-1] scheme, netloc, path, query, fragment = urlsplit(url) self.assertEqual( response.status_code, target_status_code, msg_prefix + "Response didn't redirect as expected: Final Response code was %d (expected %d)" % (response.status_code, target_status_code) ) else: # Not a followed redirect self.assertEqual( response.status_code, status_code, msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)" % (response.status_code, status_code) ) url = response.url scheme, netloc, path, query, fragment = urlsplit(url) # Prepend the request path to handle relative path redirects. if not path.startswith('/'): url = urljoin(response.request['PATH_INFO'], url) path = urljoin(response.request['PATH_INFO'], path) if fetch_redirect_response: # netloc might be empty, or in cases where Django tests the # HTTP scheme, the convention is for netloc to be 'testserver'. # Trust both as "internal" URLs here. domain, port = split_domain_port(netloc) if domain and not validate_host(domain, settings.ALLOWED_HOSTS): raise ValueError( "The test client is unable to fetch remote URLs (got %s). " "If the host is served by Django, add '%s' to ALLOWED_HOSTS. " "Otherwise, use assertRedirects(..., fetch_redirect_response=False)." % (url, domain) ) # Get the redirection page, using the same client that was used # to obtain the original response. extra = response.client.extra or {} redirect_response = response.client.get( path, QueryDict(query), secure=(scheme == 'https'), **extra, ) self.assertEqual( redirect_response.status_code, target_status_code, msg_prefix + "Couldn't retrieve redirection page '%s': response code was %d (expected %d)" % (path, redirect_response.status_code, target_status_code) ) self.assertURLEqual( url, expected_url, msg_prefix + "Response redirected to '%s', expected '%s'" % (url, expected_url) ) def assertURLEqual(self, url1, url2, msg_prefix=''): """ Assert that two URLs are the same, ignoring the order of query string parameters except for parameters with the same name. For example, /path/?x=1&y=2 is equal to /path/?y=2&x=1, but /path/?a=1&a=2 isn't equal to /path/?a=2&a=1. """ def normalize(url): """Sort the URL's query string parameters.""" url = str(url) # Coerce reverse_lazy() URLs. scheme, netloc, path, params, query, fragment = urlparse(url) query_parts = sorted(parse_qsl(query)) return urlunparse((scheme, netloc, path, params, urlencode(query_parts), fragment)) self.assertEqual( normalize(url1), normalize(url2), msg_prefix + "Expected '%s' to equal '%s'." % (url1, url2) ) def _assert_contains(self, response, text, status_code, msg_prefix, html): # If the response supports deferred rendering and hasn't been rendered # yet, then ensure that it does get rendered before proceeding further. 
        if hasattr(response, 'render') and callable(response.render) and not response.is_rendered:
            response.render()

        if msg_prefix:
            msg_prefix += ": "

        self.assertEqual(
            response.status_code, status_code,
            msg_prefix + "Couldn't retrieve content: Response code was %d"
            " (expected %d)" % (response.status_code, status_code)
        )

        if response.streaming:
            content = b''.join(response.streaming_content)
        else:
            content = response.content
        if not isinstance(text, bytes) or html:
            text = str(text)
            content = content.decode(response.charset)
            text_repr = "'%s'" % text
        else:
            text_repr = repr(text)

        if html:
            content = assert_and_parse_html(self, content, None, "Response's content is not valid HTML:")
            text = assert_and_parse_html(self, text, None, "Second argument is not valid HTML:")
        real_count = content.count(text)
        return (text_repr, real_count, msg_prefix)

    def assertContains(self, response, text, count=None, status_code=200, msg_prefix='', html=False):
        """
        Assert that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected) and that
        ``text`` occurs ``count`` times in the content of the response.
        If ``count`` is None, the count doesn't matter - the assertion is
        true if the text occurs at least once in the response.
        """
        text_repr, real_count, msg_prefix = self._assert_contains(
            response, text, status_code, msg_prefix, html)

        if count is not None:
            self.assertEqual(
                real_count, count,
                msg_prefix + "Found %d instances of %s in response (expected %d)" % (real_count, text_repr, count)
            )
        else:
            self.assertTrue(real_count != 0, msg_prefix + "Couldn't find %s in response" % text_repr)

    def assertNotContains(self, response, text, status_code=200, msg_prefix='', html=False):
        """
        Assert that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected) and that
        ``text`` doesn't occur in the content of the response.
        """
        text_repr, real_count, msg_prefix = self._assert_contains(
            response, text, status_code, msg_prefix, html)

        self.assertEqual(real_count, 0, msg_prefix + "Response should not contain %s" % text_repr)

    def assertFormError(self, response, form, field, errors, msg_prefix=''):
        """
        Assert that a form used to render the response has a specific field
        error.
        """
        if msg_prefix:
            msg_prefix += ": "

        # Put context(s) into a list to simplify processing.
        contexts = to_list(response.context)
        if not contexts:
            self.fail(msg_prefix + "Response did not use any contexts to render the response")

        # Put error(s) into a list to simplify processing.
        errors = to_list(errors)

        # Search all contexts for the error.
found_form = False for i, context in enumerate(contexts): if form not in context: continue found_form = True for err in errors: if field: if field in context[form].errors: field_errors = context[form].errors[field] self.assertTrue( err in field_errors, msg_prefix + "The field '%s' on form '%s' in" " context %d does not contain the error '%s'" " (actual errors: %s)" % (field, form, i, err, repr(field_errors)) ) elif field in context[form].fields: self.fail( msg_prefix + "The field '%s' on form '%s' in context %d contains no errors" % (field, form, i) ) else: self.fail( msg_prefix + "The form '%s' in context %d does not contain the field '%s'" % (form, i, field) ) else: non_field_errors = context[form].non_field_errors() self.assertTrue( err in non_field_errors, msg_prefix + "The form '%s' in context %d does not" " contain the non-field error '%s'" " (actual errors: %s)" % (form, i, err, non_field_errors or 'none') ) if not found_form: self.fail(msg_prefix + "The form '%s' was not used to render the response" % form) def assertFormsetError(self, response, formset, form_index, field, errors, msg_prefix=''): """ Assert that a formset used to render the response has a specific error. For field errors, specify the ``form_index`` and the ``field``. For non-field errors, specify the ``form_index`` and the ``field`` as None. For non-form errors, specify ``form_index`` as None and the ``field`` as None. """ # Add punctuation to msg_prefix if msg_prefix: msg_prefix += ": " # Put context(s) into a list to simplify processing. contexts = to_list(response.context) if not contexts: self.fail(msg_prefix + 'Response did not use any contexts to ' 'render the response') # Put error(s) into a list to simplify processing. errors = to_list(errors) # Search all contexts for the error. found_formset = False for i, context in enumerate(contexts): if formset not in context: continue found_formset = True for err in errors: if field is not None: if field in context[formset].forms[form_index].errors: field_errors = context[formset].forms[form_index].errors[field] self.assertTrue( err in field_errors, msg_prefix + "The field '%s' on formset '%s', " "form %d in context %d does not contain the " "error '%s' (actual errors: %s)" % (field, formset, form_index, i, err, repr(field_errors)) ) elif field in context[formset].forms[form_index].fields: self.fail( msg_prefix + "The field '%s' on formset '%s', form %d in context %d contains no errors" % (field, formset, form_index, i) ) else: self.fail( msg_prefix + "The formset '%s', form %d in context %d does not contain the field '%s'" % (formset, form_index, i, field) ) elif form_index is not None: non_field_errors = context[formset].forms[form_index].non_field_errors() self.assertFalse( not non_field_errors, msg_prefix + "The formset '%s', form %d in context %d " "does not contain any non-field errors." % (formset, form_index, i) ) self.assertTrue( err in non_field_errors, msg_prefix + "The formset '%s', form %d in context %d " "does not contain the non-field error '%s' (actual errors: %s)" % (formset, form_index, i, err, repr(non_field_errors)) ) else: non_form_errors = context[formset].non_form_errors() self.assertFalse( not non_form_errors, msg_prefix + "The formset '%s' in context %d does not " "contain any non-form errors." 
% (formset, i) ) self.assertTrue( err in non_form_errors, msg_prefix + "The formset '%s' in context %d does not " "contain the non-form error '%s' (actual errors: %s)" % (formset, i, err, repr(non_form_errors)) ) if not found_formset: self.fail(msg_prefix + "The formset '%s' was not used to render the response" % formset) def _assert_template_used(self, response, template_name, msg_prefix): if response is None and template_name is None: raise TypeError('response and/or template_name argument must be provided') if msg_prefix: msg_prefix += ": " if template_name is not None and response is not None and not hasattr(response, 'templates'): raise ValueError( "assertTemplateUsed() and assertTemplateNotUsed() are only " "usable on responses fetched using the Django test Client." ) if not hasattr(response, 'templates') or (response is None and template_name): if response: template_name = response response = None # use this template with context manager return template_name, None, msg_prefix template_names = [t.name for t in response.templates if t.name is not None] return None, template_names, msg_prefix def assertTemplateUsed(self, response=None, template_name=None, msg_prefix='', count=None): """ Assert that the template with the provided name was used in rendering the response. Also usable as context manager. """ context_mgr_template, template_names, msg_prefix = self._assert_template_used( response, template_name, msg_prefix) if context_mgr_template: # Use assertTemplateUsed as context manager. return _AssertTemplateUsedContext(self, context_mgr_template) if not template_names: self.fail(msg_prefix + "No templates used to render the response") self.assertTrue( template_name in template_names, msg_prefix + "Template '%s' was not a template used to render" " the response. Actual template(s) used: %s" % (template_name, ', '.join(template_names)) ) if count is not None: self.assertEqual( template_names.count(template_name), count, msg_prefix + "Template '%s' was expected to be rendered %d " "time(s) but was actually rendered %d time(s)." % (template_name, count, template_names.count(template_name)) ) def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''): """ Assert that the template with the provided name was NOT used in rendering the response. Also usable as context manager. """ context_mgr_template, template_names, msg_prefix = self._assert_template_used( response, template_name, msg_prefix ) if context_mgr_template: # Use assertTemplateNotUsed as context manager. return _AssertTemplateNotUsedContext(self, context_mgr_template) self.assertFalse( template_name in template_names, msg_prefix + "Template '%s' was used unexpectedly in rendering the response" % template_name ) @contextmanager def _assert_raises_or_warns_cm(self, func, cm_attr, expected_exception, expected_message): with func(expected_exception) as cm: yield cm self.assertIn(expected_message, str(getattr(cm, cm_attr))) def _assertFooMessage(self, func, cm_attr, expected_exception, expected_message, *args, **kwargs): callable_obj = None if args: callable_obj, *args = args cm = self._assert_raises_or_warns_cm(func, cm_attr, expected_exception, expected_message) # Assertion used in context manager fashion. if callable_obj is None: return cm # Assertion was passed a callable. with cm: callable_obj(*args, **kwargs) def assertRaisesMessage(self, expected_exception, expected_message, *args, **kwargs): """ Assert that expected_message is found in the message of a raised exception. 
Args: expected_exception: Exception class expected to be raised. expected_message: expected error message string value. args: Function to be called and extra positional args. kwargs: Extra kwargs. """ return self._assertFooMessage( self.assertRaises, 'exception', expected_exception, expected_message, *args, **kwargs ) def assertWarnsMessage(self, expected_warning, expected_message, *args, **kwargs): """ Same as assertRaisesMessage but for assertWarns() instead of assertRaises(). """ return self._assertFooMessage( self.assertWarns, 'warning', expected_warning, expected_message, *args, **kwargs ) def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None, field_kwargs=None, empty_value=''): """ Assert that a form field behaves correctly with various inputs. Args: fieldclass: the class of the field to be tested. valid: a dictionary mapping valid inputs to their expected cleaned values. invalid: a dictionary mapping invalid inputs to one or more raised error messages. field_args: the args passed to instantiate the field field_kwargs: the kwargs passed to instantiate the field empty_value: the expected clean output for inputs in empty_values """ if field_args is None: field_args = [] if field_kwargs is None: field_kwargs = {} required = fieldclass(*field_args, **field_kwargs) optional = fieldclass(*field_args, **{**field_kwargs, 'required': False}) # test valid inputs for input, output in valid.items(): self.assertEqual(required.clean(input), output) self.assertEqual(optional.clean(input), output) # test invalid inputs for input, errors in invalid.items(): with self.assertRaises(ValidationError) as context_manager: required.clean(input) self.assertEqual(context_manager.exception.messages, errors) with self.assertRaises(ValidationError) as context_manager: optional.clean(input) self.assertEqual(context_manager.exception.messages, errors) # test required inputs error_required = [required.error_messages['required']] for e in required.empty_values: with self.assertRaises(ValidationError) as context_manager: required.clean(e) self.assertEqual(context_manager.exception.messages, error_required) self.assertEqual(optional.clean(e), empty_value) # test that max_length and min_length are always accepted if issubclass(fieldclass, CharField): field_kwargs.update({'min_length': 2, 'max_length': 20}) self.assertIsInstance(fieldclass(*field_args, **field_kwargs), fieldclass) def assertHTMLEqual(self, html1, html2, msg=None): """ Assert that two HTML snippets are semantically the same. Whitespace in most cases is ignored, and attribute ordering is not significant. The arguments must be valid HTML. 
""" dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:') dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:') if dom1 != dom2: standardMsg = '%s != %s' % ( safe_repr(dom1, True), safe_repr(dom2, True)) diff = ('\n' + '\n'.join(difflib.ndiff( str(dom1).splitlines(), str(dom2).splitlines(), ))) standardMsg = self._truncateMessage(standardMsg, diff) self.fail(self._formatMessage(msg, standardMsg)) def assertHTMLNotEqual(self, html1, html2, msg=None): """Assert that two HTML snippets are not semantically equivalent.""" dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:') dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:') if dom1 == dom2: standardMsg = '%s == %s' % ( safe_repr(dom1, True), safe_repr(dom2, True)) self.fail(self._formatMessage(msg, standardMsg)) def assertInHTML(self, needle, haystack, count=None, msg_prefix=''): needle = assert_and_parse_html(self, needle, None, 'First argument is not valid HTML:') haystack = assert_and_parse_html(self, haystack, None, 'Second argument is not valid HTML:') real_count = haystack.count(needle) if count is not None: self.assertEqual( real_count, count, msg_prefix + "Found %d instances of '%s' in response (expected %d)" % (real_count, needle, count) ) else: self.assertTrue(real_count != 0, msg_prefix + "Couldn't find '%s' in response" % needle) def assertJSONEqual(self, raw, expected_data, msg=None): """ Assert that the JSON fragments raw and expected_data are equal. Usual JSON non-significant whitespace rules apply as the heavyweight is delegated to the json library. """ try: data = json.loads(raw) except json.JSONDecodeError: self.fail("First argument is not valid JSON: %r" % raw) if isinstance(expected_data, str): try: expected_data = json.loads(expected_data) except ValueError: self.fail("Second argument is not valid JSON: %r" % expected_data) self.assertEqual(data, expected_data, msg=msg) def assertJSONNotEqual(self, raw, expected_data, msg=None): """ Assert that the JSON fragments raw and expected_data are not equal. Usual JSON non-significant whitespace rules apply as the heavyweight is delegated to the json library. """ try: data = json.loads(raw) except json.JSONDecodeError: self.fail("First argument is not valid JSON: %r" % raw) if isinstance(expected_data, str): try: expected_data = json.loads(expected_data) except json.JSONDecodeError: self.fail("Second argument is not valid JSON: %r" % expected_data) self.assertNotEqual(data, expected_data, msg=msg) def assertXMLEqual(self, xml1, xml2, msg=None): """ Assert that two XML snippets are semantically the same. Whitespace in most cases is ignored and attribute ordering is not significant. The arguments must be valid XML. """ try: result = compare_xml(xml1, xml2) except Exception as e: standardMsg = 'First or second argument is not valid XML\n%s' % e self.fail(self._formatMessage(msg, standardMsg)) else: if not result: standardMsg = '%s != %s' % (safe_repr(xml1, True), safe_repr(xml2, True)) diff = ('\n' + '\n'.join( difflib.ndiff(xml1.splitlines(), xml2.splitlines()) )) standardMsg = self._truncateMessage(standardMsg, diff) self.fail(self._formatMessage(msg, standardMsg)) def assertXMLNotEqual(self, xml1, xml2, msg=None): """ Assert that two XML snippets are not semantically equivalent. Whitespace in most cases is ignored and attribute ordering is not significant. The arguments must be valid XML. 
""" try: result = compare_xml(xml1, xml2) except Exception as e: standardMsg = 'First or second argument is not valid XML\n%s' % e self.fail(self._formatMessage(msg, standardMsg)) else: if result: standardMsg = '%s == %s' % (safe_repr(xml1, True), safe_repr(xml2, True)) self.fail(self._formatMessage(msg, standardMsg)) class TransactionTestCase(SimpleTestCase): # Subclasses can ask for resetting of auto increment sequence before each # test case reset_sequences = False # Subclasses can enable only a subset of apps for faster tests available_apps = None # Subclasses can define fixtures which will be automatically installed. fixtures = None databases = {DEFAULT_DB_ALIAS} _disallowed_database_msg = ( 'Database %(operation)s to %(alias)r are not allowed in this test. ' 'Add %(alias)r to %(test)s.databases to ensure proper test isolation ' 'and silence this failure.' ) # If transactions aren't available, Django will serialize the database # contents into a fixture during setup and flush and reload them # during teardown (as flush does not restore data from migrations). # This can be slow; this flag allows enabling on a per-case basis. serialized_rollback = False def _pre_setup(self): """ Perform pre-test setup: * If the class has an 'available_apps' attribute, restrict the app registry to these applications, then fire the post_migrate signal -- it must run with the correct set of applications for the test case. * If the class has a 'fixtures' attribute, install those fixtures. """ super()._pre_setup() if self.available_apps is not None: apps.set_available_apps(self.available_apps) setting_changed.send( sender=settings._wrapped.__class__, setting='INSTALLED_APPS', value=self.available_apps, enter=True, ) for db_name in self._databases_names(include_mirrors=False): emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name) try: self._fixture_setup() except Exception: if self.available_apps is not None: apps.unset_available_apps() setting_changed.send( sender=settings._wrapped.__class__, setting='INSTALLED_APPS', value=settings.INSTALLED_APPS, enter=False, ) raise # Clear the queries_log so that it's less likely to overflow (a single # test probably won't execute 9K queries). If queries_log overflows, # then assertNumQueries() doesn't work. for db_name in self._databases_names(include_mirrors=False): connections[db_name].queries_log.clear() @classmethod def _databases_names(cls, include_mirrors=True): # Only consider allowed database aliases, including mirrors or not. return [ alias for alias in connections if alias in cls.databases and ( include_mirrors or not connections[alias].settings_dict['TEST']['MIRROR'] ) ] def _reset_sequences(self, db_name): conn = connections[db_name] if conn.features.supports_sequence_reset: sql_list = conn.ops.sequence_reset_by_name_sql( no_style(), conn.introspection.sequence_list()) if sql_list: with transaction.atomic(using=db_name): with conn.cursor() as cursor: for sql in sql_list: cursor.execute(sql) def _fixture_setup(self): for db_name in self._databases_names(include_mirrors=False): # Reset sequences if self.reset_sequences: self._reset_sequences(db_name) # Provide replica initial data from migrated apps, if needed. 
if self.serialized_rollback and hasattr(connections[db_name], "_test_serialized_contents"): if self.available_apps is not None: apps.unset_available_apps() connections[db_name].creation.deserialize_db_from_string( connections[db_name]._test_serialized_contents ) if self.available_apps is not None: apps.set_available_apps(self.available_apps) if self.fixtures: # We have to use this slightly awkward syntax due to the fact # that we're using *args and **kwargs together. call_command('loaddata', *self.fixtures, **{'verbosity': 0, 'database': db_name}) def _should_reload_connections(self): return True def _post_teardown(self): """ Perform post-test things: * Flush the contents of the database to leave a clean slate. If the class has an 'available_apps' attribute, don't fire post_migrate. * Force-close the connection so the next test gets a clean cursor. """ try: self._fixture_teardown() super()._post_teardown() if self._should_reload_connections(): # Some DB cursors include SQL statements as part of cursor # creation. If you have a test that does a rollback, the effect # of these statements is lost, which can affect the operation of # tests (e.g., losing a timezone setting causing objects to be # created with the wrong time). To make sure this doesn't # happen, get a clean connection at the start of every test. for conn in connections.all(): conn.close() finally: if self.available_apps is not None: apps.unset_available_apps() setting_changed.send(sender=settings._wrapped.__class__, setting='INSTALLED_APPS', value=settings.INSTALLED_APPS, enter=False) def _fixture_teardown(self): # Allow TRUNCATE ... CASCADE and don't emit the post_migrate signal # when flushing only a subset of the apps for db_name in self._databases_names(include_mirrors=False): # Flush the database inhibit_post_migrate = ( self.available_apps is not None or ( # Inhibit the post_migrate signal when using serialized # rollback to avoid trying to recreate the serialized data. self.serialized_rollback and hasattr(connections[db_name], '_test_serialized_contents') ) ) call_command('flush', verbosity=0, interactive=False, database=db_name, reset_sequences=False, allow_cascade=self.available_apps is not None, inhibit_post_migrate=inhibit_post_migrate) def assertQuerysetEqual(self, qs, values, transform=repr, ordered=True, msg=None): items = map(transform, qs) if not ordered: return self.assertEqual(Counter(items), Counter(values), msg=msg) values = list(values) # For example qs.iterator() could be passed as qs, but it does not # have 'ordered' attribute. if len(values) > 1 and hasattr(qs, 'ordered') and not qs.ordered: raise ValueError("Trying to compare non-ordered queryset " "against more than one ordered values") return self.assertEqual(list(items), values, msg=msg) def assertNumQueries(self, num, func=None, *args, using=DEFAULT_DB_ALIAS, **kwargs): conn = connections[using] context = _AssertNumQueriesContext(self, num, conn) if func is None: return context with context: func(*args, **kwargs) def connections_support_transactions(aliases=None): """ Return whether or not all (or specified) connections support transactions. """ conns = connections.all() if aliases is None else (connections[alias] for alias in aliases) return all(conn.features.supports_transactions for conn in conns) class TestData: """ Descriptor to provide TestCase instance isolation for attributes assigned during the setUpTestData() phase. 
Allow safe alteration of objects assigned in setUpTestData() by test methods by exposing deep copies instead of the original objects. Objects are deep copied using a memo kept on the test case instance in order to maintain their original relationships. """ memo_attr = '_testdata_memo' def __init__(self, name, data): self.name = name self.data = data def get_memo(self, testcase): try: memo = getattr(testcase, self.memo_attr) except AttributeError: memo = {} setattr(testcase, self.memo_attr, memo) return memo def __get__(self, instance, owner): if instance is None: return self.data memo = self.get_memo(instance) try: data = deepcopy(self.data, memo) except TypeError: # RemovedInDjango41Warning. msg = ( "Assigning objects which don't support copy.deepcopy() during " "setUpTestData() is deprecated. Either assign the %s " "attribute during setUpClass() or setUp(), or add support for " "deepcopy() to %s.%s.%s." ) % ( self.name, owner.__module__, owner.__qualname__, self.name, ) warnings.warn(msg, category=RemovedInDjango41Warning, stacklevel=2) data = self.data setattr(instance, self.name, data) return data def __repr__(self): return '<TestData: name=%r, data=%r>' % (self.name, self.data) class TestCase(TransactionTestCase): """ Similar to TransactionTestCase, but use `transaction.atomic()` to achieve test isolation. In most situations, TestCase should be preferred to TransactionTestCase as it allows faster execution. However, there are some situations where using TransactionTestCase might be necessary (e.g. testing some transactional behavior). On database backends with no transaction support, TestCase behaves as TransactionTestCase. """ @classmethod def _enter_atomics(cls): """Open atomic blocks for multiple databases.""" atomics = {} for db_name in cls._databases_names(): atomics[db_name] = transaction.atomic(using=db_name) atomics[db_name].__enter__() return atomics @classmethod def _rollback_atomics(cls, atomics): """Rollback atomic blocks opened by the previous method.""" for db_name in reversed(cls._databases_names()): transaction.set_rollback(True, using=db_name) atomics[db_name].__exit__(None, None, None) @classmethod def _databases_support_transactions(cls): return connections_support_transactions(cls.databases) @classmethod def setUpClass(cls): super().setUpClass() if not cls._databases_support_transactions(): return cls.cls_atomics = cls._enter_atomics() if cls.fixtures: for db_name in cls._databases_names(include_mirrors=False): try: call_command('loaddata', *cls.fixtures, **{'verbosity': 0, 'database': db_name}) except Exception: cls._rollback_atomics(cls.cls_atomics) cls._remove_databases_failures() raise pre_attrs = cls.__dict__.copy() try: cls.setUpTestData() except Exception: cls._rollback_atomics(cls.cls_atomics) cls._remove_databases_failures() raise for name, value in cls.__dict__.items(): if value is not pre_attrs.get(name): setattr(cls, name, TestData(name, value)) @classmethod def tearDownClass(cls): if cls._databases_support_transactions(): cls._rollback_atomics(cls.cls_atomics) for conn in connections.all(): conn.close() super().tearDownClass() @classmethod def setUpTestData(cls): """Load initial data for the TestCase.""" pass def _should_reload_connections(self): if self._databases_support_transactions(): return False return super()._should_reload_connections() def _fixture_setup(self): if not self._databases_support_transactions(): # If the backend does not support transactions, we should reload # class data before each test self.setUpTestData() return 
super()._fixture_setup() assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances' self.atomics = self._enter_atomics() def _fixture_teardown(self): if not self._databases_support_transactions(): return super()._fixture_teardown() try: for db_name in reversed(self._databases_names()): if self._should_check_constraints(connections[db_name]): connections[db_name].check_constraints() finally: self._rollback_atomics(self.atomics) def _should_check_constraints(self, connection): return ( connection.features.can_defer_constraint_checks and not connection.needs_rollback and connection.is_usable() ) @classmethod @contextmanager def captureOnCommitCallbacks(cls, *, using=DEFAULT_DB_ALIAS, execute=False): """Context manager to capture transaction.on_commit() callbacks.""" callbacks = [] start_count = len(connections[using].run_on_commit) try: yield callbacks finally: run_on_commit = connections[using].run_on_commit[start_count:] callbacks[:] = [func for sids, func in run_on_commit] if execute: for callback in callbacks: callback() class CheckCondition: """Descriptor class for deferred condition checking.""" def __init__(self, *conditions): self.conditions = conditions def add_condition(self, condition, reason): return self.__class__(*self.conditions, (condition, reason)) def __get__(self, instance, cls=None): # Trigger access for all bases. if any(getattr(base, '__unittest_skip__', False) for base in cls.__bases__): return True for condition, reason in self.conditions: if condition(): # Override this descriptor's value and set the skip reason. cls.__unittest_skip__ = True cls.__unittest_skip_why__ = reason return True return False def _deferredSkip(condition, reason, name): def decorator(test_func): nonlocal condition if not (isinstance(test_func, type) and issubclass(test_func, unittest.TestCase)): @wraps(test_func) def skip_wrapper(*args, **kwargs): if (args and isinstance(args[0], unittest.TestCase) and connection.alias not in getattr(args[0], 'databases', {})): raise ValueError( "%s cannot be used on %s as %s doesn't allow queries " "against the %r database." % ( name, args[0], args[0].__class__.__qualname__, connection.alias, ) ) if condition(): raise unittest.SkipTest(reason) return test_func(*args, **kwargs) test_item = skip_wrapper else: # Assume a class is decorated test_item = test_func databases = getattr(test_item, 'databases', None) if not databases or connection.alias not in databases: # Defer raising to allow importing test class's module. def condition(): raise ValueError( "%s cannot be used on %s as it doesn't allow queries " "against the '%s' database." % ( name, test_item, connection.alias, ) ) # Retrieve the possibly existing value from the class's dict to # avoid triggering the descriptor. 
skip = test_func.__dict__.get('__unittest_skip__') if isinstance(skip, CheckCondition): test_item.__unittest_skip__ = skip.add_condition(condition, reason) elif skip is not True: test_item.__unittest_skip__ = CheckCondition((condition, reason)) return test_item return decorator def skipIfDBFeature(*features): """Skip a test if a database has at least one of the named features.""" return _deferredSkip( lambda: any(getattr(connection.features, feature, False) for feature in features), "Database has feature(s) %s" % ", ".join(features), 'skipIfDBFeature', ) def skipUnlessDBFeature(*features): """Skip a test unless a database has all the named features.""" return _deferredSkip( lambda: not all(getattr(connection.features, feature, False) for feature in features), "Database doesn't support feature(s): %s" % ", ".join(features), 'skipUnlessDBFeature', ) def skipUnlessAnyDBFeature(*features): """Skip a test unless a database has any of the named features.""" return _deferredSkip( lambda: not any(getattr(connection.features, feature, False) for feature in features), "Database doesn't support any of the feature(s): %s" % ", ".join(features), 'skipUnlessAnyDBFeature', ) class QuietWSGIRequestHandler(WSGIRequestHandler): """ A WSGIRequestHandler that doesn't log to standard output any of the requests received, so as to not clutter the test result output. """ def log_message(*args): pass class FSFilesHandler(WSGIHandler): """ WSGI middleware that intercepts calls to a directory, as defined by one of the *_ROOT settings, and serves those files, publishing them under *_URL. """ def __init__(self, application): self.application = application self.base_url = urlparse(self.get_base_url()) super().__init__() def _should_handle(self, path): """ Check if the path should be handled. Ignore the path if: * the host is provided as part of the base_url * the request's path isn't under the media path (or equal) """ return path.startswith(self.base_url[2]) and not self.base_url[1] def file_path(self, url): """Return the relative path to the file on disk for the given URL.""" relative_url = url[len(self.base_url[2]):] return url2pathname(relative_url) def get_response(self, request): from django.http import Http404 if self._should_handle(request.path): try: return self.serve(request) except Http404: pass return super().get_response(request) def serve(self, request): os_rel_path = self.file_path(request.path) os_rel_path = posixpath.normpath(unquote(os_rel_path)) # Emulate behavior of django.contrib.staticfiles.views.serve() when it # invokes staticfiles' finders functionality. # TODO: Modify if/when that internal API is refactored final_rel_path = os_rel_path.replace('\\', '/').lstrip('/') return serve(request, final_rel_path, document_root=self.get_base_dir()) def __call__(self, environ, start_response): if not self._should_handle(get_path_info(environ)): return self.application(environ, start_response) return super().__call__(environ, start_response) class _StaticFilesHandler(FSFilesHandler): """ Handler for serving static files. A private class that is meant to be used solely as a convenience by LiveServerThread. """ def get_base_dir(self): return settings.STATIC_ROOT def get_base_url(self): return settings.STATIC_URL class _MediaFilesHandler(FSFilesHandler): """ Handler for serving the media files. A private class that is meant to be used solely as a convenience by LiveServerThread. 
""" def get_base_dir(self): return settings.MEDIA_ROOT def get_base_url(self): return settings.MEDIA_URL class LiveServerThread(threading.Thread): """Thread for running a live http server while the tests are running.""" def __init__(self, host, static_handler, connections_override=None, port=0): self.host = host self.port = port self.is_ready = threading.Event() self.error = None self.static_handler = static_handler self.connections_override = connections_override super().__init__() def run(self): """ Set up the live server and databases, and then loop over handling HTTP requests. """ if self.connections_override: # Override this thread's database connections with the ones # provided by the main thread. for alias, conn in self.connections_override.items(): connections[alias] = conn try: # Create the handler for serving static and media files handler = self.static_handler(_MediaFilesHandler(WSGIHandler())) self.httpd = self._create_server() # If binding to port zero, assign the port allocated by the OS. if self.port == 0: self.port = self.httpd.server_address[1] self.httpd.set_app(handler) self.is_ready.set() self.httpd.serve_forever() except Exception as e: self.error = e self.is_ready.set() finally: connections.close_all() def _create_server(self): return ThreadedWSGIServer((self.host, self.port), QuietWSGIRequestHandler, allow_reuse_address=False) def terminate(self): if hasattr(self, 'httpd'): # Stop the WSGI server self.httpd.shutdown() self.httpd.server_close() self.join() class LiveServerTestCase(TransactionTestCase): """ Do basically the same as TransactionTestCase but also launch a live HTTP server in a separate thread so that the tests may use another testing framework, such as Selenium for example, instead of the built-in dummy client. It inherits from TransactionTestCase instead of TestCase because the threads don't share the same transactions (unless if using in-memory sqlite) and each thread needs to commit all their transactions so that the other thread can see the changes. """ host = 'localhost' port = 0 server_thread_class = LiveServerThread static_handler = _StaticFilesHandler @classproperty def live_server_url(cls): return 'http://%s:%s' % (cls.host, cls.server_thread.port) @classproperty def allowed_host(cls): return cls.host @classmethod def setUpClass(cls): super().setUpClass() connections_override = {} for conn in connections.all(): # If using in-memory sqlite databases, pass the connections to # the server thread. if conn.vendor == 'sqlite' and conn.is_in_memory_db(): # Explicitly enable thread-shareability for this connection conn.inc_thread_sharing() connections_override[conn.alias] = conn cls._live_server_modified_settings = modify_settings( ALLOWED_HOSTS={'append': cls.allowed_host}, ) cls._live_server_modified_settings.enable() cls.server_thread = cls._create_server_thread(connections_override) cls.server_thread.daemon = True cls.server_thread.start() # Wait for the live server to be ready cls.server_thread.is_ready.wait() if cls.server_thread.error: # Clean up behind ourselves, since tearDownClass won't get called in # case of errors. cls._tearDownClassInternal() raise cls.server_thread.error @classmethod def _create_server_thread(cls, connections_override): return cls.server_thread_class( cls.host, cls.static_handler, connections_override=connections_override, port=cls.port, ) @classmethod def _tearDownClassInternal(cls): # There may not be a 'server_thread' attribute if setUpClass() for some # reasons has raised an exception. 
if hasattr(cls, 'server_thread'): # Terminate the live server's thread cls.server_thread.terminate() # Restore sqlite in-memory database connections' non-shareability. for conn in cls.server_thread.connections_override.values(): conn.dec_thread_sharing() @classmethod def tearDownClass(cls): cls._tearDownClassInternal() cls._live_server_modified_settings.disable() super().tearDownClass() class SerializeMixin: """ Enforce serialization of TestCases that share a common resource. Define a common 'lockfile' for each set of TestCases to serialize. This file must exist on the filesystem. Place it early in the MRO in order to isolate setUpClass()/tearDownClass(). """ lockfile = None @classmethod def setUpClass(cls): if cls.lockfile is None: raise ValueError( "{}.lockfile isn't set. Set it to a unique value " "in the base class.".format(cls.__name__)) cls._lockfile = open(cls.lockfile) locks.lock(cls._lockfile, locks.LOCK_EX) super().setUpClass() @classmethod def tearDownClass(cls): super().tearDownClass() cls._lockfile.close()
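

# Editor's note: a minimal usage sketch, not part of this module. It combines
# SerializeMixin with TestCase.captureOnCommitCallbacks() as defined above;
# the test class and its body are hypothetical examples.
class ExampleSerializedTests(SerializeMixin, TestCase):
    # Every TestCase sharing this lockfile runs serialized, never in parallel.
    lockfile = __file__

    def test_on_commit_callback_is_captured(self):
        # captureOnCommitCallbacks() collects the transaction.on_commit()
        # hooks registered inside the block; execute=True runs them on exit.
        with self.captureOnCommitCallbacks(execute=True) as callbacks:
            transaction.on_commit(lambda: None)
        self.assertEqual(len(callbacks), 1)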
7911b2c2337e2404108dddf40beb86b1a39e23c74949d08386acf4c3c13c7cbb
import functools import re import sys import types import warnings from pathlib import Path from django.conf import settings from django.http import Http404, HttpResponse, HttpResponseNotFound from django.template import Context, Engine, TemplateDoesNotExist from django.template.defaultfilters import pprint from django.urls import resolve from django.utils import timezone from django.utils.datastructures import MultiValueDict from django.utils.encoding import force_str from django.utils.module_loading import import_string from django.utils.regex_helper import _lazy_re_compile from django.utils.version import get_docs_version # Minimal Django templates engine to render the error templates # regardless of the project's TEMPLATES setting. Templates are # read directly from the filesystem so that the error handler # works even if the template loader is broken. DEBUG_ENGINE = Engine( debug=True, libraries={'i18n': 'django.templatetags.i18n'}, ) CURRENT_DIR = Path(__file__).parent class ExceptionCycleWarning(UserWarning): pass class CallableSettingWrapper: """ Object to wrap callable appearing in settings. * Not to call in the debug page (#21345). * Not to break the debug page if the callable forbidding to set attributes (#23070). """ def __init__(self, callable_setting): self._wrapped = callable_setting def __repr__(self): return repr(self._wrapped) def technical_500_response(request, exc_type, exc_value, tb, status_code=500): """ Create a technical server error response. The last three arguments are the values returned from sys.exc_info() and friends. """ reporter = get_exception_reporter_class(request)(request, exc_type, exc_value, tb) if request.accepts('text/html'): html = reporter.get_traceback_html() return HttpResponse(html, status=status_code, content_type='text/html') else: text = reporter.get_traceback_text() return HttpResponse(text, status=status_code, content_type='text/plain; charset=utf-8') @functools.lru_cache() def get_default_exception_reporter_filter(): # Instantiate the default filter for the first time and cache it. return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)() def get_exception_reporter_filter(request): default_filter = get_default_exception_reporter_filter() return getattr(request, 'exception_reporter_filter', default_filter) def get_exception_reporter_class(request): default_exception_reporter_class = import_string(settings.DEFAULT_EXCEPTION_REPORTER) return getattr(request, 'exception_reporter_class', default_exception_reporter_class) class SafeExceptionReporterFilter: """ Use annotations made by the sensitive_post_parameters and sensitive_variables decorators to filter out sensitive information. """ cleansed_substitute = '********************' hidden_settings = _lazy_re_compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE', flags=re.I) def cleanse_setting(self, key, value): """ Cleanse an individual setting key/value of sensitive content. If the value is a dictionary, recursively cleanse the keys in that dictionary. """ try: if self.hidden_settings.search(key): cleansed = self.cleansed_substitute elif isinstance(value, dict): cleansed = {k: self.cleanse_setting(k, v) for k, v in value.items()} elif isinstance(value, list): cleansed = [self.cleanse_setting('', v) for v in value] elif isinstance(value, tuple): cleansed = tuple([self.cleanse_setting('', v) for v in value]) else: cleansed = value except TypeError: # If the key isn't regex-able, just return as-is. 
cleansed = value if callable(cleansed): cleansed = CallableSettingWrapper(cleansed) return cleansed def get_safe_settings(self): """ Return a dictionary of the settings module with values of sensitive settings replaced with stars (*********). """ settings_dict = {} for k in dir(settings): if k.isupper(): settings_dict[k] = self.cleanse_setting(k, getattr(settings, k)) return settings_dict def get_safe_request_meta(self, request): """ Return a dictionary of request.META with sensitive values redacted. """ if not hasattr(request, 'META'): return {} return {k: self.cleanse_setting(k, v) for k, v in request.META.items()} def is_active(self, request): """ This filter is to add safety in production environments (i.e. DEBUG is False). If DEBUG is True then your site is not safe anyway. This hook is provided as a convenience to easily activate or deactivate the filter on a per request basis. """ return settings.DEBUG is False def get_cleansed_multivaluedict(self, request, multivaluedict): """ Replace the keys in a MultiValueDict marked as sensitive with stars. This mitigates leaking sensitive POST parameters if something like request.POST['nonexistent_key'] throws an exception (#21098). """ sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', []) if self.is_active(request) and sensitive_post_parameters: multivaluedict = multivaluedict.copy() for param in sensitive_post_parameters: if param in multivaluedict: multivaluedict[param] = self.cleansed_substitute return multivaluedict def get_post_parameters(self, request): """ Replace the values of POST parameters marked as sensitive with stars (*********). """ if request is None: return {} else: sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', []) if self.is_active(request) and sensitive_post_parameters: cleansed = request.POST.copy() if sensitive_post_parameters == '__ALL__': # Cleanse all parameters. for k in cleansed: cleansed[k] = self.cleansed_substitute return cleansed else: # Cleanse only the specified parameters. for param in sensitive_post_parameters: if param in cleansed: cleansed[param] = self.cleansed_substitute return cleansed else: return request.POST def cleanse_special_types(self, request, value): try: # If value is lazy or a complex object of another kind, this check # might raise an exception. isinstance checks that lazy # MultiValueDicts will have a return value. is_multivalue_dict = isinstance(value, MultiValueDict) except Exception as e: return '{!r} while evaluating {!r}'.format(e, value) if is_multivalue_dict: # Cleanse MultiValueDicts (request.POST is the one we usually care about) value = self.get_cleansed_multivaluedict(request, value) return value def get_traceback_frame_variables(self, request, tb_frame): """ Replace the values of variables marked as sensitive with stars (*********). """ # Loop through the frame's callers to see if the sensitive_variables # decorator was used. current_frame = tb_frame.f_back sensitive_variables = None while current_frame is not None: if (current_frame.f_code.co_name == 'sensitive_variables_wrapper' and 'sensitive_variables_wrapper' in current_frame.f_locals): # The sensitive_variables decorator was used, so we take note # of the sensitive variables' names. 
wrapper = current_frame.f_locals['sensitive_variables_wrapper'] sensitive_variables = getattr(wrapper, 'sensitive_variables', None) break current_frame = current_frame.f_back cleansed = {} if self.is_active(request) and sensitive_variables: if sensitive_variables == '__ALL__': # Cleanse all variables for name in tb_frame.f_locals: cleansed[name] = self.cleansed_substitute else: # Cleanse specified variables for name, value in tb_frame.f_locals.items(): if name in sensitive_variables: value = self.cleansed_substitute else: value = self.cleanse_special_types(request, value) cleansed[name] = value else: # Potentially cleanse the request and any MultiValueDicts if they # are one of the frame variables. for name, value in tb_frame.f_locals.items(): cleansed[name] = self.cleanse_special_types(request, value) if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper' and 'sensitive_variables_wrapper' in tb_frame.f_locals): # For good measure, obfuscate the decorated function's arguments in # the sensitive_variables decorator's frame, in case the variables # associated with those arguments were meant to be obfuscated from # the decorated function's frame. cleansed['func_args'] = self.cleansed_substitute cleansed['func_kwargs'] = self.cleansed_substitute return cleansed.items() class ExceptionReporter: """Organize and coordinate reporting on exceptions.""" def __init__(self, request, exc_type, exc_value, tb, is_email=False): self.request = request self.filter = get_exception_reporter_filter(self.request) self.exc_type = exc_type self.exc_value = exc_value self.tb = tb self.is_email = is_email self.template_info = getattr(self.exc_value, 'template_debug', None) self.template_does_not_exist = False self.postmortem = None def get_traceback_data(self): """Return a dictionary containing traceback information.""" if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist): self.template_does_not_exist = True self.postmortem = self.exc_value.chain or [self.exc_value] frames = self.get_traceback_frames() for i, frame in enumerate(frames): if 'vars' in frame: frame_vars = [] for k, v in frame['vars']: v = pprint(v) # Trim large blobs of data if len(v) > 4096: v = '%s… <trimmed %d bytes string>' % (v[0:4096], len(v)) frame_vars.append((k, v)) frame['vars'] = frame_vars frames[i] = frame unicode_hint = '' if self.exc_type and issubclass(self.exc_type, UnicodeError): start = getattr(self.exc_value, 'start', None) end = getattr(self.exc_value, 'end', None) if start is not None and end is not None: unicode_str = self.exc_value.args[1] unicode_hint = force_str( unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))], 'ascii', errors='replace' ) from django import get_version if self.request is None: user_str = None else: try: user_str = str(self.request.user) except Exception: # request.user may raise OperationalError if the database is # unavailable, for example. 
user_str = '[unable to retrieve the current user]' c = { 'is_email': self.is_email, 'unicode_hint': unicode_hint, 'frames': frames, 'request': self.request, 'request_meta': self.filter.get_safe_request_meta(self.request), 'user_str': user_str, 'filtered_POST_items': list(self.filter.get_post_parameters(self.request).items()), 'settings': self.filter.get_safe_settings(), 'sys_executable': sys.executable, 'sys_version_info': '%d.%d.%d' % sys.version_info[0:3], 'server_time': timezone.now(), 'django_version_info': get_version(), 'sys_path': sys.path, 'template_info': self.template_info, 'template_does_not_exist': self.template_does_not_exist, 'postmortem': self.postmortem, } if self.request is not None: c['request_GET_items'] = self.request.GET.items() c['request_FILES_items'] = self.request.FILES.items() c['request_COOKIES_items'] = self.request.COOKIES.items() # Check whether exception info is available if self.exc_type: c['exception_type'] = self.exc_type.__name__ if self.exc_value: c['exception_value'] = str(self.exc_value) if frames: c['lastframe'] = frames[-1] return c def get_traceback_html(self): """Return HTML version of debug 500 HTTP error page.""" with Path(CURRENT_DIR, 'templates', 'technical_500.html').open(encoding='utf-8') as fh: t = DEBUG_ENGINE.from_string(fh.read()) c = Context(self.get_traceback_data(), use_l10n=False) return t.render(c) def get_traceback_text(self): """Return plain text version of debug 500 HTTP error page.""" with Path(CURRENT_DIR, 'templates', 'technical_500.txt').open(encoding='utf-8') as fh: t = DEBUG_ENGINE.from_string(fh.read()) c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False) return t.render(c) def _get_source(self, filename, loader, module_name): source = None if hasattr(loader, 'get_source'): try: source = loader.get_source(module_name) except ImportError: pass if source is not None: source = source.splitlines() if source is None: try: with open(filename, 'rb') as fp: source = fp.read().splitlines() except OSError: pass return source def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None): """ Return context_lines before and after lineno from file. Return (pre_context_lineno, pre_context, context_line, post_context). """ source = self._get_source(filename, loader, module_name) if source is None: return None, [], None, [] # If we just read the source from a file, or if the loader did not # apply tokenize.detect_encoding to decode the source into a # string, then we should do that ourselves. if isinstance(source[0], bytes): encoding = 'ascii' for line in source[:2]: # File coding may be specified. 
Match pattern from PEP-263 # (https://www.python.org/dev/peps/pep-0263/) match = re.search(br'coding[:=]\s*([-\w.]+)', line) if match: encoding = match[1].decode('ascii') break source = [str(sline, encoding, 'replace') for sline in source] lower_bound = max(0, lineno - context_lines) upper_bound = lineno + context_lines try: pre_context = source[lower_bound:lineno] context_line = source[lineno] post_context = source[lineno + 1:upper_bound] except IndexError: return None, [], None, [] return lower_bound, pre_context, context_line, post_context def get_traceback_frames(self): def explicit_or_implicit_cause(exc_value): explicit = getattr(exc_value, '__cause__', None) suppress_context = getattr(exc_value, '__suppress_context__', None) implicit = getattr(exc_value, '__context__', None) return explicit or (None if suppress_context else implicit) # Get the exception and all its causes exceptions = [] exc_value = self.exc_value while exc_value: exceptions.append(exc_value) exc_value = explicit_or_implicit_cause(exc_value) if exc_value in exceptions: warnings.warn( "Cycle in the exception chain detected: exception '%s' " "encountered again." % exc_value, ExceptionCycleWarning, ) # Avoid infinite loop if there's a cyclic reference (#29393). break frames = [] # No exceptions were supplied to ExceptionReporter if not exceptions: return frames # In case there's just one exception, take the traceback from self.tb exc_value = exceptions.pop() tb = self.tb if not exceptions else exc_value.__traceback__ while tb is not None: # Support for __traceback_hide__ which is used by a few libraries # to hide internal frames. if tb.tb_frame.f_locals.get('__traceback_hide__'): tb = tb.tb_next continue filename = tb.tb_frame.f_code.co_filename function = tb.tb_frame.f_code.co_name lineno = tb.tb_lineno - 1 loader = tb.tb_frame.f_globals.get('__loader__') module_name = tb.tb_frame.f_globals.get('__name__') or '' pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file( filename, lineno, 7, loader, module_name, ) if pre_context_lineno is None: pre_context_lineno = lineno pre_context = [] context_line = '<source code not available>' post_context = [] frames.append({ 'exc_cause': explicit_or_implicit_cause(exc_value), 'exc_cause_explicit': getattr(exc_value, '__cause__', True), 'tb': tb, 'type': 'django' if module_name.startswith('django.') else 'user', 'filename': filename, 'function': function, 'lineno': lineno + 1, 'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame), 'id': id(tb), 'pre_context': pre_context, 'context_line': context_line, 'post_context': post_context, 'pre_context_lineno': pre_context_lineno + 1, }) # If the traceback for current exception is consumed, try the # other exception. if not tb.tb_next and exceptions: exc_value = exceptions.pop() tb = exc_value.__traceback__ else: tb = tb.tb_next return frames def technical_404_response(request, exception): """Create a technical 404 error response. 
`exception` is the Http404.""" try: error_url = exception.args[0]['path'] except (IndexError, TypeError, KeyError): error_url = request.path_info[1:] # Trim leading slash try: tried = exception.args[0]['tried'] except (IndexError, TypeError, KeyError): tried = [] else: if (not tried or ( # empty URLconf request.path == '/' and len(tried) == 1 and # default URLconf len(tried[0]) == 1 and getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin' )): return default_urlconf(request) urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF) if isinstance(urlconf, types.ModuleType): urlconf = urlconf.__name__ caller = '' try: resolver_match = resolve(request.path) except Http404: pass else: obj = resolver_match.func if hasattr(obj, '__name__'): caller = obj.__name__ elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'): caller = obj.__class__.__name__ if hasattr(obj, '__module__'): module = obj.__module__ caller = '%s.%s' % (module, caller) with Path(CURRENT_DIR, 'templates', 'technical_404.html').open(encoding='utf-8') as fh: t = DEBUG_ENGINE.from_string(fh.read()) reporter_filter = get_default_exception_reporter_filter() c = Context({ 'urlconf': urlconf, 'root_urlconf': settings.ROOT_URLCONF, 'request_path': error_url, 'urlpatterns': tried, 'reason': str(exception), 'request': request, 'settings': reporter_filter.get_safe_settings(), 'raising_view_name': caller, }) return HttpResponseNotFound(t.render(c), content_type='text/html') def default_urlconf(request): """Create an empty URLconf 404 error response.""" with Path(CURRENT_DIR, 'templates', 'default_urlconf.html').open(encoding='utf-8') as fh: t = DEBUG_ENGINE.from_string(fh.read()) c = Context({ 'version': get_docs_version(), }) return HttpResponse(t.render(c), content_type='text/html')
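

# Editor's note: a small illustrative sketch, not part of this module, showing
# the redaction behavior implemented above. The sample dict is hypothetical;
# only SafeExceptionReporterFilter as defined here is assumed.
def _demo_cleanse_setting():
    reporter_filter = SafeExceptionReporterFilter()
    cleansed = reporter_filter.cleanse_setting(
        'DATABASES', {'default': {'NAME': 'app', 'PASSWORD': 'hunter2'}},
    )
    # 'PASSWORD' matches the hidden_settings pattern (via 'PASS'), so its
    # value is starred out; 'NAME' does not match and is kept verbatim.
    assert cleansed['default']['PASSWORD'] == SafeExceptionReporterFilter.cleansed_substitute
    assert cleansed['default']['NAME'] == 'app'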
af64b6805ec048f0eaf8bf06854322fb065028187a87d21e50576064a6d1e70a
from django.conf import settings from django.http import HttpResponseForbidden from django.template import Context, Engine, TemplateDoesNotExist, loader from django.utils.translation import gettext as _ from django.utils.version import get_docs_version # We include the template inline since we need to be able to reliably display # this error message, especially for the sake of developers, and there isn't any # other way of making it available independent of what is in the settings file. # Only the text appearing with DEBUG=False is translated. Normal translation # tags cannot be used with this inline templates as makemessages would not be # able to discover the strings. CSRF_FAILURE_TEMPLATE = """ <!DOCTYPE html> <html lang="en"> <head> <meta http-equiv="content-type" content="text/html; charset=utf-8"> <meta name="robots" content="NONE,NOARCHIVE"> <title>403 Forbidden</title> <style type="text/css"> html * { padding:0; margin:0; } body * { padding:10px 20px; } body * * { padding:0; } body { font:small sans-serif; background:#eee; color:#000; } body>div { border-bottom:1px solid #ddd; } h1 { font-weight:normal; margin-bottom:.4em; } h1 span { font-size:60%; color:#666; font-weight:normal; } #info { background:#f6f6f6; } #info ul { margin: 0.5em 4em; } #info p, #summary p { padding-top:10px; } #summary { background: #ffc; } #explanation { background:#eee; border-bottom: 0px none; } </style> </head> <body> <div id="summary"> <h1>{{ title }} <span>(403)</span></h1> <p>{{ main }}</p> {% if no_referer %} <p>{{ no_referer1 }}</p> <p>{{ no_referer2 }}</p> <p>{{ no_referer3 }}</p> {% endif %} {% if no_cookie %} <p>{{ no_cookie1 }}</p> <p>{{ no_cookie2 }}</p> {% endif %} </div> {% if DEBUG %} <div id="info"> <h2>Help</h2> {% if reason %} <p>Reason given for failure:</p> <pre> {{ reason }} </pre> {% endif %} <p>In general, this can occur when there is a genuine Cross Site Request Forgery, or when <a href="https://docs.djangoproject.com/en/{{ docs_version }}/ref/csrf/">Django’s CSRF mechanism</a> has not been used correctly. For POST forms, you need to ensure:</p> <ul> <li>Your browser is accepting cookies.</li> <li>The view function passes a <code>request</code> to the template’s <a href="https://docs.djangoproject.com/en/dev/topics/templates/#django.template.backends.base.Template.render"><code>render</code></a> method.</li> <li>In the template, there is a <code>{% templatetag openblock %} csrf_token {% templatetag closeblock %}</code> template tag inside each POST form that targets an internal URL.</li> <li>If you are not using <code>CsrfViewMiddleware</code>, then you must use <code>csrf_protect</code> on any views that use the <code>csrf_token</code> template tag, as well as those that accept the POST data.</li> <li>The form has a valid CSRF token. After logging in in another browser tab or hitting the back button after a login, you may need to reload the page with the form, because the token is rotated after a login.</li> </ul> <p>You’re seeing the help section of this page because you have <code>DEBUG = True</code> in your Django settings file. Change that to <code>False</code>, and only the initial error message will be displayed. 
</p> <p>You can customize this page using the CSRF_FAILURE_VIEW setting.</p> </div> {% else %} <div id="explanation"> <p><small>{{ more }}</small></p> </div> {% endif %} </body> </html> """ CSRF_FAILURE_TEMPLATE_NAME = "403_csrf.html" def csrf_failure(request, reason="", template_name=CSRF_FAILURE_TEMPLATE_NAME): """ Default view used when request fails CSRF protection """ from django.middleware.csrf import REASON_NO_REFERER, REASON_NO_CSRF_COOKIE c = { 'title': _("Forbidden"), 'main': _("CSRF verification failed. Request aborted."), 'reason': reason, 'no_referer': reason == REASON_NO_REFERER, 'no_referer1': _( 'You are seeing this message because this HTTPS site requires a ' '“Referer header” to be sent by your Web browser, but none was ' 'sent. This header is required for security reasons, to ensure ' 'that your browser is not being hijacked by third parties.'), 'no_referer2': _( 'If you have configured your browser to disable “Referer” headers, ' 'please re-enable them, at least for this site, or for HTTPS ' 'connections, or for “same-origin” requests.'), 'no_referer3': _( 'If you are using the <meta name="referrer" ' 'content=\"no-referrer\"> tag or including the “Referrer-Policy: ' 'no-referrer” header, please remove them. The CSRF protection ' 'requires the “Referer” header to do strict referer checking. If ' 'you’re concerned about privacy, use alternatives like ' '<a rel=\"noreferrer\" …> for links to third-party sites.'), 'no_cookie': reason == REASON_NO_CSRF_COOKIE, 'no_cookie1': _( "You are seeing this message because this site requires a CSRF " "cookie when submitting forms. This cookie is required for " "security reasons, to ensure that your browser is not being " "hijacked by third parties."), 'no_cookie2': _( 'If you have configured your browser to disable cookies, please ' 're-enable them, at least for this site, or for “same-origin” ' 'requests.'), 'DEBUG': settings.DEBUG, 'docs_version': get_docs_version(), 'more': _("More information is available with DEBUG=True."), } try: t = loader.get_template(template_name) except TemplateDoesNotExist: if template_name == CSRF_FAILURE_TEMPLATE_NAME: # If the default template doesn't exist, use the string template. t = Engine().from_string(CSRF_FAILURE_TEMPLATE) c = Context(c) else: # Raise if a developer-specified template doesn't exist. raise return HttpResponseForbidden(t.render(c), content_type='text/html')
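

# Editor's note: an illustrative sketch, not part of this module. A project
# can reuse csrf_failure() with its own template by pointing the
# CSRF_FAILURE_VIEW setting (e.g. 'myproject.views.custom_csrf_failure') at a
# wrapper like this; the 'errors/403_csrf.html' name is a hypothetical example.
def custom_csrf_failure(request, reason=''):
    # If the custom template is missing, this propagates TemplateDoesNotExist,
    # matching the behavior of csrf_failure() above for non-default templates.
    return csrf_failure(request, reason=reason, template_name='errors/403_csrf.html')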
cbe215bc7cceec34e008f364ec1d3e79c3c843be5b34dfdb04f9e757fe186145
""" Default Django settings. Override these with settings in the module pointed to by the DJANGO_SETTINGS_MODULE environment variable. """ # This is defined here as a do-nothing function because we can't import # django.utils.translation -- that module depends on the settings. def gettext_noop(s): return s #################### # CORE # #################### DEBUG = False # Whether the framework should propagate raw exceptions rather than catching # them. This is useful under some testing situations and should never be used # on a live site. DEBUG_PROPAGATE_EXCEPTIONS = False # People who get code error notifications. # In the format [('Full Name', '[email protected]'), ('Full Name', '[email protected]')] ADMINS = [] # List of IP addresses, as strings, that: # * See debug comments, when DEBUG is true # * Receive x-headers INTERNAL_IPS = [] # Hosts/domain names that are valid for this site. # "*" matches anything, ".example.com" matches example.com and all subdomains ALLOWED_HOSTS = [] # Local time zone for this installation. All choices can be found here: # https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all # systems may support all possibilities). When USE_TZ is True, this is # interpreted as the default user time zone. TIME_ZONE = 'America/Chicago' # If you set this to True, Django will use timezone-aware datetimes. USE_TZ = False # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' # Languages we provide translations for, out of the box. LANGUAGES = [ ('af', gettext_noop('Afrikaans')), ('ar', gettext_noop('Arabic')), ('ar-dz', gettext_noop('Algerian Arabic')), ('ast', gettext_noop('Asturian')), ('az', gettext_noop('Azerbaijani')), ('bg', gettext_noop('Bulgarian')), ('be', gettext_noop('Belarusian')), ('bn', gettext_noop('Bengali')), ('br', gettext_noop('Breton')), ('bs', gettext_noop('Bosnian')), ('ca', gettext_noop('Catalan')), ('cs', gettext_noop('Czech')), ('cy', gettext_noop('Welsh')), ('da', gettext_noop('Danish')), ('de', gettext_noop('German')), ('dsb', gettext_noop('Lower Sorbian')), ('el', gettext_noop('Greek')), ('en', gettext_noop('English')), ('en-au', gettext_noop('Australian English')), ('en-gb', gettext_noop('British English')), ('eo', gettext_noop('Esperanto')), ('es', gettext_noop('Spanish')), ('es-ar', gettext_noop('Argentinian Spanish')), ('es-co', gettext_noop('Colombian Spanish')), ('es-mx', gettext_noop('Mexican Spanish')), ('es-ni', gettext_noop('Nicaraguan Spanish')), ('es-ve', gettext_noop('Venezuelan Spanish')), ('et', gettext_noop('Estonian')), ('eu', gettext_noop('Basque')), ('fa', gettext_noop('Persian')), ('fi', gettext_noop('Finnish')), ('fr', gettext_noop('French')), ('fy', gettext_noop('Frisian')), ('ga', gettext_noop('Irish')), ('gd', gettext_noop('Scottish Gaelic')), ('gl', gettext_noop('Galician')), ('he', gettext_noop('Hebrew')), ('hi', gettext_noop('Hindi')), ('hr', gettext_noop('Croatian')), ('hsb', gettext_noop('Upper Sorbian')), ('hu', gettext_noop('Hungarian')), ('hy', gettext_noop('Armenian')), ('ia', gettext_noop('Interlingua')), ('id', gettext_noop('Indonesian')), ('ig', gettext_noop('Igbo')), ('io', gettext_noop('Ido')), ('is', gettext_noop('Icelandic')), ('it', gettext_noop('Italian')), ('ja', gettext_noop('Japanese')), ('ka', gettext_noop('Georgian')), ('kab', gettext_noop('Kabyle')), ('kk', gettext_noop('Kazakh')), ('km', gettext_noop('Khmer')), ('kn', gettext_noop('Kannada')), ('ko', gettext_noop('Korean')), ('ky', 
gettext_noop('Kyrgyz')), ('lb', gettext_noop('Luxembourgish')), ('lt', gettext_noop('Lithuanian')), ('lv', gettext_noop('Latvian')), ('mk', gettext_noop('Macedonian')), ('ml', gettext_noop('Malayalam')), ('mn', gettext_noop('Mongolian')), ('mr', gettext_noop('Marathi')), ('my', gettext_noop('Burmese')), ('nb', gettext_noop('Norwegian Bokmål')), ('ne', gettext_noop('Nepali')), ('nl', gettext_noop('Dutch')), ('nn', gettext_noop('Norwegian Nynorsk')), ('os', gettext_noop('Ossetic')), ('pa', gettext_noop('Punjabi')), ('pl', gettext_noop('Polish')), ('pt', gettext_noop('Portuguese')), ('pt-br', gettext_noop('Brazilian Portuguese')), ('ro', gettext_noop('Romanian')), ('ru', gettext_noop('Russian')), ('sk', gettext_noop('Slovak')), ('sl', gettext_noop('Slovenian')), ('sq', gettext_noop('Albanian')), ('sr', gettext_noop('Serbian')), ('sr-latn', gettext_noop('Serbian Latin')), ('sv', gettext_noop('Swedish')), ('sw', gettext_noop('Swahili')), ('ta', gettext_noop('Tamil')), ('te', gettext_noop('Telugu')), ('tg', gettext_noop('Tajik')), ('th', gettext_noop('Thai')), ('tk', gettext_noop('Turkmen')), ('tr', gettext_noop('Turkish')), ('tt', gettext_noop('Tatar')), ('udm', gettext_noop('Udmurt')), ('uk', gettext_noop('Ukrainian')), ('ur', gettext_noop('Urdu')), ('uz', gettext_noop('Uzbek')), ('vi', gettext_noop('Vietnamese')), ('zh-hans', gettext_noop('Simplified Chinese')), ('zh-hant', gettext_noop('Traditional Chinese')), ] # Languages using BiDi (right-to-left) layout LANGUAGES_BIDI = ["he", "ar", "ar-dz", "fa", "ur"] # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True LOCALE_PATHS = [] # Settings for language cookie LANGUAGE_COOKIE_NAME = 'django_language' LANGUAGE_COOKIE_AGE = None LANGUAGE_COOKIE_DOMAIN = None LANGUAGE_COOKIE_PATH = '/' LANGUAGE_COOKIE_SECURE = False LANGUAGE_COOKIE_HTTPONLY = False LANGUAGE_COOKIE_SAMESITE = None # If you set this to True, Django will format dates, numbers and calendars # according to user current locale. USE_L10N = False # Not-necessarily-technical managers of the site. They get broken link # notifications and other various emails. MANAGERS = ADMINS # Default charset to use for all HttpResponse objects, if a MIME type isn't # manually specified. It's used to construct the Content-Type header. DEFAULT_CHARSET = 'utf-8' # Email address that error messages come from. SERVER_EMAIL = 'root@localhost' # Database connection info. If left empty, will default to the dummy backend. DATABASES = {} # Classes used to implement DB routing behavior. DATABASE_ROUTERS = [] # The email backend to use. For possible shortcuts see django.core.mail. # The default is to use the SMTP backend. # Third-party backends can be specified by providing a Python path # to a module that defines an EmailBackend class. EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' # Host for sending email. EMAIL_HOST = 'localhost' # Port for sending email. EMAIL_PORT = 25 # Whether to send SMTP 'Date' header in the local time zone or in UTC. EMAIL_USE_LOCALTIME = False # Optional SMTP authentication information for EMAIL_HOST. EMAIL_HOST_USER = '' EMAIL_HOST_PASSWORD = '' EMAIL_USE_TLS = False EMAIL_USE_SSL = False EMAIL_SSL_CERTFILE = None EMAIL_SSL_KEYFILE = None EMAIL_TIMEOUT = None # List of strings representing installed apps. INSTALLED_APPS = [] TEMPLATES = [] # Default form rendering class. 
FORM_RENDERER = 'django.forms.renderers.DjangoTemplates' # Default email address to use for various automated correspondence from # the site managers. DEFAULT_FROM_EMAIL = 'webmaster@localhost' # Subject-line prefix for email messages sent with django.core.mail.mail_admins # or ...mail_managers. Make sure to include the trailing space. EMAIL_SUBJECT_PREFIX = '[Django] ' # Whether to append trailing slashes to URLs. APPEND_SLASH = True # Whether to prepend the "www." subdomain to URLs that don't have it. PREPEND_WWW = False # Override the server-derived value of SCRIPT_NAME FORCE_SCRIPT_NAME = None # List of compiled regular expression objects representing User-Agent strings # that are not allowed to visit any page, systemwide. Use this for bad # robots/crawlers. Here are a few examples: # import re # DISALLOWED_USER_AGENTS = [ # re.compile(r'^NaverBot.*'), # re.compile(r'^EmailSiphon.*'), # re.compile(r'^SiteSucker.*'), # re.compile(r'^sohu-search'), # ] DISALLOWED_USER_AGENTS = [] ABSOLUTE_URL_OVERRIDES = {} # List of compiled regular expression objects representing URLs that need not # be reported by BrokenLinkEmailsMiddleware. Here are a few examples: # import re # IGNORABLE_404_URLS = [ # re.compile(r'^/apple-touch-icon.*\.png$'), # re.compile(r'^/favicon.ico$'), # re.compile(r'^/robots.txt$'), # re.compile(r'^/phpmyadmin/'), # re.compile(r'\.(cgi|php|pl)$'), # ] IGNORABLE_404_URLS = [] # A secret key for this particular Django installation. Used in secret-key # hashing algorithms. Set this in your settings, or Django will complain # loudly. SECRET_KEY = '' # Default file storage mechanism that holds media. DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage' # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/var/www/example.com/media/" MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. # Examples: "http://example.com/media/", "http://media.example.com/" MEDIA_URL = '' # Absolute path to the directory static files should be collected to. # Example: "/var/www/example.com/static/" STATIC_ROOT = None # URL that handles the static files served from STATIC_ROOT. # Example: "http://example.com/static/", "http://static.example.com/" STATIC_URL = None # List of upload handler classes to be applied in order. FILE_UPLOAD_HANDLERS = [ 'django.core.files.uploadhandler.MemoryFileUploadHandler', 'django.core.files.uploadhandler.TemporaryFileUploadHandler', ] # Maximum size, in bytes, of a request before it will be streamed to the # file system instead of into memory. FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB # Maximum size in bytes of request data (excluding file uploads) that will be # read before a SuspiciousOperation (RequestDataTooBig) is raised. DATA_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB # Maximum number of GET/POST parameters that will be read before a # SuspiciousOperation (TooManyFieldsSent) is raised. DATA_UPLOAD_MAX_NUMBER_FIELDS = 1000 # Directory in which streamed file uploads will be temporarily saved. A value of # `None` will make Django use the operating system's default temporary directory # (i.e. "/tmp" on *nix systems). FILE_UPLOAD_TEMP_DIR = None # The numeric mode to set newly-uploaded files to. The value should be a mode # you'd pass directly to os.chmod; see https://docs.python.org/library/os.html#files-and-directories. FILE_UPLOAD_PERMISSIONS = 0o644 # The numeric mode to assign to newly-created directories, when uploading files.
# The value should be a mode as you'd pass to os.chmod; # see https://docs.python.org/library/os.html#files-and-directories. FILE_UPLOAD_DIRECTORY_PERMISSIONS = None # Python module path where the user will place custom format definitions. # The directory where this setting is pointing should contain subdirectories # named as the locales, containing a formats.py file # (e.g. "myproject.locale" to use myproject/locale/en/formats.py) FORMAT_MODULE_PATH = None # Default formatting for date objects. See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'N j, Y' # Default formatting for datetime objects. See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATETIME_FORMAT = 'N j, Y, P' # Default formatting for time objects. See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date TIME_FORMAT = 'P' # Default formatting for date objects when only the year and month are relevant. # See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date YEAR_MONTH_FORMAT = 'F Y' # Default formatting for date objects when only the month and day are relevant. # See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date MONTH_DAY_FORMAT = 'F j' # Default short formatting for date objects. See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date SHORT_DATE_FORMAT = 'm/d/Y' # Default short formatting for datetime objects. # See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date SHORT_DATETIME_FORMAT = 'm/d/Y P' # Default formats to be used when parsing dates from input boxes, in order # See all available format strings here: # https://docs.python.org/library/datetime.html#strftime-behavior # * Note that these format strings are different from the ones to display dates DATE_INPUT_FORMATS = [ '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06' '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006' '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006' '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006' '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006' ] # Default formats to be used when parsing times from input boxes, in order # See all available format strings here: # https://docs.python.org/library/datetime.html#strftime-behavior # * Note that these format strings are different from the ones to display dates TIME_INPUT_FORMATS = [ '%H:%M:%S', # '14:30:59' '%H:%M:%S.%f', # '14:30:59.000200' '%H:%M', # '14:30' ] # Default formats to be used when parsing dates and times from input boxes, # in order # See all available format strings here: # https://docs.python.org/library/datetime.html#strftime-behavior # * Note that these format strings are different from the ones to display dates DATETIME_INPUT_FORMATS = [ '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59' '%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200' '%Y-%m-%d %H:%M', # '2006-10-25 14:30' '%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59' '%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200' '%m/%d/%Y %H:%M', # '10/25/2006 14:30' '%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59' '%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200' '%m/%d/%y %H:%M', # '10/25/06 14:30' ] # First day of week, to be used on calendars # 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0 # Decimal separator symbol DECIMAL_SEPARATOR = '.' # Boolean that sets whether to add thousand separator when formatting numbers USE_THOUSAND_SEPARATOR = False # Number of digits that will be together, when splitting them by # THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands... NUMBER_GROUPING = 0 # Thousand separator symbol THOUSAND_SEPARATOR = ',' # The tablespaces to use for each model when not specified otherwise. DEFAULT_TABLESPACE = '' DEFAULT_INDEX_TABLESPACE = '' # Default X-Frame-Options header value X_FRAME_OPTIONS = 'DENY' USE_X_FORWARDED_HOST = False USE_X_FORWARDED_PORT = False # The Python dotted path to the WSGI application that Django's internal server # (runserver) will use. If `None`, the return value of # 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same # behavior as previous versions of Django. Otherwise this should point to an # actual WSGI application object. WSGI_APPLICATION = None # If your Django app is behind a proxy that sets a header to specify secure # connections, AND that proxy ensures that user-submitted headers with the # same name are ignored (so that people can't spoof it), set this value to # a tuple of (header_name, header_value). For any requests that come in with # that header/value, request.is_secure() will return True. # WARNING! Only set this if you fully understand what you're doing. Otherwise, # you may be opening yourself up to a security risk. SECURE_PROXY_SSL_HEADER = None ############## # MIDDLEWARE # ############## # List of middleware to use. Order is important; in the request phase, these # middleware will be applied in the order given, and in the response # phase the middleware will be applied in reverse order. MIDDLEWARE = [] ############ # SESSIONS # ############ # Cache to store session data if using the cache session backend. SESSION_CACHE_ALIAS = 'default' # Cookie name. This can be whatever you want. SESSION_COOKIE_NAME = 'sessionid' # Age of cookie, in seconds (default: 2 weeks). SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # A string like "example.com", or None for standard domain cookie. SESSION_COOKIE_DOMAIN = None # Whether the session cookie should be secure (https:// only). SESSION_COOKIE_SECURE = False # The path of the session cookie. SESSION_COOKIE_PATH = '/' # Whether to use the HttpOnly flag. SESSION_COOKIE_HTTPONLY = True # Whether to set the flag restricting cookie leaks on cross-site requests. # This can be 'Lax', 'Strict', or None to disable the flag. SESSION_COOKIE_SAMESITE = 'Lax' # Whether to save the session data on every request. SESSION_SAVE_EVERY_REQUEST = False # Whether a user's session cookie expires when the Web browser is closed. SESSION_EXPIRE_AT_BROWSER_CLOSE = False # The module to store session data SESSION_ENGINE = 'django.contrib.sessions.backends.db' # Directory to store session files if using the file session module. If None, # the backend will use a sensible default. SESSION_FILE_PATH = None # class to serialize session data SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer' ######### # CACHE # ######### # The cache backends to use. 
CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', } } CACHE_MIDDLEWARE_KEY_PREFIX = '' CACHE_MIDDLEWARE_SECONDS = 600 CACHE_MIDDLEWARE_ALIAS = 'default' ################## # AUTHENTICATION # ################## AUTH_USER_MODEL = 'auth.User' AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend'] LOGIN_URL = '/accounts/login/' LOGIN_REDIRECT_URL = '/accounts/profile/' LOGOUT_REDIRECT_URL = None # The number of days a password reset link is valid for PASSWORD_RESET_TIMEOUT_DAYS = 3 # The number of seconds a password reset link is valid for (default: 3 days). PASSWORD_RESET_TIMEOUT = 60 * 60 * 24 * 3 # the first hasher in this list is the preferred algorithm. any # password using different algorithms will be converted automatically # upon login PASSWORD_HASHERS = [ 'django.contrib.auth.hashers.PBKDF2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher', 'django.contrib.auth.hashers.Argon2PasswordHasher', 'django.contrib.auth.hashers.BCryptSHA256PasswordHasher', ] AUTH_PASSWORD_VALIDATORS = [] ########### # SIGNING # ########### SIGNING_BACKEND = 'django.core.signing.TimestampSigner' ######## # CSRF # ######## # Dotted path to callable to be used as view when a request is # rejected by the CSRF middleware. CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure' # Settings for CSRF cookie. CSRF_COOKIE_NAME = 'csrftoken' CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52 CSRF_COOKIE_DOMAIN = None CSRF_COOKIE_PATH = '/' CSRF_COOKIE_SECURE = False CSRF_COOKIE_HTTPONLY = False CSRF_COOKIE_SAMESITE = 'Lax' CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN' CSRF_TRUSTED_ORIGINS = [] CSRF_USE_SESSIONS = False ############ # MESSAGES # ############ # Class to use as messages backend MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage' # Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within # django.contrib.messages to avoid imports in this settings file. ########### # LOGGING # ########### # The callable to use to configure logging LOGGING_CONFIG = 'logging.config.dictConfig' # Custom logging configuration. LOGGING = {} # Default exception reporter class used in case none has been # specifically assigned to the HttpRequest instance. DEFAULT_EXCEPTION_REPORTER = 'django.views.debug.ExceptionReporter' # Default exception reporter filter class used in case none has been # specifically assigned to the HttpRequest instance. DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter' ########### # TESTING # ########### # The name of the class to use to run the test suite TEST_RUNNER = 'django.test.runner.DiscoverRunner' # Apps that don't need to be serialized at test database creation time # (only apps with migrations are to start with) TEST_NON_SERIALIZED_APPS = [] ############ # FIXTURES # ############ # The list of directories to search for fixtures FIXTURE_DIRS = [] ############### # STATICFILES # ############### # A list of locations of additional static files STATICFILES_DIRS = [] # The default file storage backend used during the build process STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage' # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = [ 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ] ############## # MIGRATIONS # ############## # Migration module overrides for apps, by app label. 
MIGRATION_MODULES = {} ################# # SYSTEM CHECKS # ################# # List of all issues generated by system checks that should be silenced. Light # issues like warnings, infos, or debugs will not generate a message. Silencing # serious issues like errors and criticals does not result in hiding the # message, but Django will not stop you from e.g. running the server. SILENCED_SYSTEM_CHECKS = [] ####################### # SECURITY MIDDLEWARE # ####################### SECURE_BROWSER_XSS_FILTER = False SECURE_CONTENT_TYPE_NOSNIFF = True SECURE_HSTS_INCLUDE_SUBDOMAINS = False SECURE_HSTS_PRELOAD = False SECURE_HSTS_SECONDS = 0 SECURE_REDIRECT_EXEMPT = [] SECURE_REFERRER_POLICY = 'same-origin' SECURE_SSL_HOST = None SECURE_SSL_REDIRECT = False
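# ---------------------------------------------------------------------------
# Illustrative sketch, not part of global_settings.py: every name above is a
# default that a project overrides in the module named by
# DJANGO_SETTINGS_MODULE, or programmatically via settings.configure(). The
# values below are hypothetical.
if __name__ == '__main__':
    from django.conf import settings

    settings.configure(
        DEBUG=True,
        ALLOWED_HOSTS=['localhost'],
        USE_TZ=True,
        SECRET_KEY='not-for-production',  # placeholder value
        DATABASES={
            'default': {
                'ENGINE': 'django.db.backends.sqlite3',
                'NAME': 'db.sqlite3',
            },
        },
    )
    # Any setting not passed to configure() falls back to the defaults above.
    print(settings.TIME_ZONE)  # -> 'America/Chicago'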
""" HTML Widget classes """ import copy import datetime import warnings from collections import defaultdict from itertools import chain from django.forms.utils import to_current_timezone from django.templatetags.static import static from django.utils import datetime_safe, formats from django.utils.datastructures import OrderedSet from django.utils.dates import MONTHS from django.utils.formats import get_format from django.utils.html import format_html, html_safe from django.utils.regex_helper import _lazy_re_compile from django.utils.safestring import mark_safe from django.utils.topological_sort import ( CyclicDependencyError, stable_topological_sort, ) from django.utils.translation import gettext_lazy as _ from .renderers import get_default_renderer __all__ = ( 'Media', 'MediaDefiningClass', 'Widget', 'TextInput', 'NumberInput', 'EmailInput', 'URLInput', 'PasswordInput', 'HiddenInput', 'MultipleHiddenInput', 'FileInput', 'ClearableFileInput', 'Textarea', 'DateInput', 'DateTimeInput', 'TimeInput', 'CheckboxInput', 'Select', 'NullBooleanSelect', 'SelectMultiple', 'RadioSelect', 'CheckboxSelectMultiple', 'MultiWidget', 'SplitDateTimeWidget', 'SplitHiddenDateTimeWidget', 'SelectDateWidget', ) MEDIA_TYPES = ('css', 'js') class MediaOrderConflictWarning(RuntimeWarning): pass @html_safe class Media: def __init__(self, media=None, css=None, js=None): if media is not None: css = getattr(media, 'css', {}) js = getattr(media, 'js', []) else: if css is None: css = {} if js is None: js = [] self._css_lists = [css] self._js_lists = [js] def __repr__(self): return 'Media(css=%r, js=%r)' % (self._css, self._js) def __str__(self): return self.render() @property def _css(self): css = defaultdict(list) for css_list in self._css_lists: for medium, sublist in css_list.items(): css[medium].append(sublist) return {medium: self.merge(*lists) for medium, lists in css.items()} @property def _js(self): return self.merge(*self._js_lists) def render(self): return mark_safe('\n'.join(chain.from_iterable(getattr(self, 'render_' + name)() for name in MEDIA_TYPES))) def render_js(self): return [ format_html( '<script src="{}"></script>', self.absolute_path(path) ) for path in self._js ] def render_css(self): # To keep rendering order consistent, we can't just iterate over items(). # We need to sort the keys, and iterate over the sorted list. media = sorted(self._css) return chain.from_iterable([ format_html( '<link href="{}" type="text/css" media="{}" rel="stylesheet">', self.absolute_path(path), medium ) for path in self._css[medium] ] for medium in media) def absolute_path(self, path): """ Given a relative or absolute path to a static asset, return an absolute path. An absolute path will be returned unchanged while a relative path will be passed to django.templatetags.static.static(). """ if path.startswith(('http://', 'https://', '/')): return path return static(path) def __getitem__(self, name): """Return a Media object that only contains media of the given type.""" if name in MEDIA_TYPES: return Media(**{str(name): getattr(self, '_' + name)}) raise KeyError('Unknown media type "%s"' % name) @staticmethod def merge(*lists): """ Merge lists while trying to keep the relative order of the elements. Warn if the lists have the same elements in a different relative order. For static assets it can be important to have them included in the DOM in a certain order. In JavaScript you may not be able to reference a global or in CSS you might want to override a style. 
""" dependency_graph = defaultdict(set) all_items = OrderedSet() for list_ in filter(None, lists): head = list_[0] # The first items depend on nothing but have to be part of the # dependency graph to be included in the result. dependency_graph.setdefault(head, set()) for item in list_: all_items.add(item) # No self dependencies if head != item: dependency_graph[item].add(head) head = item try: return stable_topological_sort(all_items, dependency_graph) except CyclicDependencyError: warnings.warn( 'Detected duplicate Media files in an opposite order: {}'.format( ', '.join(repr(list_) for list_ in lists) ), MediaOrderConflictWarning, ) return list(all_items) def __add__(self, other): combined = Media() combined._css_lists = self._css_lists + other._css_lists combined._js_lists = self._js_lists + other._js_lists return combined def media_property(cls): def _media(self): # Get the media property of the superclass, if it exists sup_cls = super(cls, self) try: base = sup_cls.media except AttributeError: base = Media() # Get the media definition for this class definition = getattr(cls, 'Media', None) if definition: extend = getattr(definition, 'extend', True) if extend: if extend is True: m = base else: m = Media() for medium in extend: m = m + base[medium] return m + Media(definition) return Media(definition) return base return property(_media) class MediaDefiningClass(type): """ Metaclass for classes that can have media definitions. """ def __new__(mcs, name, bases, attrs): new_class = super().__new__(mcs, name, bases, attrs) if 'media' not in attrs: new_class.media = media_property(new_class) return new_class class Widget(metaclass=MediaDefiningClass): needs_multipart_form = False # Determines does this widget need multipart form is_localized = False is_required = False supports_microseconds = True def __init__(self, attrs=None): self.attrs = {} if attrs is None else attrs.copy() def __deepcopy__(self, memo): obj = copy.copy(self) obj.attrs = self.attrs.copy() memo[id(self)] = obj return obj @property def is_hidden(self): return self.input_type == 'hidden' if hasattr(self, 'input_type') else False def subwidgets(self, name, value, attrs=None): context = self.get_context(name, value, attrs) yield context['widget'] def format_value(self, value): """ Return a value as it should appear when rendered in a template. """ if value == '' or value is None: return None if self.is_localized: return formats.localize_input(value) return str(value) def get_context(self, name, value, attrs): return { 'widget': { 'name': name, 'is_hidden': self.is_hidden, 'required': self.is_required, 'value': self.format_value(value), 'attrs': self.build_attrs(self.attrs, attrs), 'template_name': self.template_name, }, } def render(self, name, value, attrs=None, renderer=None): """Render the widget as an HTML string.""" context = self.get_context(name, value, attrs) return self._render(self.template_name, context, renderer) def _render(self, template_name, context, renderer=None): if renderer is None: renderer = get_default_renderer() return mark_safe(renderer.render(template_name, context)) def build_attrs(self, base_attrs, extra_attrs=None): """Build an attribute dictionary.""" return {**base_attrs, **(extra_attrs or {})} def value_from_datadict(self, data, files, name): """ Given a dictionary of data and this widget's name, return the value of this widget or None if it's not provided. 
""" return data.get(name) def value_omitted_from_data(self, data, files, name): return name not in data def id_for_label(self, id_): """ Return the HTML ID attribute of this Widget for use by a <label>, given the ID of the field. Return None if no ID is available. This hook is necessary because some widgets have multiple HTML elements and, thus, multiple IDs. In that case, this method should return an ID value that corresponds to the first ID in the widget's tags. """ return id_ def use_required_attribute(self, initial): return not self.is_hidden class Input(Widget): """ Base class for all <input> widgets. """ input_type = None # Subclasses must define this. template_name = 'django/forms/widgets/input.html' def __init__(self, attrs=None): if attrs is not None: attrs = attrs.copy() self.input_type = attrs.pop('type', self.input_type) super().__init__(attrs) def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) context['widget']['type'] = self.input_type return context class TextInput(Input): input_type = 'text' template_name = 'django/forms/widgets/text.html' class NumberInput(Input): input_type = 'number' template_name = 'django/forms/widgets/number.html' class EmailInput(Input): input_type = 'email' template_name = 'django/forms/widgets/email.html' class URLInput(Input): input_type = 'url' template_name = 'django/forms/widgets/url.html' class PasswordInput(Input): input_type = 'password' template_name = 'django/forms/widgets/password.html' def __init__(self, attrs=None, render_value=False): super().__init__(attrs) self.render_value = render_value def get_context(self, name, value, attrs): if not self.render_value: value = None return super().get_context(name, value, attrs) class HiddenInput(Input): input_type = 'hidden' template_name = 'django/forms/widgets/hidden.html' class MultipleHiddenInput(HiddenInput): """ Handle <input type="hidden"> for fields that have a list of values. """ template_name = 'django/forms/widgets/multiple_hidden.html' def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) final_attrs = context['widget']['attrs'] id_ = context['widget']['attrs'].get('id') subwidgets = [] for index, value_ in enumerate(context['widget']['value']): widget_attrs = final_attrs.copy() if id_: # An ID attribute was given. Add a numeric index as a suffix # so that the inputs don't all have the same ID attribute. 
widget_attrs['id'] = '%s_%s' % (id_, index) widget = HiddenInput() widget.is_required = self.is_required subwidgets.append(widget.get_context(name, value_, widget_attrs)['widget']) context['widget']['subwidgets'] = subwidgets return context def value_from_datadict(self, data, files, name): try: getter = data.getlist except AttributeError: getter = data.get return getter(name) def format_value(self, value): return [] if value is None else value class FileInput(Input): input_type = 'file' needs_multipart_form = True template_name = 'django/forms/widgets/file.html' def format_value(self, value): """File input never renders a value.""" return def value_from_datadict(self, data, files, name): "File widgets take data from FILES, not POST" return files.get(name) def value_omitted_from_data(self, data, files, name): return name not in files def use_required_attribute(self, initial): return super().use_required_attribute(initial) and not initial FILE_INPUT_CONTRADICTION = object() class ClearableFileInput(FileInput): clear_checkbox_label = _('Clear') initial_text = _('Currently') input_text = _('Change') template_name = 'django/forms/widgets/clearable_file_input.html' def clear_checkbox_name(self, name): """ Given the name of the file input, return the name of the clear checkbox input. """ return name + '-clear' def clear_checkbox_id(self, name): """ Given the name of the clear checkbox input, return the HTML id for it. """ return name + '_id' def is_initial(self, value): """ Return whether value is considered to be initial value. """ return bool(value and getattr(value, 'url', False)) def format_value(self, value): """ Return the file object if it has a defined url attribute. """ if self.is_initial(value): return value def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) checkbox_name = self.clear_checkbox_name(name) checkbox_id = self.clear_checkbox_id(checkbox_name) context['widget'].update({ 'checkbox_name': checkbox_name, 'checkbox_id': checkbox_id, 'is_initial': self.is_initial(value), 'input_text': self.input_text, 'initial_text': self.initial_text, 'clear_checkbox_label': self.clear_checkbox_label, }) return context def value_from_datadict(self, data, files, name): upload = super().value_from_datadict(data, files, name) if not self.is_required and CheckboxInput().value_from_datadict( data, files, self.clear_checkbox_name(name)): if upload: # If the user contradicts themselves (uploads a new file AND # checks the "clear" checkbox), we return a unique marker # object that FileField will turn into a ValidationError. 
return FILE_INPUT_CONTRADICTION # False signals to clear any existing value, as opposed to just None return False return upload def value_omitted_from_data(self, data, files, name): return ( super().value_omitted_from_data(data, files, name) and self.clear_checkbox_name(name) not in data ) class Textarea(Widget): template_name = 'django/forms/widgets/textarea.html' def __init__(self, attrs=None): # Use slightly better defaults than HTML's 20x2 box default_attrs = {'cols': '40', 'rows': '10'} if attrs: default_attrs.update(attrs) super().__init__(default_attrs) class DateTimeBaseInput(TextInput): format_key = '' supports_microseconds = False def __init__(self, attrs=None, format=None): super().__init__(attrs) self.format = format or None def format_value(self, value): return formats.localize_input(value, self.format or formats.get_format(self.format_key)[0]) class DateInput(DateTimeBaseInput): format_key = 'DATE_INPUT_FORMATS' template_name = 'django/forms/widgets/date.html' class DateTimeInput(DateTimeBaseInput): format_key = 'DATETIME_INPUT_FORMATS' template_name = 'django/forms/widgets/datetime.html' class TimeInput(DateTimeBaseInput): format_key = 'TIME_INPUT_FORMATS' template_name = 'django/forms/widgets/time.html' # Defined at module level so that CheckboxInput is picklable (#17976) def boolean_check(v): return not (v is False or v is None or v == '') class CheckboxInput(Input): input_type = 'checkbox' template_name = 'django/forms/widgets/checkbox.html' def __init__(self, attrs=None, check_test=None): super().__init__(attrs) # check_test is a callable that takes a value and returns True # if the checkbox should be checked for that value. self.check_test = boolean_check if check_test is None else check_test def format_value(self, value): """Only return the 'value' attribute if value isn't empty.""" if value is True or value is False or value is None or value == '': return return str(value) def get_context(self, name, value, attrs): if self.check_test(value): attrs = {**(attrs or {}), 'checked': True} return super().get_context(name, value, attrs) def value_from_datadict(self, data, files, name): if name not in data: # A missing value means False because HTML form submission does not # send results for unselected checkboxes. return False value = data.get(name) # Translate true and false strings to boolean values. values = {'true': True, 'false': False} if isinstance(value, str): value = values.get(value.lower(), value) return bool(value) def value_omitted_from_data(self, data, files, name): # HTML checkboxes don't appear in POST data if not checked, so it's # never known if the value is actually omitted. return False class ChoiceWidget(Widget): allow_multiple_selected = False input_type = None template_name = None option_template_name = None add_id_index = True checked_attribute = {'checked': True} option_inherits_attrs = True def __init__(self, attrs=None, choices=()): super().__init__(attrs) # choices can be any iterable, but we may need to render this widget # multiple times. Thus, collapse it into a list so it can be consumed # more than once. self.choices = list(choices) def __deepcopy__(self, memo): obj = copy.copy(self) obj.attrs = self.attrs.copy() obj.choices = copy.copy(self.choices) memo[id(self)] = obj return obj def subwidgets(self, name, value, attrs=None): """ Yield all "subwidgets" of this widget. Used to enable iterating options from a BoundField for choice widgets. 
""" value = self.format_value(value) yield from self.options(name, value, attrs) def options(self, name, value, attrs=None): """Yield a flat list of options for this widgets.""" for group in self.optgroups(name, value, attrs): yield from group[1] def optgroups(self, name, value, attrs=None): """Return a list of optgroups for this widget.""" groups = [] has_selected = False for index, (option_value, option_label) in enumerate(self.choices): if option_value is None: option_value = '' subgroup = [] if isinstance(option_label, (list, tuple)): group_name = option_value subindex = 0 choices = option_label else: group_name = None subindex = None choices = [(option_value, option_label)] groups.append((group_name, subgroup, index)) for subvalue, sublabel in choices: selected = ( str(subvalue) in value and (not has_selected or self.allow_multiple_selected) ) has_selected |= selected subgroup.append(self.create_option( name, subvalue, sublabel, selected, index, subindex=subindex, attrs=attrs, )) if subindex is not None: subindex += 1 return groups def create_option(self, name, value, label, selected, index, subindex=None, attrs=None): index = str(index) if subindex is None else "%s_%s" % (index, subindex) if attrs is None: attrs = {} option_attrs = self.build_attrs(self.attrs, attrs) if self.option_inherits_attrs else {} if selected: option_attrs.update(self.checked_attribute) if 'id' in option_attrs: option_attrs['id'] = self.id_for_label(option_attrs['id'], index) return { 'name': name, 'value': value, 'label': label, 'selected': selected, 'index': index, 'attrs': option_attrs, 'type': self.input_type, 'template_name': self.option_template_name, 'wrap_label': True, } def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) context['widget']['optgroups'] = self.optgroups(name, context['widget']['value'], attrs) return context def id_for_label(self, id_, index='0'): """ Use an incremented id for each option where the main widget references the zero index. """ if id_ and self.add_id_index: id_ = '%s_%s' % (id_, index) return id_ def value_from_datadict(self, data, files, name): getter = data.get if self.allow_multiple_selected: try: getter = data.getlist except AttributeError: pass return getter(name) def format_value(self, value): """Return selected values as a list.""" if value is None and self.allow_multiple_selected: return [] if not isinstance(value, (tuple, list)): value = [value] return [str(v) if v is not None else '' for v in value] class Select(ChoiceWidget): input_type = 'select' template_name = 'django/forms/widgets/select.html' option_template_name = 'django/forms/widgets/select_option.html' add_id_index = False checked_attribute = {'selected': True} option_inherits_attrs = False def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) if self.allow_multiple_selected: context['widget']['attrs']['multiple'] = True return context @staticmethod def _choice_has_empty_value(choice): """Return True if the choice's value is empty string or None.""" value, _ = choice return value is None or value == '' def use_required_attribute(self, initial): """ Don't render 'required' if the first <option> has a value, as that's invalid HTML. """ use_required_attribute = super().use_required_attribute(initial) # 'required' is always okay for <select multiple>. 
if self.allow_multiple_selected: return use_required_attribute first_choice = next(iter(self.choices), None) return use_required_attribute and first_choice is not None and self._choice_has_empty_value(first_choice) class NullBooleanSelect(Select): """ A Select widget intended to be used with NullBooleanField. """ def __init__(self, attrs=None): choices = ( ('unknown', _('Unknown')), ('true', _('Yes')), ('false', _('No')), ) super().__init__(attrs, choices) def format_value(self, value): try: return { True: 'true', False: 'false', 'true': 'true', 'false': 'false', # For backwards compatibility with Django < 2.2. '2': 'true', '3': 'false', }[value] except KeyError: return 'unknown' def value_from_datadict(self, data, files, name): value = data.get(name) return { True: True, 'True': True, 'False': False, False: False, 'true': True, 'false': False, # For backwards compatibility with Django < 2.2. '2': True, '3': False, }.get(value) class SelectMultiple(Select): allow_multiple_selected = True def value_from_datadict(self, data, files, name): try: getter = data.getlist except AttributeError: getter = data.get return getter(name) def value_omitted_from_data(self, data, files, name): # An unselected <select multiple> doesn't appear in POST data, so it's # never known if the value is actually omitted. return False class RadioSelect(ChoiceWidget): input_type = 'radio' template_name = 'django/forms/widgets/radio.html' option_template_name = 'django/forms/widgets/radio_option.html' class CheckboxSelectMultiple(ChoiceWidget): allow_multiple_selected = True input_type = 'checkbox' template_name = 'django/forms/widgets/checkbox_select.html' option_template_name = 'django/forms/widgets/checkbox_option.html' def use_required_attribute(self, initial): # Don't use the 'required' attribute because browser validation would # require all checkboxes to be checked instead of at least one. return False def value_omitted_from_data(self, data, files, name): # HTML checkboxes don't appear in POST data if not checked, so it's # never known if the value is actually omitted. return False def id_for_label(self, id_, index=None): """ Don't include for="field_0" in <label> because clicking such a label would toggle the first checkbox. """ if index is None: return '' return super().id_for_label(id_, index) class MultiWidget(Widget): """ A widget that is composed of multiple widgets. In addition to the values added by Widget.get_context(), this widget adds a list of subwidgets to the context as widget['subwidgets']. These can be looped over and rendered like normal widgets. You'll probably want to use this class with MultiValueField. """ template_name = 'django/forms/widgets/multiwidget.html' def __init__(self, widgets, attrs=None): if isinstance(widgets, dict): self.widgets_names = [ ('_%s' % name) if name else '' for name in widgets ] widgets = widgets.values() else: self.widgets_names = ['_%s' % i for i in range(len(widgets))] self.widgets = [w() if isinstance(w, type) else w for w in widgets] super().__init__(attrs) @property def is_hidden(self): return all(w.is_hidden for w in self.widgets) def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) if self.is_localized: for widget in self.widgets: widget.is_localized = self.is_localized # value is a list of values, each corresponding to a widget # in self.widgets.
if not isinstance(value, list): value = self.decompress(value) final_attrs = context['widget']['attrs'] input_type = final_attrs.pop('type', None) id_ = final_attrs.get('id') subwidgets = [] for i, (widget_name, widget) in enumerate(zip(self.widgets_names, self.widgets)): if input_type is not None: widget.input_type = input_type widget_name = name + widget_name try: widget_value = value[i] except IndexError: widget_value = None if id_: widget_attrs = final_attrs.copy() widget_attrs['id'] = '%s_%s' % (id_, i) else: widget_attrs = final_attrs subwidgets.append(widget.get_context(widget_name, widget_value, widget_attrs)['widget']) context['widget']['subwidgets'] = subwidgets return context def id_for_label(self, id_): if id_: id_ += '_0' return id_ def value_from_datadict(self, data, files, name): return [ widget.value_from_datadict(data, files, name + widget_name) for widget_name, widget in zip(self.widgets_names, self.widgets) ] def value_omitted_from_data(self, data, files, name): return all( widget.value_omitted_from_data(data, files, name + widget_name) for widget_name, widget in zip(self.widgets_names, self.widgets) ) def decompress(self, value): """ Return a list of decompressed values for the given compressed value. The given value can be assumed to be valid, but not necessarily non-empty. """ raise NotImplementedError('Subclasses must implement this method.') def _get_media(self): """ Media for a multiwidget is the combination of all media of the subwidgets. """ media = Media() for w in self.widgets: media = media + w.media return media media = property(_get_media) def __deepcopy__(self, memo): obj = super().__deepcopy__(memo) obj.widgets = copy.deepcopy(self.widgets) return obj @property def needs_multipart_form(self): return any(w.needs_multipart_form for w in self.widgets) class SplitDateTimeWidget(MultiWidget): """ A widget that splits datetime input into two <input type="text"> boxes. """ supports_microseconds = False template_name = 'django/forms/widgets/splitdatetime.html' def __init__(self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None): widgets = ( DateInput( attrs=attrs if date_attrs is None else date_attrs, format=date_format, ), TimeInput( attrs=attrs if time_attrs is None else time_attrs, format=time_format, ), ) super().__init__(widgets) def decompress(self, value): if value: value = to_current_timezone(value) return [value.date(), value.time()] return [None, None] class SplitHiddenDateTimeWidget(SplitDateTimeWidget): """ A widget that splits datetime input into two <input type="hidden"> inputs. """ template_name = 'django/forms/widgets/splithiddendatetime.html' def __init__(self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None): super().__init__(attrs, date_format, time_format, date_attrs, time_attrs) for widget in self.widgets: widget.input_type = 'hidden' class SelectDateWidget(Widget): """ A widget that splits date input into three <select> boxes. This also serves as an example of a Widget that has more than one HTML element and hence implements value_from_datadict. """ none_value = ('', '---') month_field = '%s_month' day_field = '%s_day' year_field = '%s_year' template_name = 'django/forms/widgets/select_date.html' input_type = 'select' select_widget = Select date_re = _lazy_re_compile(r'(\d{4}|0)-(\d\d?)-(\d\d?)$') def __init__(self, attrs=None, years=None, months=None, empty_label=None): self.attrs = attrs or {} # Optional list or tuple of years to use in the "year" select box. 
if years: self.years = years else: this_year = datetime.date.today().year self.years = range(this_year, this_year + 10) # Optional dict of months to use in the "month" select box. if months: self.months = months else: self.months = MONTHS # Optional string, list, or tuple to use as empty_label. if isinstance(empty_label, (list, tuple)): if not len(empty_label) == 3: raise ValueError('empty_label list/tuple must have 3 elements.') self.year_none_value = ('', empty_label[0]) self.month_none_value = ('', empty_label[1]) self.day_none_value = ('', empty_label[2]) else: if empty_label is not None: self.none_value = ('', empty_label) self.year_none_value = self.none_value self.month_none_value = self.none_value self.day_none_value = self.none_value def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) date_context = {} year_choices = [(i, str(i)) for i in self.years] if not self.is_required: year_choices.insert(0, self.year_none_value) year_name = self.year_field % name date_context['year'] = self.select_widget(attrs, choices=year_choices).get_context( name=year_name, value=context['widget']['value']['year'], attrs={**context['widget']['attrs'], 'id': 'id_%s' % year_name}, ) month_choices = list(self.months.items()) if not self.is_required: month_choices.insert(0, self.month_none_value) month_name = self.month_field % name date_context['month'] = self.select_widget(attrs, choices=month_choices).get_context( name=month_name, value=context['widget']['value']['month'], attrs={**context['widget']['attrs'], 'id': 'id_%s' % month_name}, ) day_choices = [(i, i) for i in range(1, 32)] if not self.is_required: day_choices.insert(0, self.day_none_value) day_name = self.day_field % name date_context['day'] = self.select_widget(attrs, choices=day_choices,).get_context( name=day_name, value=context['widget']['value']['day'], attrs={**context['widget']['attrs'], 'id': 'id_%s' % day_name}, ) subwidgets = [] for field in self._parse_date_fmt(): subwidgets.append(date_context[field]['widget']) context['widget']['subwidgets'] = subwidgets return context def format_value(self, value): """ Return a dict containing the year, month, and day of the current value. Use dict instead of a datetime to allow invalid dates such as February 31 to display correctly. """ year, month, day = None, None, None if isinstance(value, (datetime.date, datetime.datetime)): year, month, day = value.year, value.month, value.day elif isinstance(value, str): match = self.date_re.match(value) if match: # Convert any zeros in the date to empty strings to match the # empty option value. 
year, month, day = [int(val) or '' for val in match.groups()] else: input_format = get_format('DATE_INPUT_FORMATS')[0] try: d = datetime.datetime.strptime(value, input_format) except ValueError: pass else: year, month, day = d.year, d.month, d.day return {'year': year, 'month': month, 'day': day} @staticmethod def _parse_date_fmt(): fmt = get_format('DATE_FORMAT') escaped = False for char in fmt: if escaped: escaped = False elif char == '\\': escaped = True elif char in 'Yy': yield 'year' elif char in 'bEFMmNn': yield 'month' elif char in 'dj': yield 'day' def id_for_label(self, id_): for first_select in self._parse_date_fmt(): return '%s_%s' % (id_, first_select) return '%s_month' % id_ def value_from_datadict(self, data, files, name): y = data.get(self.year_field % name) m = data.get(self.month_field % name) d = data.get(self.day_field % name) if y == m == d == '': return None if y is not None and m is not None and d is not None: input_format = get_format('DATE_INPUT_FORMATS')[0] try: date_value = datetime.date(int(y), int(m), int(d)) except ValueError: # Return pseudo-ISO dates with zeros for any unselected values, # e.g. '2017-0-23'. return '%s-%s-%s' % (y or 0, m or 0, d or 0) date_value = datetime_safe.new_date(date_value) return date_value.strftime(input_format) return data.get(name) def value_omitted_from_data(self, data, files, name): return not any( ('{}_{}'.format(name, interval) in data) for interval in ('year', 'month', 'day') )
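# ---------------------------------------------------------------------------
# Usage sketch, illustrative and not part of widgets.py: how Media definitions
# declared on widgets combine through Media.merge() above. The widget classes
# and asset paths are hypothetical; assumes Django is installed.
if __name__ == '__main__':
    import django
    from django.conf import settings

    settings.configure(INSTALLED_APPS=[], STATIC_URL='/static/')
    django.setup()

    from django import forms

    class CalendarWidget(forms.TextInput):
        class Media:
            css = {'all': ['calendar.css']}
            js = ['core.js', 'calendar.js']

    class ClockWidget(forms.TextInput):
        class Media:
            js = ['core.js', 'clock.js']

    # "+" concatenates the underlying lists; rendering triggers the
    # topological merge, so 'core.js' is emitted once and the relative order
    # within each list is preserved.
    combined = CalendarWidget().media + ClockWidget().media
    print(combined)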
""" LANG_INFO is a dictionary structure to provide meta information about languages. About name_local: capitalize it as if your language name was appearing inside a sentence in your language. The 'fallback' key can be used to specify a special fallback logic which doesn't follow the traditional 'fr-ca' -> 'fr' fallback logic. """ LANG_INFO = { 'af': { 'bidi': False, 'code': 'af', 'name': 'Afrikaans', 'name_local': 'Afrikaans', }, 'ar': { 'bidi': True, 'code': 'ar', 'name': 'Arabic', 'name_local': 'العربيّة', }, 'ar-dz': { 'bidi': True, 'code': 'ar-dz', 'name': 'Algerian Arabic', 'name_local': 'العربية الجزائرية', }, 'ast': { 'bidi': False, 'code': 'ast', 'name': 'Asturian', 'name_local': 'asturianu', }, 'az': { 'bidi': True, 'code': 'az', 'name': 'Azerbaijani', 'name_local': 'Azərbaycanca', }, 'be': { 'bidi': False, 'code': 'be', 'name': 'Belarusian', 'name_local': 'беларуская', }, 'bg': { 'bidi': False, 'code': 'bg', 'name': 'Bulgarian', 'name_local': 'български', }, 'bn': { 'bidi': False, 'code': 'bn', 'name': 'Bengali', 'name_local': 'বাংলা', }, 'br': { 'bidi': False, 'code': 'br', 'name': 'Breton', 'name_local': 'brezhoneg', }, 'bs': { 'bidi': False, 'code': 'bs', 'name': 'Bosnian', 'name_local': 'bosanski', }, 'ca': { 'bidi': False, 'code': 'ca', 'name': 'Catalan', 'name_local': 'català', }, 'cs': { 'bidi': False, 'code': 'cs', 'name': 'Czech', 'name_local': 'česky', }, 'cy': { 'bidi': False, 'code': 'cy', 'name': 'Welsh', 'name_local': 'Cymraeg', }, 'da': { 'bidi': False, 'code': 'da', 'name': 'Danish', 'name_local': 'dansk', }, 'de': { 'bidi': False, 'code': 'de', 'name': 'German', 'name_local': 'Deutsch', }, 'dsb': { 'bidi': False, 'code': 'dsb', 'name': 'Lower Sorbian', 'name_local': 'dolnoserbski', }, 'el': { 'bidi': False, 'code': 'el', 'name': 'Greek', 'name_local': 'Ελληνικά', }, 'en': { 'bidi': False, 'code': 'en', 'name': 'English', 'name_local': 'English', }, 'en-au': { 'bidi': False, 'code': 'en-au', 'name': 'Australian English', 'name_local': 'Australian English', }, 'en-gb': { 'bidi': False, 'code': 'en-gb', 'name': 'British English', 'name_local': 'British English', }, 'eo': { 'bidi': False, 'code': 'eo', 'name': 'Esperanto', 'name_local': 'Esperanto', }, 'es': { 'bidi': False, 'code': 'es', 'name': 'Spanish', 'name_local': 'español', }, 'es-ar': { 'bidi': False, 'code': 'es-ar', 'name': 'Argentinian Spanish', 'name_local': 'español de Argentina', }, 'es-co': { 'bidi': False, 'code': 'es-co', 'name': 'Colombian Spanish', 'name_local': 'español de Colombia', }, 'es-mx': { 'bidi': False, 'code': 'es-mx', 'name': 'Mexican Spanish', 'name_local': 'español de Mexico', }, 'es-ni': { 'bidi': False, 'code': 'es-ni', 'name': 'Nicaraguan Spanish', 'name_local': 'español de Nicaragua', }, 'es-ve': { 'bidi': False, 'code': 'es-ve', 'name': 'Venezuelan Spanish', 'name_local': 'español de Venezuela', }, 'et': { 'bidi': False, 'code': 'et', 'name': 'Estonian', 'name_local': 'eesti', }, 'eu': { 'bidi': False, 'code': 'eu', 'name': 'Basque', 'name_local': 'Basque', }, 'fa': { 'bidi': True, 'code': 'fa', 'name': 'Persian', 'name_local': 'فارسی', }, 'fi': { 'bidi': False, 'code': 'fi', 'name': 'Finnish', 'name_local': 'suomi', }, 'fr': { 'bidi': False, 'code': 'fr', 'name': 'French', 'name_local': 'français', }, 'fy': { 'bidi': False, 'code': 'fy', 'name': 'Frisian', 'name_local': 'frysk', }, 'ga': { 'bidi': False, 'code': 'ga', 'name': 'Irish', 'name_local': 'Gaeilge', }, 'gd': { 'bidi': False, 'code': 'gd', 'name': 'Scottish Gaelic', 'name_local': 'Gàidhlig', }, 'gl': { 'bidi': False, 
'code': 'gl', 'name': 'Galician', 'name_local': 'galego', }, 'he': { 'bidi': True, 'code': 'he', 'name': 'Hebrew', 'name_local': 'עברית', }, 'hi': { 'bidi': False, 'code': 'hi', 'name': 'Hindi', 'name_local': 'हिंदी', }, 'hr': { 'bidi': False, 'code': 'hr', 'name': 'Croatian', 'name_local': 'Hrvatski', }, 'hsb': { 'bidi': False, 'code': 'hsb', 'name': 'Upper Sorbian', 'name_local': 'hornjoserbsce', }, 'hu': { 'bidi': False, 'code': 'hu', 'name': 'Hungarian', 'name_local': 'Magyar', }, 'hy': { 'bidi': False, 'code': 'hy', 'name': 'Armenian', 'name_local': 'հայերեն', }, 'ia': { 'bidi': False, 'code': 'ia', 'name': 'Interlingua', 'name_local': 'Interlingua', }, 'io': { 'bidi': False, 'code': 'io', 'name': 'Ido', 'name_local': 'ido', }, 'id': { 'bidi': False, 'code': 'id', 'name': 'Indonesian', 'name_local': 'Bahasa Indonesia', }, 'ig': { 'bidi': False, 'code': 'ig', 'name': 'Igbo', 'name_local': 'Asụsụ Ìgbò', }, 'is': { 'bidi': False, 'code': 'is', 'name': 'Icelandic', 'name_local': 'Íslenska', }, 'it': { 'bidi': False, 'code': 'it', 'name': 'Italian', 'name_local': 'italiano', }, 'ja': { 'bidi': False, 'code': 'ja', 'name': 'Japanese', 'name_local': '日本語', }, 'ka': { 'bidi': False, 'code': 'ka', 'name': 'Georgian', 'name_local': 'ქართული', }, 'kab': { 'bidi': False, 'code': 'kab', 'name': 'Kabyle', 'name_local': 'taqbaylit', }, 'kk': { 'bidi': False, 'code': 'kk', 'name': 'Kazakh', 'name_local': 'Қазақ', }, 'km': { 'bidi': False, 'code': 'km', 'name': 'Khmer', 'name_local': 'Khmer', }, 'kn': { 'bidi': False, 'code': 'kn', 'name': 'Kannada', 'name_local': 'Kannada', }, 'ko': { 'bidi': False, 'code': 'ko', 'name': 'Korean', 'name_local': '한국어', }, 'ky': { 'bidi': False, 'code': 'ky', 'name': 'Kyrgyz', 'name_local': 'Кыргызча', }, 'lb': { 'bidi': False, 'code': 'lb', 'name': 'Luxembourgish', 'name_local': 'Lëtzebuergesch', }, 'lt': { 'bidi': False, 'code': 'lt', 'name': 'Lithuanian', 'name_local': 'Lietuviškai', }, 'lv': { 'bidi': False, 'code': 'lv', 'name': 'Latvian', 'name_local': 'latviešu', }, 'mk': { 'bidi': False, 'code': 'mk', 'name': 'Macedonian', 'name_local': 'Македонски', }, 'ml': { 'bidi': False, 'code': 'ml', 'name': 'Malayalam', 'name_local': 'Malayalam', }, 'mn': { 'bidi': False, 'code': 'mn', 'name': 'Mongolian', 'name_local': 'Mongolian', }, 'mr': { 'bidi': False, 'code': 'mr', 'name': 'Marathi', 'name_local': 'मराठी', }, 'my': { 'bidi': False, 'code': 'my', 'name': 'Burmese', 'name_local': 'မြန်မာဘာသာ', }, 'nb': { 'bidi': False, 'code': 'nb', 'name': 'Norwegian Bokmal', 'name_local': 'norsk (bokmål)', }, 'ne': { 'bidi': False, 'code': 'ne', 'name': 'Nepali', 'name_local': 'नेपाली', }, 'nl': { 'bidi': False, 'code': 'nl', 'name': 'Dutch', 'name_local': 'Nederlands', }, 'nn': { 'bidi': False, 'code': 'nn', 'name': 'Norwegian Nynorsk', 'name_local': 'norsk (nynorsk)', }, 'no': { 'bidi': False, 'code': 'no', 'name': 'Norwegian', 'name_local': 'norsk', }, 'os': { 'bidi': False, 'code': 'os', 'name': 'Ossetic', 'name_local': 'Ирон', }, 'pa': { 'bidi': False, 'code': 'pa', 'name': 'Punjabi', 'name_local': 'Punjabi', }, 'pl': { 'bidi': False, 'code': 'pl', 'name': 'Polish', 'name_local': 'polski', }, 'pt': { 'bidi': False, 'code': 'pt', 'name': 'Portuguese', 'name_local': 'Português', }, 'pt-br': { 'bidi': False, 'code': 'pt-br', 'name': 'Brazilian Portuguese', 'name_local': 'Português Brasileiro', }, 'ro': { 'bidi': False, 'code': 'ro', 'name': 'Romanian', 'name_local': 'Română', }, 'ru': { 'bidi': False, 'code': 'ru', 'name': 'Russian', 'name_local': 'Русский', }, 'sk': { 'bidi': 
False, 'code': 'sk', 'name': 'Slovak', 'name_local': 'Slovensky', }, 'sl': { 'bidi': False, 'code': 'sl', 'name': 'Slovenian', 'name_local': 'Slovenščina', }, 'sq': { 'bidi': False, 'code': 'sq', 'name': 'Albanian', 'name_local': 'shqip', }, 'sr': { 'bidi': False, 'code': 'sr', 'name': 'Serbian', 'name_local': 'српски', }, 'sr-latn': { 'bidi': False, 'code': 'sr-latn', 'name': 'Serbian Latin', 'name_local': 'srpski (latinica)', }, 'sv': { 'bidi': False, 'code': 'sv', 'name': 'Swedish', 'name_local': 'svenska', }, 'sw': { 'bidi': False, 'code': 'sw', 'name': 'Swahili', 'name_local': 'Kiswahili', }, 'ta': { 'bidi': False, 'code': 'ta', 'name': 'Tamil', 'name_local': 'தமிழ்', }, 'te': { 'bidi': False, 'code': 'te', 'name': 'Telugu', 'name_local': 'తెలుగు', }, 'tg': { 'bidi': False, 'code': 'tg', 'name': 'Tajik', 'name_local': 'тоҷикӣ', }, 'th': { 'bidi': False, 'code': 'th', 'name': 'Thai', 'name_local': 'ภาษาไทย', }, 'tk': { 'bidi': False, 'code': 'tk', 'name': 'Turkmen', 'name_local': 'Türkmençe', }, 'tr': { 'bidi': False, 'code': 'tr', 'name': 'Turkish', 'name_local': 'Türkçe', }, 'tt': { 'bidi': False, 'code': 'tt', 'name': 'Tatar', 'name_local': 'Татарча', }, 'udm': { 'bidi': False, 'code': 'udm', 'name': 'Udmurt', 'name_local': 'Удмурт', }, 'uk': { 'bidi': False, 'code': 'uk', 'name': 'Ukrainian', 'name_local': 'Українська', }, 'ur': { 'bidi': True, 'code': 'ur', 'name': 'Urdu', 'name_local': 'اردو', }, 'uz': { 'bidi': False, 'code': 'uz', 'name': 'Uzbek', 'name_local': 'oʻzbek tili', }, 'vi': { 'bidi': False, 'code': 'vi', 'name': 'Vietnamese', 'name_local': 'Tiếng Việt', }, 'zh-cn': { 'fallback': ['zh-hans'], }, 'zh-hans': { 'bidi': False, 'code': 'zh-hans', 'name': 'Simplified Chinese', 'name_local': '简体中文', }, 'zh-hant': { 'bidi': False, 'code': 'zh-hant', 'name': 'Traditional Chinese', 'name_local': '繁體中文', }, 'zh-hk': { 'fallback': ['zh-hant'], }, 'zh-mo': { 'fallback': ['zh-hant'], }, 'zh-my': { 'fallback': ['zh-hans'], }, 'zh-sg': { 'fallback': ['zh-hans'], }, 'zh-tw': { 'fallback': ['zh-hant'], }, }
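# ---------------------------------------------------------------------------
# Illustrative sketch, not part of this module: LANG_INFO is normally read
# through django.utils.translation.get_language_info(), which resolves the
# 'fallback' entries above instead of the usual 'fr-ca' -> 'fr' logic.
if __name__ == '__main__':
    from django.utils.translation import get_language_info

    info = get_language_info('zh-my')  # resolves the fallback to 'zh-hans'
    print(info['code'], info['name'], info['name_local'])
    # -> zh-hans Simplified Chinese 简体中文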
# This file is distributed under the same license as the Django package. # # The *_FORMAT strings use the Django date format syntax, # see https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'j F Y' TIME_FORMAT = 'P' DATETIME_FORMAT = 'j F Y P' YEAR_MONTH_FORMAT = 'F Y' MONTH_DAY_FORMAT = 'j F' SHORT_DATE_FORMAT = 'd.m.Y' SHORT_DATETIME_FORMAT = 'd.m.Y H:i' FIRST_DAY_OF_WEEK = 1 # Monday # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see https://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = [ '%d.%m.%Y', # '25.10.2006' '%d.%m.%y', # '25.10.06' ] DATETIME_INPUT_FORMATS = [ '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59' '%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200' '%d.%m.%Y %H:%M', # '25.10.2006 14:30' '%d.%m.%Y', # '25.10.2006' '%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59' '%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200' '%d.%m.%y %H:%M', # '25.10.06 14:30' '%d.%m.%y', # '25.10.06' ] DECIMAL_SEPARATOR = '.' THOUSAND_SEPARATOR = ',' NUMBER_GROUPING = 3
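# ---------------------------------------------------------------------------
# Illustrative sketch, not part of this formats module: Django's date form
# fields try each *_INPUT_FORMATS entry in order until one parses, roughly
# like the plain-strptime helper below.
if __name__ == '__main__':
    from datetime import datetime

    def parse_date(value, formats=DATE_INPUT_FORMATS):
        for fmt in formats:
            try:
                return datetime.strptime(value, fmt).date()
            except ValueError:
                continue
        raise ValueError('%r matches no DATE_INPUT_FORMATS entry' % value)

    print(parse_date('25.10.2006'))  # -> 2006-10-25
    print(parse_date('25.10.06'))    # -> 2006-10-25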
import pkgutil import sys from importlib import import_module, reload from django.apps import apps from django.conf import settings from django.db.migrations.graph import MigrationGraph from django.db.migrations.recorder import MigrationRecorder from .exceptions import ( AmbiguityError, BadMigrationError, InconsistentMigrationHistory, NodeNotFoundError, ) MIGRATIONS_MODULE_NAME = 'migrations' class MigrationLoader: """ Load migration files from disk and their status from the database. Migration files are expected to live in the "migrations" directory of an app. Their names are entirely unimportant from a code perspective, but will probably follow the 1234_name.py convention. On initialization, this class will scan those directories, and open and read the Python files, looking for a class called Migration, which should inherit from django.db.migrations.Migration. See django.db.migrations.migration for what that looks like. Some migrations will be marked as "replacing" another set of migrations. These are loaded into a separate set of migrations away from the main ones. If all the migrations they replace are either unapplied or missing from disk, then they are injected into the main set, replacing the named migrations. Any dependency pointers to the replaced migrations are re-pointed to the new migration. This does mean that this class MUST also talk to the database as well as to disk, but this is probably fine. We're already not just operating in memory. """ def __init__( self, connection, load=True, ignore_no_migrations=False, replace_migrations=True, ): self.connection = connection self.disk_migrations = None self.applied_migrations = None self.ignore_no_migrations = ignore_no_migrations self.replace_migrations = replace_migrations if load: self.build_graph() @classmethod def migrations_module(cls, app_label): """ Return the path to the migrations module for the specified app_label and a boolean indicating if the module is specified in settings.MIGRATION_MODULES. """ if app_label in settings.MIGRATION_MODULES: return settings.MIGRATION_MODULES[app_label], True else: app_package_name = apps.get_app_config(app_label).name return '%s.%s' % (app_package_name, MIGRATIONS_MODULE_NAME), False def load_disk(self): """Load the migrations from all INSTALLED_APPS from disk.""" self.disk_migrations = {} self.unmigrated_apps = set() self.migrated_apps = set() for app_config in apps.get_app_configs(): # Get the migrations module directory module_name, explicit = self.migrations_module(app_config.label) if module_name is None: self.unmigrated_apps.add(app_config.label) continue was_loaded = module_name in sys.modules try: module = import_module(module_name) except ModuleNotFoundError as e: if ( (explicit and self.ignore_no_migrations) or (not explicit and MIGRATIONS_MODULE_NAME in e.name.split('.')) ): self.unmigrated_apps.add(app_config.label) continue raise else: # Module is not a package (e.g. migrations.py).
                if not hasattr(module, '__path__'):
                    self.unmigrated_apps.add(app_config.label)
                    continue
                # Force a reload if it's already loaded (tests need this)
                if was_loaded:
                    reload(module)
            migration_names = {
                name for _, name, is_pkg in pkgutil.iter_modules(module.__path__)
                if not is_pkg and name[0] not in '_~'
            }
            if migration_names or self.ignore_no_migrations:
                self.migrated_apps.add(app_config.label)
            else:
                self.unmigrated_apps.add(app_config.label)
            # Load migrations
            for migration_name in migration_names:
                migration_path = '%s.%s' % (module_name, migration_name)
                try:
                    migration_module = import_module(migration_path)
                except ImportError as e:
                    if 'bad magic number' in str(e):
                        raise ImportError(
                            "Couldn't import %r as it appears to be a stale "
                            ".pyc file." % migration_path
                        ) from e
                    else:
                        raise
                if not hasattr(migration_module, "Migration"):
                    raise BadMigrationError(
                        "Migration %s in app %s has no Migration class" % (migration_name, app_config.label)
                    )
                self.disk_migrations[app_config.label, migration_name] = migration_module.Migration(
                    migration_name,
                    app_config.label,
                )

    def get_migration(self, app_label, name_prefix):
        """Return the named migration or raise NodeNotFoundError."""
        return self.graph.nodes[app_label, name_prefix]

    def get_migration_by_prefix(self, app_label, name_prefix):
        """
        Return the migration which matches the given app label and name prefix.
        """
        # Do the search
        results = []
        for migration_app_label, migration_name in self.disk_migrations:
            if migration_app_label == app_label and migration_name.startswith(name_prefix):
                results.append((migration_app_label, migration_name))
        if len(results) > 1:
            raise AmbiguityError(
                "There is more than one migration for '%s' with the prefix '%s'" % (app_label, name_prefix)
            )
        elif not results:
            raise KeyError("There are no migrations for '%s' with the prefix '%s'" % (app_label, name_prefix))
        else:
            return self.disk_migrations[results[0]]

    def check_key(self, key, current_app):
        if (key[1] != "__first__" and key[1] != "__latest__") or key in self.graph:
            return key
        # Special-case __first__, which means "the first migration" for
        # migrated apps, and is ignored for unmigrated apps. It allows
        # makemigrations to declare dependencies on apps before they even have
        # migrations.
        if key[0] == current_app:
            # Ignore __first__ references to the same app (#22325)
            return
        if key[0] in self.unmigrated_apps:
            # This app isn't migrated, but something depends on it.
            # The models will get auto-added into the state, though
            # so we're fine.
            return
        if key[0] in self.migrated_apps:
            try:
                if key[1] == "__first__":
                    return self.graph.root_nodes(key[0])[0]
                else:  # "__latest__"
                    return self.graph.leaf_nodes(key[0])[0]
            except IndexError:
                if self.ignore_no_migrations:
                    return None
                else:
                    raise ValueError("Dependency on app with no migrations: %s" % key[0])
        raise ValueError("Dependency on unknown app: %s" % key[0])

    def add_internal_dependencies(self, key, migration):
        """
        Internal dependencies need to be added first to ensure `__first__`
        dependencies find the correct root node.
        """
        for parent in migration.dependencies:
            # Ignore __first__ references to the same app.
if parent[0] == key[0] and parent[1] != '__first__': self.graph.add_dependency(migration, key, parent, skip_validation=True) def add_external_dependencies(self, key, migration): for parent in migration.dependencies: # Skip internal dependencies if key[0] == parent[0]: continue parent = self.check_key(parent, key[0]) if parent is not None: self.graph.add_dependency(migration, key, parent, skip_validation=True) for child in migration.run_before: child = self.check_key(child, key[0]) if child is not None: self.graph.add_dependency(migration, child, key, skip_validation=True) def build_graph(self): """ Build a migration dependency graph using both the disk and database. You'll need to rebuild the graph if you apply migrations. This isn't usually a problem as generally migration stuff runs in a one-shot process. """ # Load disk data self.load_disk() # Load database data if self.connection is None: self.applied_migrations = {} else: recorder = MigrationRecorder(self.connection) self.applied_migrations = recorder.applied_migrations() # To start, populate the migration graph with nodes for ALL migrations # and their dependencies. Also make note of replacing migrations at this step. self.graph = MigrationGraph() self.replacements = {} for key, migration in self.disk_migrations.items(): self.graph.add_node(key, migration) # Replacing migrations. if migration.replaces: self.replacements[key] = migration for key, migration in self.disk_migrations.items(): # Internal (same app) dependencies. self.add_internal_dependencies(key, migration) # Add external dependencies now that the internal ones have been resolved. for key, migration in self.disk_migrations.items(): self.add_external_dependencies(key, migration) # Carry out replacements where possible and if enabled. if self.replace_migrations: for key, migration in self.replacements.items(): # Get applied status of each of this migration's replacement # targets. applied_statuses = [(target in self.applied_migrations) for target in migration.replaces] # The replacing migration is only marked as applied if all of # its replacement targets are. if all(applied_statuses): self.applied_migrations[key] = migration else: self.applied_migrations.pop(key, None) # A replacing migration can be used if either all or none of # its replacement targets have been applied. if all(applied_statuses) or (not any(applied_statuses)): self.graph.remove_replaced_nodes(key, migration.replaces) else: # This replacing migration cannot be used because it is # partially applied. Remove it from the graph and remap # dependencies to it (#25945). self.graph.remove_replacement_node(key, migration.replaces) # Ensure the graph is consistent. try: self.graph.validate_consistency() except NodeNotFoundError as exc: # Check if the missing node could have been replaced by any squash # migration but wasn't because the squash migration was partially # applied before. In that case raise a more understandable exception # (#23556). # Get reverse replacements. reverse_replacements = {} for key, migration in self.replacements.items(): for replaced in migration.replaces: reverse_replacements.setdefault(replaced, set()).add(key) # Try to reraise exception with more detail. if exc.node in reverse_replacements: candidates = reverse_replacements.get(exc.node, set()) is_replaced = any(candidate in self.graph.nodes for candidate in candidates) if not is_replaced: tries = ', '.join('%s.%s' % c for c in candidates) raise NodeNotFoundError( "Migration {0} depends on nonexistent node ('{1}', '{2}'). 
" "Django tried to replace migration {1}.{2} with any of [{3}] " "but wasn't able to because some of the replaced migrations " "are already applied.".format( exc.origin, exc.node[0], exc.node[1], tries ), exc.node ) from exc raise self.graph.ensure_not_cyclic() def check_consistent_history(self, connection): """ Raise InconsistentMigrationHistory if any applied migrations have unapplied dependencies. """ recorder = MigrationRecorder(connection) applied = recorder.applied_migrations() for migration in applied: # If the migration is unknown, skip it. if migration not in self.graph.nodes: continue for parent in self.graph.node_map[migration].parents: if parent not in applied: # Skip unapplied squashed migrations that have all of their # `replaces` applied. if parent in self.replacements: if all(m in applied for m in self.replacements[parent].replaces): continue raise InconsistentMigrationHistory( "Migration {}.{} is applied before its dependency " "{}.{} on database '{}'.".format( migration[0], migration[1], parent[0], parent[1], connection.alias, ) ) def detect_conflicts(self): """ Look through the loaded graph and detect any conflicts - apps with more than one leaf migration. Return a dict of the app labels that conflict with the migration names that conflict. """ seen_apps = {} conflicting_apps = set() for app_label, migration_name in self.graph.leaf_nodes(): if app_label in seen_apps: conflicting_apps.add(app_label) seen_apps.setdefault(app_label, set()).add(migration_name) return {app_label: sorted(seen_apps[app_label]) for app_label in conflicting_apps} def project_state(self, nodes=None, at_end=True): """ Return a ProjectState object representing the most recent state that the loaded migrations represent. See graph.make_state() for the meaning of "nodes" and "at_end". """ return self.graph.make_state(nodes=nodes, at_end=at_end, real_apps=list(self.unmigrated_apps)) def collect_sql(self, plan): """ Take a migration plan and return a list of collected SQL statements that represent the best-efforts version of that plan. """ statements = [] state = None for migration, backwards in plan: with self.connection.schema_editor(collect_sql=True, atomic=migration.atomic) as schema_editor: if state is None: state = self.project_state((migration.app_label, migration.name), at_end=False) if not backwards: state = migration.apply(state, schema_editor, collect_sql=True) else: state = migration.unapply(state, schema_editor, collect_sql=True) statements.extend(schema_editor.collected_sql) return statements
import copy import datetime import functools import inspect from decimal import Decimal from uuid import UUID from django.core.exceptions import EmptyResultSet, FieldError from django.db import NotSupportedError, connection from django.db.models import fields from django.db.models.constants import LOOKUP_SEP from django.db.models.query_utils import Q from django.utils.deconstruct import deconstructible from django.utils.functional import cached_property from django.utils.hashable import make_hashable class SQLiteNumericMixin: """ Some expressions with output_field=DecimalField() must be cast to numeric to be properly filtered. """ def as_sqlite(self, compiler, connection, **extra_context): sql, params = self.as_sql(compiler, connection, **extra_context) try: if self.output_field.get_internal_type() == 'DecimalField': sql = 'CAST(%s AS NUMERIC)' % sql except FieldError: pass return sql, params class Combinable: """ Provide the ability to combine one or two objects with some connector. For example F('foo') + F('bar'). """ # Arithmetic connectors ADD = '+' SUB = '-' MUL = '*' DIV = '/' POW = '^' # The following is a quoted % operator - it is quoted because it can be # used in strings that also have parameter substitution. MOD = '%%' # Bitwise operators - note that these are generated by .bitand() # and .bitor(), the '&' and '|' are reserved for boolean operator # usage. BITAND = '&' BITOR = '|' BITLEFTSHIFT = '<<' BITRIGHTSHIFT = '>>' BITXOR = '#' def _combine(self, other, connector, reversed): if not hasattr(other, 'resolve_expression'): # everything must be resolvable to an expression other = Value(other) if reversed: return CombinedExpression(other, connector, self) return CombinedExpression(self, connector, other) ############# # OPERATORS # ############# def __neg__(self): return self._combine(-1, self.MUL, False) def __add__(self, other): return self._combine(other, self.ADD, False) def __sub__(self, other): return self._combine(other, self.SUB, False) def __mul__(self, other): return self._combine(other, self.MUL, False) def __truediv__(self, other): return self._combine(other, self.DIV, False) def __mod__(self, other): return self._combine(other, self.MOD, False) def __pow__(self, other): return self._combine(other, self.POW, False) def __and__(self, other): if getattr(self, 'conditional', False) and getattr(other, 'conditional', False): return Q(self) & Q(other) raise NotImplementedError( "Use .bitand() and .bitor() for bitwise logical operations." ) def bitand(self, other): return self._combine(other, self.BITAND, False) def bitleftshift(self, other): return self._combine(other, self.BITLEFTSHIFT, False) def bitrightshift(self, other): return self._combine(other, self.BITRIGHTSHIFT, False) def bitxor(self, other): return self._combine(other, self.BITXOR, False) def __or__(self, other): if getattr(self, 'conditional', False) and getattr(other, 'conditional', False): return Q(self) | Q(other) raise NotImplementedError( "Use .bitand() and .bitor() for bitwise logical operations." 
        )

    def bitor(self, other):
        return self._combine(other, self.BITOR, False)

    def __radd__(self, other):
        return self._combine(other, self.ADD, True)

    def __rsub__(self, other):
        return self._combine(other, self.SUB, True)

    def __rmul__(self, other):
        return self._combine(other, self.MUL, True)

    def __rtruediv__(self, other):
        return self._combine(other, self.DIV, True)

    def __rmod__(self, other):
        return self._combine(other, self.MOD, True)

    def __rpow__(self, other):
        return self._combine(other, self.POW, True)

    def __rand__(self, other):
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )

    def __ror__(self, other):
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )


@deconstructible
class BaseExpression:
    """Base class for all query expressions."""

    # aggregate specific fields
    is_summary = False
    _output_field_resolved_to_none = False
    # Can the expression be used in a WHERE clause?
    filterable = True
    # Can the expression be used as a source expression in Window?
    window_compatible = False

    def __init__(self, output_field=None):
        if output_field is not None:
            self.output_field = output_field

    def __getstate__(self):
        state = self.__dict__.copy()
        state.pop('convert_value', None)
        return state

    def get_db_converters(self, connection):
        return (
            []
            if self.convert_value is self._convert_value_noop else
            [self.convert_value]
        ) + self.output_field.get_db_converters(connection)

    def get_source_expressions(self):
        return []

    def set_source_expressions(self, exprs):
        assert not exprs

    def _parse_expressions(self, *expressions):
        return [
            arg if hasattr(arg, 'resolve_expression') else (
                F(arg) if isinstance(arg, str) else Value(arg)
            ) for arg in expressions
        ]

    def as_sql(self, compiler, connection):
        """
        Responsible for returning a (sql, [params]) tuple to be included
        in the current query.

        Different backends can provide their own implementation, by
        providing an `as_{vendor}` method and patching the Expression:

        ```
        def override_as_sql(self, compiler, connection):
            # custom logic
            return super().as_sql(compiler, connection)
        setattr(Expression, 'as_' + connection.vendor, override_as_sql)
        ```

        Arguments:
         * compiler: the query compiler responsible for generating the query.
           Must have a compile method, returning a (sql, [params]) tuple.
           Calling compiler(value) will return a quoted `value`.

         * connection: the database connection used for the current query.

        Return: (sql, params)
          Where `sql` is a string containing ordered sql parameters to be
          replaced with the elements of the list `params`.
        """
        raise NotImplementedError("Subclasses must implement as_sql()")

    @cached_property
    def contains_aggregate(self):
        return any(expr and expr.contains_aggregate for expr in self.get_source_expressions())

    @cached_property
    def contains_over_clause(self):
        return any(expr and expr.contains_over_clause for expr in self.get_source_expressions())

    @cached_property
    def contains_column_references(self):
        return any(expr and expr.contains_column_references for expr in self.get_source_expressions())

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        """
        Provide the chance to do any preprocessing or validation before being
        added to the query.

        Arguments:
         * query: the backend query implementation
         * allow_joins: boolean allowing or denying use of joins
           in this query
         * reuse: a set of reusable joins for multijoins
         * summarize: a terminal aggregate clause
         * for_save: whether this expression is about to be used in a
           save or update

        Return: an Expression to be added to the query.
        """
        c = self.copy()
        c.is_summary = summarize
        c.set_source_expressions([
            expr.resolve_expression(query, allow_joins, reuse, summarize)
            if expr else None
            for expr in c.get_source_expressions()
        ])
        return c

    @property
    def conditional(self):
        return isinstance(self.output_field, fields.BooleanField)

    @property
    def field(self):
        return self.output_field

    @cached_property
    def output_field(self):
        """Return the output type of this expression."""
        output_field = self._resolve_output_field()
        if output_field is None:
            self._output_field_resolved_to_none = True
            raise FieldError('Cannot resolve expression type, unknown output_field')
        return output_field

    @cached_property
    def _output_field_or_none(self):
        """
        Return the output field of this expression, or None if
        _resolve_output_field() didn't return an output type.
        """
        try:
            return self.output_field
        except FieldError:
            if not self._output_field_resolved_to_none:
                raise

    def _resolve_output_field(self):
        """
        Attempt to infer the output type of the expression. If the output
        fields of all source fields match, then simply infer the same type
        here. This isn't always correct, but it makes sense most of the time.

        Consider the difference between `2 + 2` and `2 / 3`. Inferring
        the type here is a convenience for the common case. The user should
        supply their own output_field with more complex computations.

        If a source's output field resolves to None, exclude it from this check.
        If all sources are None, then an error is raised higher up the stack in
        the output_field property.
        """
        sources_iter = (source for source in self.get_source_fields() if source is not None)
        for output_field in sources_iter:
            for source in sources_iter:
                if not isinstance(output_field, source.__class__):
                    raise FieldError(
                        'Expression contains mixed types: %s, %s. You must '
                        'set output_field.' % (
                            output_field.__class__.__name__,
                            source.__class__.__name__,
                        )
                    )
            return output_field

    @staticmethod
    def _convert_value_noop(value, expression, connection):
        return value

    @cached_property
    def convert_value(self):
        """
        Expressions provide their own converters because users have the option
        of manually specifying the output_field which may be a different type
        from the one the database returns.
""" field = self.output_field internal_type = field.get_internal_type() if internal_type == 'FloatField': return lambda value, expression, connection: None if value is None else float(value) elif internal_type.endswith('IntegerField'): return lambda value, expression, connection: None if value is None else int(value) elif internal_type == 'DecimalField': return lambda value, expression, connection: None if value is None else Decimal(value) return self._convert_value_noop def get_lookup(self, lookup): return self.output_field.get_lookup(lookup) def get_transform(self, name): return self.output_field.get_transform(name) def relabeled_clone(self, change_map): clone = self.copy() clone.set_source_expressions([ e.relabeled_clone(change_map) if e is not None else None for e in self.get_source_expressions() ]) return clone def copy(self): return copy.copy(self) def get_group_by_cols(self, alias=None): if not self.contains_aggregate: return [self] cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols def get_source_fields(self): """Return the underlying field types used by this aggregate.""" return [e._output_field_or_none for e in self.get_source_expressions()] def asc(self, **kwargs): return OrderBy(self, **kwargs) def desc(self, **kwargs): return OrderBy(self, descending=True, **kwargs) def reverse_ordering(self): return self def flatten(self): """ Recursively yield this expression and all subexpressions, in depth-first order. """ yield self for expr in self.get_source_expressions(): if expr: yield from expr.flatten() def select_format(self, compiler, sql, params): """ Custom format for select clauses. For example, EXISTS expressions need to be wrapped in CASE WHEN on Oracle. """ if hasattr(self.output_field, 'select_format'): return self.output_field.select_format(compiler, sql, params) return sql, params @cached_property def identity(self): constructor_signature = inspect.signature(self.__init__) args, kwargs = self._constructor_args signature = constructor_signature.bind_partial(*args, **kwargs) signature.apply_defaults() arguments = signature.arguments.items() identity = [self.__class__] for arg, value in arguments: if isinstance(value, fields.Field): if value.name and value.model: value = (value.model._meta.label, value.name) else: value = type(value) else: value = make_hashable(value) identity.append((arg, value)) return tuple(identity) def __eq__(self, other): if not isinstance(other, BaseExpression): return NotImplemented return other.identity == self.identity def __hash__(self): return hash(self.identity) class Expression(BaseExpression, Combinable): """An expression that can be combined with other expressions.""" pass _connector_combinators = { connector: [ (fields.IntegerField, fields.DecimalField, fields.DecimalField), (fields.DecimalField, fields.IntegerField, fields.DecimalField), (fields.IntegerField, fields.FloatField, fields.FloatField), (fields.FloatField, fields.IntegerField, fields.FloatField), ] for connector in (Combinable.ADD, Combinable.SUB, Combinable.MUL, Combinable.DIV) } @functools.lru_cache(maxsize=128) def _resolve_combined_type(connector, lhs_type, rhs_type): combinators = _connector_combinators.get(connector, ()) for combinator_lhs_type, combinator_rhs_type, combined_type in combinators: if issubclass(lhs_type, combinator_lhs_type) and issubclass(rhs_type, combinator_rhs_type): return combined_type class CombinedExpression(SQLiteNumericMixin, Expression): def __init__(self, lhs, connector, rhs, 
output_field=None): super().__init__(output_field=output_field) self.connector = connector self.lhs = lhs self.rhs = rhs def __repr__(self): return "<{}: {}>".format(self.__class__.__name__, self) def __str__(self): return "{} {} {}".format(self.lhs, self.connector, self.rhs) def get_source_expressions(self): return [self.lhs, self.rhs] def set_source_expressions(self, exprs): self.lhs, self.rhs = exprs def _resolve_output_field(self): try: return super()._resolve_output_field() except FieldError: combined_type = _resolve_combined_type( self.connector, type(self.lhs.output_field), type(self.rhs.output_field), ) if combined_type is None: raise return combined_type() def as_sql(self, compiler, connection): expressions = [] expression_params = [] sql, params = compiler.compile(self.lhs) expressions.append(sql) expression_params.extend(params) sql, params = compiler.compile(self.rhs) expressions.append(sql) expression_params.extend(params) # order of precedence expression_wrapper = '(%s)' sql = connection.ops.combine_expression(self.connector, expressions) return expression_wrapper % sql, expression_params def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): lhs = self.lhs.resolve_expression(query, allow_joins, reuse, summarize, for_save) rhs = self.rhs.resolve_expression(query, allow_joins, reuse, summarize, for_save) if not isinstance(self, (DurationExpression, TemporalSubtraction)): try: lhs_type = lhs.output_field.get_internal_type() except (AttributeError, FieldError): lhs_type = None try: rhs_type = rhs.output_field.get_internal_type() except (AttributeError, FieldError): rhs_type = None if 'DurationField' in {lhs_type, rhs_type} and lhs_type != rhs_type: return DurationExpression(self.lhs, self.connector, self.rhs).resolve_expression( query, allow_joins, reuse, summarize, for_save, ) datetime_fields = {'DateField', 'DateTimeField', 'TimeField'} if self.connector == self.SUB and lhs_type in datetime_fields and lhs_type == rhs_type: return TemporalSubtraction(self.lhs, self.rhs).resolve_expression( query, allow_joins, reuse, summarize, for_save, ) c = self.copy() c.is_summary = summarize c.lhs = lhs c.rhs = rhs return c class DurationExpression(CombinedExpression): def compile(self, side, compiler, connection): try: output = side.output_field except FieldError: pass else: if output.get_internal_type() == 'DurationField': sql, params = compiler.compile(side) return connection.ops.format_for_duration_arithmetic(sql), params return compiler.compile(side) def as_sql(self, compiler, connection): if connection.features.has_native_duration_field: return super().as_sql(compiler, connection) connection.ops.check_expression_support(self) expressions = [] expression_params = [] sql, params = self.compile(self.lhs, compiler, connection) expressions.append(sql) expression_params.extend(params) sql, params = self.compile(self.rhs, compiler, connection) expressions.append(sql) expression_params.extend(params) # order of precedence expression_wrapper = '(%s)' sql = connection.ops.combine_duration_expression(self.connector, expressions) return expression_wrapper % sql, expression_params class TemporalSubtraction(CombinedExpression): output_field = fields.DurationField() def __init__(self, lhs, rhs): super().__init__(lhs, self.SUB, rhs) def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) lhs = compiler.compile(self.lhs) rhs = compiler.compile(self.rhs) return 
connection.ops.subtract_temporals(self.lhs.output_field.get_internal_type(), lhs, rhs) @deconstructible class F(Combinable): """An object capable of resolving references to existing query objects.""" def __init__(self, name): """ Arguments: * name: the name of the field this expression references """ self.name = name def __repr__(self): return "{}({})".format(self.__class__.__name__, self.name) def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): return query.resolve_ref(self.name, allow_joins, reuse, summarize) def asc(self, **kwargs): return OrderBy(self, **kwargs) def desc(self, **kwargs): return OrderBy(self, descending=True, **kwargs) def __eq__(self, other): return self.__class__ == other.__class__ and self.name == other.name def __hash__(self): return hash(self.name) class ResolvedOuterRef(F): """ An object that contains a reference to an outer query. In this case, the reference to the outer query has been resolved because the inner query has been used as a subquery. """ contains_aggregate = False def as_sql(self, *args, **kwargs): raise ValueError( 'This queryset contains a reference to an outer query and may ' 'only be used in a subquery.' ) def resolve_expression(self, *args, **kwargs): col = super().resolve_expression(*args, **kwargs) # FIXME: Rename possibly_multivalued to multivalued and fix detection # for non-multivalued JOINs (e.g. foreign key fields). This should take # into account only many-to-many and one-to-many relationships. col.possibly_multivalued = LOOKUP_SEP in self.name return col def relabeled_clone(self, relabels): return self def get_group_by_cols(self, alias=None): return [] class OuterRef(F): contains_aggregate = False def resolve_expression(self, *args, **kwargs): if isinstance(self.name, self.__class__): return self.name return ResolvedOuterRef(self.name) def relabeled_clone(self, relabels): return self class Func(SQLiteNumericMixin, Expression): """An SQL function call.""" function = None template = '%(function)s(%(expressions)s)' arg_joiner = ', ' arity = None # The number of arguments the function accepts. 
def __init__(self, *expressions, output_field=None, **extra): if self.arity is not None and len(expressions) != self.arity: raise TypeError( "'%s' takes exactly %s %s (%s given)" % ( self.__class__.__name__, self.arity, "argument" if self.arity == 1 else "arguments", len(expressions), ) ) super().__init__(output_field=output_field) self.source_expressions = self._parse_expressions(*expressions) self.extra = extra def __repr__(self): args = self.arg_joiner.join(str(arg) for arg in self.source_expressions) extra = {**self.extra, **self._get_repr_options()} if extra: extra = ', '.join(str(key) + '=' + str(val) for key, val in sorted(extra.items())) return "{}({}, {})".format(self.__class__.__name__, args, extra) return "{}({})".format(self.__class__.__name__, args) def _get_repr_options(self): """Return a dict of extra __init__() options to include in the repr.""" return {} def get_source_expressions(self): return self.source_expressions def set_source_expressions(self, exprs): self.source_expressions = exprs def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = self.copy() c.is_summary = summarize for pos, arg in enumerate(c.source_expressions): c.source_expressions[pos] = arg.resolve_expression(query, allow_joins, reuse, summarize, for_save) return c def as_sql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context): connection.ops.check_expression_support(self) sql_parts = [] params = [] for arg in self.source_expressions: arg_sql, arg_params = compiler.compile(arg) sql_parts.append(arg_sql) params.extend(arg_params) data = {**self.extra, **extra_context} # Use the first supplied value in this order: the parameter to this # method, a value supplied in __init__()'s **extra (the value in # `data`), or the value defined on the class. if function is not None: data['function'] = function else: data.setdefault('function', self.function) template = template or data.get('template', self.template) arg_joiner = arg_joiner or data.get('arg_joiner', self.arg_joiner) data['expressions'] = data['field'] = arg_joiner.join(sql_parts) return template % data, params def copy(self): copy = super().copy() copy.source_expressions = self.source_expressions[:] copy.extra = self.extra.copy() return copy class Value(Expression): """Represent a wrapped value as a node within an expression.""" # Provide a default value for `for_save` in order to allow unresolved # instances to be compiled until a decision is taken in #25425. for_save = False def __init__(self, value, output_field=None): """ Arguments: * value: the value this expression represents. The value will be added into the sql parameter list and properly quoted. * output_field: an instance of the model field type that this expression will return, such as IntegerField() or CharField(). 
""" super().__init__(output_field=output_field) self.value = value def __repr__(self): return "{}({})".format(self.__class__.__name__, self.value) def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) val = self.value output_field = self._output_field_or_none if output_field is not None: if self.for_save: val = output_field.get_db_prep_save(val, connection=connection) else: val = output_field.get_db_prep_value(val, connection=connection) if hasattr(output_field, 'get_placeholder'): return output_field.get_placeholder(val, compiler, connection), [val] if val is None: # cx_Oracle does not always convert None to the appropriate # NULL type (like in case expressions using numbers), so we # use a literal SQL NULL return 'NULL', [] return '%s', [val] def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save) c.for_save = for_save return c def get_group_by_cols(self, alias=None): return [] def _resolve_output_field(self): if isinstance(self.value, str): return fields.CharField() if isinstance(self.value, bool): return fields.BooleanField() if isinstance(self.value, int): return fields.IntegerField() if isinstance(self.value, float): return fields.FloatField() if isinstance(self.value, datetime.datetime): return fields.DateTimeField() if isinstance(self.value, datetime.date): return fields.DateField() if isinstance(self.value, datetime.time): return fields.TimeField() if isinstance(self.value, datetime.timedelta): return fields.DurationField() if isinstance(self.value, Decimal): return fields.DecimalField() if isinstance(self.value, bytes): return fields.BinaryField() if isinstance(self.value, UUID): return fields.UUIDField() class RawSQL(Expression): def __init__(self, sql, params, output_field=None): if output_field is None: output_field = fields.Field() self.sql, self.params = sql, params super().__init__(output_field=output_field) def __repr__(self): return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params) def as_sql(self, compiler, connection): return '(%s)' % self.sql, self.params def get_group_by_cols(self, alias=None): return [self] def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): # Resolve parents fields used in raw SQL. 
for parent in query.model._meta.get_parent_list(): for parent_field in parent._meta.local_fields: _, column_name = parent_field.get_attname_column() if column_name.lower() in self.sql.lower(): query.resolve_ref(parent_field.name, allow_joins, reuse, summarize) break return super().resolve_expression(query, allow_joins, reuse, summarize, for_save) class Star(Expression): def __repr__(self): return "'*'" def as_sql(self, compiler, connection): return '*', [] class Random(Expression): output_field = fields.FloatField() def __repr__(self): return "Random()" def as_sql(self, compiler, connection): return connection.ops.random_function_sql(), [] class Col(Expression): contains_column_references = True possibly_multivalued = False def __init__(self, alias, target, output_field=None): if output_field is None: output_field = target super().__init__(output_field=output_field) self.alias, self.target = alias, target def __repr__(self): alias, target = self.alias, self.target identifiers = (alias, str(target)) if alias else (str(target),) return '{}({})'.format(self.__class__.__name__, ', '.join(identifiers)) def as_sql(self, compiler, connection): alias, column = self.alias, self.target.column identifiers = (alias, column) if alias else (column,) sql = '.'.join(map(compiler.quote_name_unless_alias, identifiers)) return sql, [] def relabeled_clone(self, relabels): if self.alias is None: return self return self.__class__(relabels.get(self.alias, self.alias), self.target, self.output_field) def get_group_by_cols(self, alias=None): return [self] def get_db_converters(self, connection): if self.target == self.output_field: return self.output_field.get_db_converters(connection) return (self.output_field.get_db_converters(connection) + self.target.get_db_converters(connection)) class Ref(Expression): """ Reference to column alias of the query. For example, Ref('sum_cost') in qs.annotate(sum_cost=Sum('cost')) query. """ def __init__(self, refs, source): super().__init__() self.refs, self.source = refs, source def __repr__(self): return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source) def get_source_expressions(self): return [self.source] def set_source_expressions(self, exprs): self.source, = exprs def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): # The sub-expression `source` has already been resolved, as this is # just a reference to the name of `source`. return self def relabeled_clone(self, relabels): return self def as_sql(self, compiler, connection): return connection.ops.quote_name(self.refs), [] def get_group_by_cols(self, alias=None): return [self] class ExpressionList(Func): """ An expression containing multiple expressions. Can be used to provide a list of expressions as an argument to another expression, like an ordering clause. """ template = '%(expressions)s' def __init__(self, *expressions, **extra): if not expressions: raise ValueError('%s requires at least one expression.' % self.__class__.__name__) super().__init__(*expressions, **extra) def __str__(self): return self.arg_joiner.join(str(arg) for arg in self.source_expressions) class ExpressionWrapper(Expression): """ An expression that can wrap another expression so that it can provide extra context to the inner expression, such as the output_field. 
""" def __init__(self, expression, output_field): super().__init__(output_field=output_field) self.expression = expression def set_source_expressions(self, exprs): self.expression = exprs[0] def get_source_expressions(self): return [self.expression] def get_group_by_cols(self, alias=None): expression = self.expression.copy() expression.output_field = self.output_field return expression.get_group_by_cols(alias=alias) def as_sql(self, compiler, connection): return self.expression.as_sql(compiler, connection) def __repr__(self): return "{}({})".format(self.__class__.__name__, self.expression) class When(Expression): template = 'WHEN %(condition)s THEN %(result)s' # This isn't a complete conditional expression, must be used in Case(). conditional = False def __init__(self, condition=None, then=None, **lookups): if lookups: if condition is None: condition, lookups = Q(**lookups), None elif getattr(condition, 'conditional', False): condition, lookups = Q(condition, **lookups), None if condition is None or not getattr(condition, 'conditional', False) or lookups: raise TypeError( 'When() supports a Q object, a boolean expression, or lookups ' 'as a condition.' ) if isinstance(condition, Q) and not condition: raise ValueError("An empty Q() can't be used as a When() condition.") super().__init__(output_field=None) self.condition = condition self.result = self._parse_expressions(then)[0] def __str__(self): return "WHEN %r THEN %r" % (self.condition, self.result) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_source_expressions(self): return [self.condition, self.result] def set_source_expressions(self, exprs): self.condition, self.result = exprs def get_source_fields(self): # We're only interested in the fields of the result expressions. return [self.result._output_field_or_none] def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = self.copy() c.is_summary = summarize if hasattr(c.condition, 'resolve_expression'): c.condition = c.condition.resolve_expression(query, allow_joins, reuse, summarize, False) c.result = c.result.resolve_expression(query, allow_joins, reuse, summarize, for_save) return c def as_sql(self, compiler, connection, template=None, **extra_context): connection.ops.check_expression_support(self) template_params = extra_context sql_params = [] condition_sql, condition_params = compiler.compile(self.condition) template_params['condition'] = condition_sql sql_params.extend(condition_params) result_sql, result_params = compiler.compile(self.result) template_params['result'] = result_sql sql_params.extend(result_params) template = template or self.template return template % template_params, sql_params def get_group_by_cols(self, alias=None): # This is not a complete expression and cannot be used in GROUP BY. 
cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols class Case(Expression): """ An SQL searched CASE expression: CASE WHEN n > 0 THEN 'positive' WHEN n < 0 THEN 'negative' ELSE 'zero' END """ template = 'CASE %(cases)s ELSE %(default)s END' case_joiner = ' ' def __init__(self, *cases, default=None, output_field=None, **extra): if not all(isinstance(case, When) for case in cases): raise TypeError("Positional arguments must all be When objects.") super().__init__(output_field) self.cases = list(cases) self.default = self._parse_expressions(default)[0] self.extra = extra def __str__(self): return "CASE %s, ELSE %r" % (', '.join(str(c) for c in self.cases), self.default) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_source_expressions(self): return self.cases + [self.default] def set_source_expressions(self, exprs): *self.cases, self.default = exprs def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = self.copy() c.is_summary = summarize for pos, case in enumerate(c.cases): c.cases[pos] = case.resolve_expression(query, allow_joins, reuse, summarize, for_save) c.default = c.default.resolve_expression(query, allow_joins, reuse, summarize, for_save) return c def copy(self): c = super().copy() c.cases = c.cases[:] return c def as_sql(self, compiler, connection, template=None, case_joiner=None, **extra_context): connection.ops.check_expression_support(self) if not self.cases: return compiler.compile(self.default) template_params = {**self.extra, **extra_context} case_parts = [] sql_params = [] for case in self.cases: try: case_sql, case_params = compiler.compile(case) except EmptyResultSet: continue case_parts.append(case_sql) sql_params.extend(case_params) default_sql, default_params = compiler.compile(self.default) if not case_parts: return default_sql, default_params case_joiner = case_joiner or self.case_joiner template_params['cases'] = case_joiner.join(case_parts) template_params['default'] = default_sql sql_params.extend(default_params) template = template or template_params.get('template', self.template) sql = template % template_params if self._output_field_or_none is not None: sql = connection.ops.unification_cast_sql(self.output_field) % sql return sql, sql_params class Subquery(Expression): """ An explicit subquery. It may contain OuterRef() references to the outer query which will be resolved when it is applied to that query. """ template = '(%(subquery)s)' contains_aggregate = False def __init__(self, queryset, output_field=None, **extra): self.query = queryset.query self.extra = extra # Prevent the QuerySet from being evaluated. 
self.queryset = queryset._chain(_result_cache=[], prefetch_done=True) super().__init__(output_field) def __getstate__(self): state = super().__getstate__() args, kwargs = state['_constructor_args'] if args: args = (self.queryset, *args[1:]) else: kwargs['queryset'] = self.queryset state['_constructor_args'] = args, kwargs return state def get_source_expressions(self): return [self.query] def set_source_expressions(self, exprs): self.query = exprs[0] def _resolve_output_field(self): return self.query.output_field def copy(self): clone = super().copy() clone.query = clone.query.clone() return clone @property def external_aliases(self): return self.query.external_aliases def as_sql(self, compiler, connection, template=None, **extra_context): connection.ops.check_expression_support(self) template_params = {**self.extra, **extra_context} subquery_sql, sql_params = self.query.as_sql(compiler, connection) template_params['subquery'] = subquery_sql[1:-1] template = template or template_params.get('template', self.template) sql = template % template_params return sql, sql_params def get_group_by_cols(self, alias=None): if alias: return [Ref(alias, self)] external_cols = self.query.get_external_cols() if any(col.possibly_multivalued for col in external_cols): return [self] return external_cols class Exists(Subquery): template = 'EXISTS(%(subquery)s)' output_field = fields.BooleanField() def __init__(self, queryset, negated=False, **kwargs): # As a performance optimization, remove ordering since EXISTS doesn't # care about it, just whether or not a row matches. queryset = queryset.order_by() self.negated = negated super().__init__(queryset, **kwargs) def __invert__(self): clone = self.copy() clone.negated = not self.negated return clone def as_sql(self, compiler, connection, template=None, **extra_context): sql, params = super().as_sql(compiler, connection, template, **extra_context) if self.negated: sql = 'NOT {}'.format(sql) return sql, params def select_format(self, compiler, sql, params): # Wrap EXISTS() with a CASE WHEN expression if a database backend # (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP # BY list. 
if not compiler.connection.features.supports_boolean_expr_in_select_clause: sql = 'CASE WHEN {} THEN 1 ELSE 0 END'.format(sql) return sql, params class OrderBy(BaseExpression): template = '%(expression)s %(ordering)s' conditional = False def __init__(self, expression, descending=False, nulls_first=False, nulls_last=False): if nulls_first and nulls_last: raise ValueError('nulls_first and nulls_last are mutually exclusive') self.nulls_first = nulls_first self.nulls_last = nulls_last self.descending = descending if not hasattr(expression, 'resolve_expression'): raise ValueError('expression must be an expression type') self.expression = expression def __repr__(self): return "{}({}, descending={})".format( self.__class__.__name__, self.expression, self.descending) def set_source_expressions(self, exprs): self.expression = exprs[0] def get_source_expressions(self): return [self.expression] def as_sql(self, compiler, connection, template=None, **extra_context): template = template or self.template if connection.features.supports_order_by_nulls_modifier: if self.nulls_last: template = '%s NULLS LAST' % template elif self.nulls_first: template = '%s NULLS FIRST' % template else: if self.nulls_last and not ( self.descending and connection.features.order_by_nulls_first ): template = '%%(expression)s IS NULL, %s' % template elif self.nulls_first and not ( not self.descending and connection.features.order_by_nulls_first ): template = '%%(expression)s IS NOT NULL, %s' % template connection.ops.check_expression_support(self) expression_sql, params = compiler.compile(self.expression) placeholders = { 'expression': expression_sql, 'ordering': 'DESC' if self.descending else 'ASC', **extra_context, } template = template or self.template params *= template.count('%(expression)s') return (template % placeholders).rstrip(), params def as_oracle(self, compiler, connection): # Oracle doesn't allow ORDER BY EXISTS() unless it's wrapped in # a CASE WHEN. if isinstance(self.expression, Exists): copy = self.copy() copy.expression = Case( When(self.expression, then=True), default=False, ) return copy.as_sql(compiler, connection) return self.as_sql(compiler, connection) def get_group_by_cols(self, alias=None): cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols def reverse_ordering(self): self.descending = not self.descending if self.nulls_first or self.nulls_last: self.nulls_first = not self.nulls_first self.nulls_last = not self.nulls_last return self def asc(self): self.descending = False def desc(self): self.descending = True class Window(Expression): template = '%(expression)s OVER (%(window)s)' # Although the main expression may either be an aggregate or an # expression with an aggregate function, the GROUP BY that will # be introduced in the query as a result is not desired. contains_aggregate = False contains_over_clause = True filterable = False def __init__(self, expression, partition_by=None, order_by=None, frame=None, output_field=None): self.partition_by = partition_by self.order_by = order_by self.frame = frame if not getattr(expression, 'window_compatible', False): raise ValueError( "Expression '%s' isn't compatible with OVER clauses." 
% expression.__class__.__name__ ) if self.partition_by is not None: if not isinstance(self.partition_by, (tuple, list)): self.partition_by = (self.partition_by,) self.partition_by = ExpressionList(*self.partition_by) if self.order_by is not None: if isinstance(self.order_by, (list, tuple)): self.order_by = ExpressionList(*self.order_by) elif not isinstance(self.order_by, BaseExpression): raise ValueError( 'order_by must be either an Expression or a sequence of ' 'expressions.' ) super().__init__(output_field=output_field) self.source_expression = self._parse_expressions(expression)[0] def _resolve_output_field(self): return self.source_expression.output_field def get_source_expressions(self): return [self.source_expression, self.partition_by, self.order_by, self.frame] def set_source_expressions(self, exprs): self.source_expression, self.partition_by, self.order_by, self.frame = exprs def as_sql(self, compiler, connection, template=None): connection.ops.check_expression_support(self) if not connection.features.supports_over_clause: raise NotSupportedError('This backend does not support window expressions.') expr_sql, params = compiler.compile(self.source_expression) window_sql, window_params = [], [] if self.partition_by is not None: sql_expr, sql_params = self.partition_by.as_sql( compiler=compiler, connection=connection, template='PARTITION BY %(expressions)s', ) window_sql.extend(sql_expr) window_params.extend(sql_params) if self.order_by is not None: window_sql.append(' ORDER BY ') order_sql, order_params = compiler.compile(self.order_by) window_sql.extend(order_sql) window_params.extend(order_params) if self.frame: frame_sql, frame_params = compiler.compile(self.frame) window_sql.append(' ' + frame_sql) window_params.extend(frame_params) params.extend(window_params) template = template or self.template return template % { 'expression': expr_sql, 'window': ''.join(window_sql).strip() }, params def __str__(self): return '{} OVER ({}{}{})'.format( str(self.source_expression), 'PARTITION BY ' + str(self.partition_by) if self.partition_by else '', 'ORDER BY ' + str(self.order_by) if self.order_by else '', str(self.frame or ''), ) def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self) def get_group_by_cols(self, alias=None): return [] class WindowFrame(Expression): """ Model the frame clause in window expressions. There are two types of frame clauses which are subclasses, however, all processing and validation (by no means intended to be complete) is done here. Thus, providing an end for a frame is optional (the default is UNBOUNDED FOLLOWING, which is the last row in the frame). 
""" template = '%(frame_type)s BETWEEN %(start)s AND %(end)s' def __init__(self, start=None, end=None): self.start = Value(start) self.end = Value(end) def set_source_expressions(self, exprs): self.start, self.end = exprs def get_source_expressions(self): return [self.start, self.end] def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) start, end = self.window_frame_start_end(connection, self.start.value, self.end.value) return self.template % { 'frame_type': self.frame_type, 'start': start, 'end': end, }, [] def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self) def get_group_by_cols(self, alias=None): return [] def __str__(self): if self.start.value is not None and self.start.value < 0: start = '%d %s' % (abs(self.start.value), connection.ops.PRECEDING) elif self.start.value is not None and self.start.value == 0: start = connection.ops.CURRENT_ROW else: start = connection.ops.UNBOUNDED_PRECEDING if self.end.value is not None and self.end.value > 0: end = '%d %s' % (self.end.value, connection.ops.FOLLOWING) elif self.end.value is not None and self.end.value == 0: end = connection.ops.CURRENT_ROW else: end = connection.ops.UNBOUNDED_FOLLOWING return self.template % { 'frame_type': self.frame_type, 'start': start, 'end': end, } def window_frame_start_end(self, connection, start, end): raise NotImplementedError('Subclasses must implement window_frame_start_end().') class RowRange(WindowFrame): frame_type = 'ROWS' def window_frame_start_end(self, connection, start, end): return connection.ops.window_frame_rows_start_end(start, end) class ValueRange(WindowFrame): frame_type = 'RANGE' def window_frame_start_end(self, connection, start, end): return connection.ops.window_frame_range_start_end(start, end)
import itertools import math import warnings from copy import copy from django.core.exceptions import EmptyResultSet from django.db.models.expressions import Case, Exists, Func, Value, When from django.db.models.fields import ( CharField, DateTimeField, Field, IntegerField, UUIDField, ) from django.db.models.query_utils import RegisterLookupMixin from django.utils.datastructures import OrderedSet from django.utils.deprecation import RemovedInDjango40Warning from django.utils.functional import cached_property class Lookup: lookup_name = None prepare_rhs = True can_use_none_as_rhs = False def __init__(self, lhs, rhs): self.lhs, self.rhs = lhs, rhs self.rhs = self.get_prep_lookup() if hasattr(self.lhs, 'get_bilateral_transforms'): bilateral_transforms = self.lhs.get_bilateral_transforms() else: bilateral_transforms = [] if bilateral_transforms: # Warn the user as soon as possible if they are trying to apply # a bilateral transformation on a nested QuerySet: that won't work. from django.db.models.sql.query import Query # avoid circular import if isinstance(rhs, Query): raise NotImplementedError("Bilateral transformations on nested querysets are not implemented.") self.bilateral_transforms = bilateral_transforms def apply_bilateral_transforms(self, value): for transform in self.bilateral_transforms: value = transform(value) return value def batch_process_rhs(self, compiler, connection, rhs=None): if rhs is None: rhs = self.rhs if self.bilateral_transforms: sqls, sqls_params = [], [] for p in rhs: value = Value(p, output_field=self.lhs.output_field) value = self.apply_bilateral_transforms(value) value = value.resolve_expression(compiler.query) sql, sql_params = compiler.compile(value) sqls.append(sql) sqls_params.extend(sql_params) else: _, params = self.get_db_prep_lookup(rhs, connection) sqls, sqls_params = ['%s'] * len(params), params return sqls, sqls_params def get_source_expressions(self): if self.rhs_is_direct_value(): return [self.lhs] return [self.lhs, self.rhs] def set_source_expressions(self, new_exprs): if len(new_exprs) == 1: self.lhs = new_exprs[0] else: self.lhs, self.rhs = new_exprs def get_prep_lookup(self): if hasattr(self.rhs, 'resolve_expression'): return self.rhs if self.prepare_rhs and hasattr(self.lhs.output_field, 'get_prep_value'): return self.lhs.output_field.get_prep_value(self.rhs) return self.rhs def get_db_prep_lookup(self, value, connection): return ('%s', [value]) def process_lhs(self, compiler, connection, lhs=None): lhs = lhs or self.lhs if hasattr(lhs, 'resolve_expression'): lhs = lhs.resolve_expression(compiler.query) return compiler.compile(lhs) def process_rhs(self, compiler, connection): value = self.rhs if self.bilateral_transforms: if self.rhs_is_direct_value(): # Do not call get_db_prep_lookup here as the value will be # transformed before being used for lookup value = Value(value, output_field=self.lhs.output_field) value = self.apply_bilateral_transforms(value) value = value.resolve_expression(compiler.query) if hasattr(value, 'as_sql'): return compiler.compile(value) else: return self.get_db_prep_lookup(value, connection) def rhs_is_direct_value(self): return not hasattr(self.rhs, 'as_sql') def relabeled_clone(self, relabels): new = copy(self) new.lhs = new.lhs.relabeled_clone(relabels) if hasattr(new.rhs, 'relabeled_clone'): new.rhs = new.rhs.relabeled_clone(relabels) return new def get_group_by_cols(self, alias=None): cols = self.lhs.get_group_by_cols() if hasattr(self.rhs, 'get_group_by_cols'): cols.extend(self.rhs.get_group_by_cols()) return cols 
def as_sql(self, compiler, connection): raise NotImplementedError def as_oracle(self, compiler, connection): # Oracle doesn't allow EXISTS() to be compared to another expression # unless it's wrapped in a CASE WHEN. wrapped = False exprs = [] for expr in (self.lhs, self.rhs): if isinstance(expr, Exists): expr = Case(When(expr, then=True), default=False) wrapped = True exprs.append(expr) lookup = type(self)(*exprs) if wrapped else self return lookup.as_sql(compiler, connection) @cached_property def contains_aggregate(self): return self.lhs.contains_aggregate or getattr(self.rhs, 'contains_aggregate', False) @cached_property def contains_over_clause(self): return self.lhs.contains_over_clause or getattr(self.rhs, 'contains_over_clause', False) @property def is_summary(self): return self.lhs.is_summary or getattr(self.rhs, 'is_summary', False) class Transform(RegisterLookupMixin, Func): """ RegisterLookupMixin() is first so that get_lookup() and get_transform() first examine self and then check output_field. """ bilateral = False arity = 1 @property def lhs(self): return self.get_source_expressions()[0] def get_bilateral_transforms(self): if hasattr(self.lhs, 'get_bilateral_transforms'): bilateral_transforms = self.lhs.get_bilateral_transforms() else: bilateral_transforms = [] if self.bilateral: bilateral_transforms.append(self.__class__) return bilateral_transforms class BuiltinLookup(Lookup): def process_lhs(self, compiler, connection, lhs=None): lhs_sql, params = super().process_lhs(compiler, connection, lhs) field_internal_type = self.lhs.output_field.get_internal_type() db_type = self.lhs.output_field.db_type(connection=connection) lhs_sql = connection.ops.field_cast_sql( db_type, field_internal_type) % lhs_sql lhs_sql = connection.ops.lookup_cast(self.lookup_name, field_internal_type) % lhs_sql return lhs_sql, list(params) def as_sql(self, compiler, connection): lhs_sql, params = self.process_lhs(compiler, connection) rhs_sql, rhs_params = self.process_rhs(compiler, connection) params.extend(rhs_params) rhs_sql = self.get_rhs_op(connection, rhs_sql) return '%s %s' % (lhs_sql, rhs_sql), params def get_rhs_op(self, connection, rhs): return connection.operators[self.lookup_name] % rhs class FieldGetDbPrepValueMixin: """ Some lookups require Field.get_db_prep_value() to be called on their inputs. """ get_db_prep_lookup_value_is_iterable = False def get_db_prep_lookup(self, value, connection): # For relational fields, use the 'target_field' attribute of the # output_field. field = getattr(self.lhs.output_field, 'target_field', None) get_db_prep_value = getattr(field, 'get_db_prep_value', None) or self.lhs.output_field.get_db_prep_value return ( '%s', [get_db_prep_value(v, connection, prepared=True) for v in value] if self.get_db_prep_lookup_value_is_iterable else [get_db_prep_value(value, connection, prepared=True)] ) class FieldGetDbPrepValueIterableMixin(FieldGetDbPrepValueMixin): """ Some lookups require Field.get_db_prep_value() to be called on each value in an iterable. """ get_db_prep_lookup_value_is_iterable = True def get_prep_lookup(self): if hasattr(self.rhs, 'resolve_expression'): return self.rhs prepared_values = [] for rhs_value in self.rhs: if hasattr(rhs_value, 'resolve_expression'): # An expression will be handled by the database but can coexist # alongside real values. 
pass elif self.prepare_rhs and hasattr(self.lhs.output_field, 'get_prep_value'): rhs_value = self.lhs.output_field.get_prep_value(rhs_value) prepared_values.append(rhs_value) return prepared_values def process_rhs(self, compiler, connection): if self.rhs_is_direct_value(): # rhs should be an iterable of values. Use batch_process_rhs() # to prepare/transform those values. return self.batch_process_rhs(compiler, connection) else: return super().process_rhs(compiler, connection) def resolve_expression_parameter(self, compiler, connection, sql, param): params = [param] if hasattr(param, 'resolve_expression'): param = param.resolve_expression(compiler.query) if hasattr(param, 'as_sql'): sql, params = param.as_sql(compiler, connection) return sql, params def batch_process_rhs(self, compiler, connection, rhs=None): pre_processed = super().batch_process_rhs(compiler, connection, rhs) # The params list may contain expressions which compile to a # sql/param pair. Zip them to get sql and param pairs that refer to the # same argument and attempt to replace them with the result of # compiling the param step. sql, params = zip(*( self.resolve_expression_parameter(compiler, connection, sql, param) for sql, param in zip(*pre_processed) )) params = itertools.chain.from_iterable(params) return sql, tuple(params) class PostgresOperatorLookup(FieldGetDbPrepValueMixin, Lookup): """Lookup defined by operators on PostgreSQL.""" postgres_operator = None def as_postgresql(self, compiler, connection): lhs, lhs_params = self.process_lhs(compiler, connection) rhs, rhs_params = self.process_rhs(compiler, connection) params = tuple(lhs_params) + tuple(rhs_params) return '%s %s %s' % (lhs, self.postgres_operator, rhs), params @Field.register_lookup class Exact(FieldGetDbPrepValueMixin, BuiltinLookup): lookup_name = 'exact' def process_rhs(self, compiler, connection): from django.db.models.sql.query import Query if isinstance(self.rhs, Query): if self.rhs.has_limit_one(): if not self.rhs.has_select_fields: self.rhs.clear_select_clause() self.rhs.add_fields(['pk']) else: raise ValueError( 'The QuerySet value for an exact lookup must be limited to ' 'one result using slicing.' ) return super().process_rhs(compiler, connection) def as_sql(self, compiler, connection): # Avoid comparison against direct rhs if lhs is a boolean value. That # turns "boolfield__exact=True" into "WHERE boolean_field" instead of # "WHERE boolean_field = True" when allowed. 
if ( isinstance(self.rhs, bool) and getattr(self.lhs, 'conditional', False) and connection.ops.conditional_expression_supported_in_where_clause(self.lhs) ): lhs_sql, params = self.process_lhs(compiler, connection) template = '%s' if self.rhs else 'NOT %s' return template % lhs_sql, params return super().as_sql(compiler, connection) @Field.register_lookup class IExact(BuiltinLookup): lookup_name = 'iexact' prepare_rhs = False def process_rhs(self, qn, connection): rhs, params = super().process_rhs(qn, connection) if params: params[0] = connection.ops.prep_for_iexact_query(params[0]) return rhs, params @Field.register_lookup class GreaterThan(FieldGetDbPrepValueMixin, BuiltinLookup): lookup_name = 'gt' @Field.register_lookup class GreaterThanOrEqual(FieldGetDbPrepValueMixin, BuiltinLookup): lookup_name = 'gte' @Field.register_lookup class LessThan(FieldGetDbPrepValueMixin, BuiltinLookup): lookup_name = 'lt' @Field.register_lookup class LessThanOrEqual(FieldGetDbPrepValueMixin, BuiltinLookup): lookup_name = 'lte' class IntegerFieldFloatRounding: """ Allow floats to work as query values for IntegerField. Without this, the decimal portion of the float would always be discarded. """ def get_prep_lookup(self): if isinstance(self.rhs, float): self.rhs = math.ceil(self.rhs) return super().get_prep_lookup() @IntegerField.register_lookup class IntegerGreaterThanOrEqual(IntegerFieldFloatRounding, GreaterThanOrEqual): pass @IntegerField.register_lookup class IntegerLessThan(IntegerFieldFloatRounding, LessThan): pass @Field.register_lookup class In(FieldGetDbPrepValueIterableMixin, BuiltinLookup): lookup_name = 'in' def process_rhs(self, compiler, connection): db_rhs = getattr(self.rhs, '_db', None) if db_rhs is not None and db_rhs != connection.alias: raise ValueError( "Subqueries aren't allowed across different databases. Force " "the inner query to be evaluated using `list(inner_query)`." ) if self.rhs_is_direct_value(): # Remove None from the list as NULL is never equal to anything. try: rhs = OrderedSet(self.rhs) rhs.discard(None) except TypeError: # Unhashable items in self.rhs rhs = [r for r in self.rhs if r is not None] if not rhs: raise EmptyResultSet # rhs should be an iterable; use batch_process_rhs() to # prepare/transform those values. sqls, sqls_params = self.batch_process_rhs(compiler, connection, rhs) placeholder = '(' + ', '.join(sqls) + ')' return (placeholder, sqls_params) else: if not getattr(self.rhs, 'has_select_fields', True): self.rhs.clear_select_clause() self.rhs.add_fields(['pk']) return super().process_rhs(compiler, connection) def get_rhs_op(self, connection, rhs): return 'IN %s' % rhs def as_sql(self, compiler, connection): max_in_list_size = connection.ops.max_in_list_size() if self.rhs_is_direct_value() and max_in_list_size and len(self.rhs) > max_in_list_size: return self.split_parameter_list_as_sql(compiler, connection) return super().as_sql(compiler, connection) def split_parameter_list_as_sql(self, compiler, connection): # This is a special case for databases which limit the number of # elements which can appear in an 'IN' clause. 
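        # For example (a sketch, assuming max_in_list_size = 2 and a direct
        # value list [1, 2, 3]), the SQL built below is roughly:
        #     ("T1"."id" IN (%s, %s) OR "T1"."id" IN (%s))
        # with the lhs params repeated in front of each OR group's params.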
max_in_list_size = connection.ops.max_in_list_size() lhs, lhs_params = self.process_lhs(compiler, connection) rhs, rhs_params = self.batch_process_rhs(compiler, connection) in_clause_elements = ['('] params = [] for offset in range(0, len(rhs_params), max_in_list_size): if offset > 0: in_clause_elements.append(' OR ') in_clause_elements.append('%s IN (' % lhs) params.extend(lhs_params) sqls = rhs[offset: offset + max_in_list_size] sqls_params = rhs_params[offset: offset + max_in_list_size] param_group = ', '.join(sqls) in_clause_elements.append(param_group) in_clause_elements.append(')') params.extend(sqls_params) in_clause_elements.append(')') return ''.join(in_clause_elements), params class PatternLookup(BuiltinLookup): param_pattern = '%%%s%%' prepare_rhs = False def get_rhs_op(self, connection, rhs): # Assume we are in startswith. We need to produce SQL like: # col LIKE %s, ['thevalue%'] # For python values we can (and should) do that directly in Python, # but if the value is for example reference to other column, then # we need to add the % pattern match to the lookup by something like # col LIKE othercol || '%%' # So, for Python values we don't need any special pattern, but for # SQL reference values or SQL transformations we need the correct # pattern added. if hasattr(self.rhs, 'as_sql') or self.bilateral_transforms: pattern = connection.pattern_ops[self.lookup_name].format(connection.pattern_esc) return pattern.format(rhs) else: return super().get_rhs_op(connection, rhs) def process_rhs(self, qn, connection): rhs, params = super().process_rhs(qn, connection) if self.rhs_is_direct_value() and params and not self.bilateral_transforms: params[0] = self.param_pattern % connection.ops.prep_for_like_query(params[0]) return rhs, params @Field.register_lookup class Contains(PatternLookup): lookup_name = 'contains' @Field.register_lookup class IContains(Contains): lookup_name = 'icontains' @Field.register_lookup class StartsWith(PatternLookup): lookup_name = 'startswith' param_pattern = '%s%%' @Field.register_lookup class IStartsWith(StartsWith): lookup_name = 'istartswith' @Field.register_lookup class EndsWith(PatternLookup): lookup_name = 'endswith' param_pattern = '%%%s' @Field.register_lookup class IEndsWith(EndsWith): lookup_name = 'iendswith' @Field.register_lookup class Range(FieldGetDbPrepValueIterableMixin, BuiltinLookup): lookup_name = 'range' def get_rhs_op(self, connection, rhs): return "BETWEEN %s AND %s" % (rhs[0], rhs[1]) @Field.register_lookup class IsNull(BuiltinLookup): lookup_name = 'isnull' prepare_rhs = False def as_sql(self, compiler, connection): if not isinstance(self.rhs, bool): # When the deprecation ends, replace with: # raise ValueError( # 'The QuerySet value for an isnull lookup must be True or ' # 'False.' 
# ) warnings.warn( 'Using a non-boolean value for an isnull lookup is ' 'deprecated, use True or False instead.', RemovedInDjango40Warning, ) sql, params = compiler.compile(self.lhs) if self.rhs: return "%s IS NULL" % sql, params else: return "%s IS NOT NULL" % sql, params @Field.register_lookup class Regex(BuiltinLookup): lookup_name = 'regex' prepare_rhs = False def as_sql(self, compiler, connection): if self.lookup_name in connection.operators: return super().as_sql(compiler, connection) else: lhs, lhs_params = self.process_lhs(compiler, connection) rhs, rhs_params = self.process_rhs(compiler, connection) sql_template = connection.ops.regex_lookup(self.lookup_name) return sql_template % (lhs, rhs), lhs_params + rhs_params @Field.register_lookup class IRegex(Regex): lookup_name = 'iregex' class YearLookup(Lookup): def year_lookup_bounds(self, connection, year): output_field = self.lhs.lhs.output_field if isinstance(output_field, DateTimeField): bounds = connection.ops.year_lookup_bounds_for_datetime_field(year) else: bounds = connection.ops.year_lookup_bounds_for_date_field(year) return bounds def as_sql(self, compiler, connection): # Avoid the extract operation if the rhs is a direct value to allow # indexes to be used. if self.rhs_is_direct_value(): # Skip the extract part by directly using the originating field, # that is self.lhs.lhs. lhs_sql, params = self.process_lhs(compiler, connection, self.lhs.lhs) rhs_sql, _ = self.process_rhs(compiler, connection) rhs_sql = self.get_direct_rhs_sql(connection, rhs_sql) start, finish = self.year_lookup_bounds(connection, self.rhs) params.extend(self.get_bound_params(start, finish)) return '%s %s' % (lhs_sql, rhs_sql), params return super().as_sql(compiler, connection) def get_direct_rhs_sql(self, connection, rhs): return connection.operators[self.lookup_name] % rhs def get_bound_params(self, start, finish): raise NotImplementedError( 'subclasses of YearLookup must provide a get_bound_params() method' ) class YearExact(YearLookup, Exact): def get_direct_rhs_sql(self, connection, rhs): return 'BETWEEN %s AND %s' def get_bound_params(self, start, finish): return (start, finish) class YearGt(YearLookup, GreaterThan): def get_bound_params(self, start, finish): return (finish,) class YearGte(YearLookup, GreaterThanOrEqual): def get_bound_params(self, start, finish): return (start,) class YearLt(YearLookup, LessThan): def get_bound_params(self, start, finish): return (start,) class YearLte(YearLookup, LessThanOrEqual): def get_bound_params(self, start, finish): return (finish,) class UUIDTextMixin: """ Strip hyphens from a value when filtering a UUIDField on backends without a native datatype for UUID. 
""" def process_rhs(self, qn, connection): if not connection.features.has_native_uuid_field: from django.db.models.functions import Replace if self.rhs_is_direct_value(): self.rhs = Value(self.rhs) self.rhs = Replace(self.rhs, Value('-'), Value(''), output_field=CharField()) rhs, params = super().process_rhs(qn, connection) return rhs, params @UUIDField.register_lookup class UUIDIExact(UUIDTextMixin, IExact): pass @UUIDField.register_lookup class UUIDContains(UUIDTextMixin, Contains): pass @UUIDField.register_lookup class UUIDIContains(UUIDTextMixin, IContains): pass @UUIDField.register_lookup class UUIDStartsWith(UUIDTextMixin, StartsWith): pass @UUIDField.register_lookup class UUIDIStartsWith(UUIDTextMixin, IStartsWith): pass @UUIDField.register_lookup class UUIDEndsWith(UUIDTextMixin, EndsWith): pass @UUIDField.register_lookup class UUIDIEndsWith(UUIDTextMixin, IEndsWith): pass
""" Create SQL statements for QuerySets. The code in here encapsulates all of the SQL construction so that QuerySets themselves do not have to (and could be backed by things other than SQL databases). The abstraction barrier only works one way: this module has to know all about the internals of models in order to get the information it needs. """ import copy import difflib import functools import inspect import sys import warnings from collections import Counter, namedtuple from collections.abc import Iterator, Mapping from itertools import chain, count, product from string import ascii_uppercase from django.core.exceptions import ( EmptyResultSet, FieldDoesNotExist, FieldError, ) from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections from django.db.models.aggregates import Count from django.db.models.constants import LOOKUP_SEP from django.db.models.expressions import BaseExpression, Col, F, OuterRef, Ref from django.db.models.fields import Field from django.db.models.fields.related_lookups import MultiColSource from django.db.models.lookups import Lookup from django.db.models.query_utils import ( Q, check_rel_lookup_compatibility, refs_expression, ) from django.db.models.sql.constants import INNER, LOUTER, ORDER_DIR, SINGLE from django.db.models.sql.datastructures import ( BaseTable, Empty, Join, MultiJoin, ) from django.db.models.sql.where import ( AND, OR, ExtraWhere, NothingNode, WhereNode, ) from django.utils.deprecation import RemovedInDjango40Warning from django.utils.functional import cached_property from django.utils.tree import Node __all__ = ['Query', 'RawQuery'] def get_field_names_from_opts(opts): return set(chain.from_iterable( (f.name, f.attname) if f.concrete else (f.name,) for f in opts.get_fields() )) def get_children_from_q(q): for child in q.children: if isinstance(child, Node): yield from get_children_from_q(child) else: yield child JoinInfo = namedtuple( 'JoinInfo', ('final_field', 'targets', 'opts', 'joins', 'path', 'transform_function') ) class RawQuery: """A single raw SQL query.""" def __init__(self, sql, using, params=None): self.params = params or () self.sql = sql self.using = using self.cursor = None # Mirror some properties of a normal query so that # the compiler can be used to process results. self.low_mark, self.high_mark = 0, None # Used for offset/limit self.extra_select = {} self.annotation_select = {} def chain(self, using): return self.clone(using) def clone(self, using): return RawQuery(self.sql, using, params=self.params) def get_columns(self): if self.cursor is None: self._execute_query() converter = connections[self.using].introspection.identifier_converter return [converter(column_meta[0]) for column_meta in self.cursor.description] def __iter__(self): # Always execute a new query for a new iterator. # This could be optimized with a cache at the expense of RAM. self._execute_query() if not connections[self.using].features.can_use_chunked_reads: # If the database can't use chunked reads we need to make sure we # evaluate the entire query up front. result = list(self.cursor) else: result = self.cursor return iter(result) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) @property def params_type(self): return dict if isinstance(self.params, Mapping) else tuple def __str__(self): return self.sql % self.params_type(self.params) def _execute_query(self): connection = connections[self.using] # Adapt parameters to the database, as much as possible considering # that the target type isn't known. See #17755. 
        params_type = self.params_type
        adapter = connection.ops.adapt_unknown_value
        if params_type is tuple:
            params = tuple(adapter(val) for val in self.params)
        elif params_type is dict:
            params = {key: adapter(val) for key, val in self.params.items()}
        else:
            raise RuntimeError("Unexpected params type: %s" % params_type)

        self.cursor = connection.cursor()
        self.cursor.execute(self.sql, params)


class Query(BaseExpression):
    """A single SQL query."""

    alias_prefix = 'T'
    subq_aliases = frozenset([alias_prefix])

    compiler = 'SQLCompiler'

    def __init__(self, model, where=WhereNode, alias_cols=True):
        self.model = model
        self.alias_refcount = {}
        # alias_map is the most important data structure regarding joins.
        # It's used for recording which joins exist in the query and what
        # types they are. The key is the alias of the joined table (possibly
        # the table name) and the value is a Join-like object (see
        # sql.datastructures.Join for more information).
        self.alias_map = {}
        # Whether to provide alias to columns during reference resolving.
        self.alias_cols = alias_cols
        # Sometimes the query contains references to aliases in outer queries (as
        # a result of split_exclude). Correct alias quoting needs to know these
        # aliases too.
        # Map external tables to whether they are aliased.
        self.external_aliases = {}
        self.table_map = {}     # Maps table names to list of aliases.
        self.default_cols = True
        self.default_ordering = True
        self.standard_ordering = True
        self.used_aliases = set()
        self.filter_is_sticky = False
        self.subquery = False

        # SQL-related attributes
        # Select and related select clauses are expressions to use in the
        # SELECT clause of the query.
        # The select is used for cases where we want to set up the select
        # clause to contain other than default fields (values(), subqueries...)
        # Note that annotations go to annotations dictionary.
        self.select = ()
        self.where = where()
        self.where_class = where
        # The group_by attribute can have one of the following forms:
        #  - None: no group by at all in the query
        #  - A tuple of expressions: group by (at least) those expressions.
        #    String refs are also allowed for now.
        #  - True: group by all select fields of the model
        # See compiler.get_group_by() for details.
        self.group_by = None
        self.order_by = ()
        self.low_mark, self.high_mark = 0, None  # Used for offset/limit
        self.distinct = False
        self.distinct_fields = ()
        self.select_for_update = False
        self.select_for_update_nowait = False
        self.select_for_update_skip_locked = False
        self.select_for_update_of = ()
        self.select_for_no_key_update = False

        self.select_related = False
        # Arbitrary limit for select_related to prevent infinite recursion.
        self.max_depth = 5

        # Holds the selects defined by a call to values() or values_list()
        # excluding annotation_select and extra_select.
        self.values_select = ()

        # SQL annotation-related attributes
        self.annotations = {}  # Maps alias -> Annotation Expression
        self.annotation_select_mask = None
        self._annotation_select_cache = None

        # Set combination attributes
        self.combinator = None
        self.combinator_all = False
        self.combined_queries = ()

        # These are for extensions. The contents are more or less appended
        # verbatim to the appropriate clause.
        self.extra = {}  # Maps col_alias -> (col_sql, params).
        self.extra_select_mask = None
        self._extra_select_cache = None

        self.extra_tables = ()
        self.extra_order_by = ()

        # A tuple that is a set of model field names and either True, if these
        # are the fields to defer, or False if these are the only fields to
        # load.
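        # For example (sketch): qs.defer('name') leads to
        # (frozenset({'name'}), True), while qs.only('name') leads to
        # (frozenset({'name'}), False).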
        self.deferred_loading = (frozenset(), True)

        self._filtered_relations = {}

        self.explain_query = False
        self.explain_format = None
        self.explain_options = {}

    @property
    def output_field(self):
        if len(self.select) == 1:
            select = self.select[0]
            return getattr(select, 'target', None) or select.field
        elif len(self.annotation_select) == 1:
            return next(iter(self.annotation_select.values())).output_field

    @property
    def has_select_fields(self):
        return bool(self.select or self.annotation_select_mask or self.extra_select_mask)

    @cached_property
    def base_table(self):
        for alias in self.alias_map:
            return alias

    def __str__(self):
        """
        Return the query as a string of SQL with the parameter values
        substituted in (use sql_with_params() to see the unsubstituted string).

        Parameter values won't necessarily be quoted correctly, since that is
        done by the database interface at execution time.
        """
        sql, params = self.sql_with_params()
        return sql % params

    def sql_with_params(self):
        """
        Return the query as an SQL string and the parameters that will be
        substituted into the query.
        """
        return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()

    def __deepcopy__(self, memo):
        """Limit the amount of work when a Query is deepcopied."""
        result = self.clone()
        memo[id(self)] = result
        return result

    def get_compiler(self, using=None, connection=None):
        if using is None and connection is None:
            raise ValueError("Need either using or connection")
        if using:
            connection = connections[using]
        return connection.ops.compiler(self.compiler)(self, connection, using)

    def get_meta(self):
        """
        Return the Options instance (the model._meta) from which to start
        processing. Normally, this is self.model._meta, but it can be changed
        by subclasses.
        """
        return self.model._meta

    def clone(self):
        """
        Return a copy of the current Query. A lightweight alternative to
        deepcopy().
        """
        obj = Empty()
        obj.__class__ = self.__class__
        # Copy references to everything.
        obj.__dict__ = self.__dict__.copy()
        # Clone attributes that can't use shallow copy.
        obj.alias_refcount = self.alias_refcount.copy()
        obj.alias_map = self.alias_map.copy()
        obj.external_aliases = self.external_aliases.copy()
        obj.table_map = self.table_map.copy()
        obj.where = self.where.clone()
        obj.annotations = self.annotations.copy()
        if self.annotation_select_mask is None:
            obj.annotation_select_mask = None
        else:
            obj.annotation_select_mask = self.annotation_select_mask.copy()
        obj.combined_queries = tuple(query.clone() for query in self.combined_queries)
        # _annotation_select_cache cannot be copied, as doing so breaks the
        # (necessary) state in which both annotations and
        # _annotation_select_cache point to the same underlying objects.
        # It will get re-populated in the cloned queryset the next time it's
        # used.
        obj._annotation_select_cache = None
        obj.extra = self.extra.copy()
        if self.extra_select_mask is None:
            obj.extra_select_mask = None
        else:
            obj.extra_select_mask = self.extra_select_mask.copy()
        if self._extra_select_cache is None:
            obj._extra_select_cache = None
        else:
            obj._extra_select_cache = self._extra_select_cache.copy()
        if self.select_related is not False:
            # Use deepcopy because select_related stores fields in nested
            # dicts.
            obj.select_related = copy.deepcopy(obj.select_related)
        if 'subq_aliases' in self.__dict__:
            obj.subq_aliases = self.subq_aliases.copy()
        obj.used_aliases = self.used_aliases.copy()
        obj._filtered_relations = self._filtered_relations.copy()
        # Clear the cached_property
        try:
            del obj.base_table
        except AttributeError:
            pass
        return obj

    def chain(self, klass=None):
        """
        Return a copy of the current Query that's ready for another operation.
        The klass argument changes the type of the Query, e.g. UpdateQuery.
        """
        obj = self.clone()
        if klass and obj.__class__ != klass:
            obj.__class__ = klass
        if not obj.filter_is_sticky:
            obj.used_aliases = set()
        obj.filter_is_sticky = False
        if hasattr(obj, '_setup_query'):
            obj._setup_query()
        return obj

    def relabeled_clone(self, change_map):
        clone = self.clone()
        clone.change_aliases(change_map)
        return clone

    def _get_col(self, target, field, alias):
        if not self.alias_cols:
            alias = None
        return target.get_col(alias, field)

    def rewrite_cols(self, annotation, col_cnt):
        # We must make sure the inner query has the referred columns in it.
        # If we are aggregating over an annotation, then Django uses Ref()
        # instances to note this. However, if we are annotating over a column
        # of a related model, then it might be that column isn't part of the
        # SELECT clause of the inner query, and we must manually make sure
        # the column is selected. An example case is:
        #    .aggregate(Sum('author__awards'))
        # Resolving this expression results in a join to author, but there
        # is no guarantee the awards column of author is in the select clause
        # of the query. Thus we must manually add the column to the inner
        # query.
        orig_exprs = annotation.get_source_expressions()
        new_exprs = []
        for expr in orig_exprs:
            # FIXME: These conditions are fairly arbitrary. Identify a better
            # method of having expressions decide which code path they should
            # take.
            if isinstance(expr, Ref):
                # It's already a Ref to a subquery (see resolve_ref() for
                # details).
                new_exprs.append(expr)
            elif isinstance(expr, (WhereNode, Lookup)):
                # Decompose the subexpressions further. The code here is
                # copied from the else clause, but this condition must appear
                # before the contains_aggregate/is_summary condition below.
                new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
                new_exprs.append(new_expr)
            else:
                # Reuse aliases of expressions already selected in subquery.
                for col_alias, selected_annotation in self.annotation_select.items():
                    if selected_annotation is expr:
                        new_expr = Ref(col_alias, expr)
                        break
                else:
                    # An expression that is not selected in the subquery.
                    if isinstance(expr, Col) or (expr.contains_aggregate and not expr.is_summary):
                        # Reference column or another aggregate. Select it
                        # under a non-conflicting alias.
                        col_cnt += 1
                        col_alias = '__col%d' % col_cnt
                        self.annotations[col_alias] = expr
                        self.append_annotation_mask([col_alias])
                        new_expr = Ref(col_alias, expr)
                    else:
                        # Some other expression not referencing database values
                        # directly. Its subexpression might contain Cols.
                        new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
                new_exprs.append(new_expr)
        annotation.set_source_expressions(new_exprs)
        return annotation, col_cnt

    def get_aggregation(self, using, added_aggregate_names):
        """
        Return the dictionary with the values of the existing aggregations.
        """
        if not self.annotation_select:
            return {}
        existing_annotations = [
            annotation for alias, annotation
            in self.annotations.items()
            if alias not in added_aggregate_names
        ]
        # Decide if we need to use a subquery.
        #
        # Existing annotations would cause incorrect results as get_aggregation()
        # must produce just one result and thus must not use GROUP BY. But we
        # aren't smart enough to remove the existing annotations from the
        # query, so those would force us to use GROUP BY.
        #
        # If the query has limit or distinct, or uses set operations, then
        # those operations must be done in a subquery so that the query
        # aggregates on the limit and/or distinct results instead of applying
        # the distinct and limit after the aggregation.
        if (isinstance(self.group_by, tuple) or self.is_sliced or existing_annotations or
                self.distinct or self.combinator):
            from django.db.models.sql.subqueries import AggregateQuery
            outer_query = AggregateQuery(self.model)
            inner_query = self.clone()
            inner_query.select_for_update = False
            inner_query.select_related = False
            inner_query.set_annotation_mask(self.annotation_select)
            if not self.is_sliced and not self.distinct_fields:
                # Queries with distinct_fields need ordering and when a limit
                # is applied we must take the slice from the ordered query.
                # Otherwise no need for ordering.
                inner_query.clear_ordering(True)
            if not inner_query.distinct:
                # If the inner query uses default select and it has some
                # aggregate annotations, then we must make sure the inner
                # query is grouped by the main model's primary key. However,
                # clearing the select clause can alter results if distinct is
                # used.
                has_existing_aggregate_annotations = any(
                    annotation for annotation in existing_annotations
                    if getattr(annotation, 'contains_aggregate', True)
                )
                if inner_query.default_cols and has_existing_aggregate_annotations:
                    inner_query.group_by = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
                inner_query.default_cols = False

            relabels = {t: 'subquery' for t in inner_query.alias_map}
            relabels[None] = 'subquery'
            # Remove any aggregates marked for reduction from the subquery
            # and move them to the outer AggregateQuery.
            col_cnt = 0
            for alias, expression in list(inner_query.annotation_select.items()):
                annotation_select_mask = inner_query.annotation_select_mask
                if expression.is_summary:
                    expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt)
                    outer_query.annotations[alias] = expression.relabeled_clone(relabels)
                    del inner_query.annotations[alias]
                    annotation_select_mask.remove(alias)
                # Make sure the annotation_select won't use cached results.
                inner_query.set_annotation_mask(inner_query.annotation_select_mask)
            if inner_query.select == () and not inner_query.default_cols and not inner_query.annotation_select_mask:
                # In case of Model.objects[0:3].count(), there would be no
                # field selected in the inner query, yet we must use a subquery.
                # So, make sure at least one field is selected.
inner_query.select = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),) try: outer_query.add_subquery(inner_query, using) except EmptyResultSet: return { alias: None for alias in outer_query.annotation_select } else: outer_query = self self.select = () self.default_cols = False self.extra = {} outer_query.clear_ordering(True) outer_query.clear_limits() outer_query.select_for_update = False outer_query.select_related = False compiler = outer_query.get_compiler(using) result = compiler.execute_sql(SINGLE) if result is None: result = [None] * len(outer_query.annotation_select) converters = compiler.get_converters(outer_query.annotation_select.values()) result = next(compiler.apply_converters((result,), converters)) return dict(zip(outer_query.annotation_select, result)) def get_count(self, using): """ Perform a COUNT() query using the current filter constraints. """ obj = self.clone() obj.add_annotation(Count('*'), alias='__count', is_summary=True) number = obj.get_aggregation(using, ['__count'])['__count'] if number is None: number = 0 return number def has_filters(self): return self.where def has_results(self, using): q = self.clone() if not q.distinct: if q.group_by is True: q.add_fields((f.attname for f in self.model._meta.concrete_fields), False) # Disable GROUP BY aliases to avoid orphaning references to the # SELECT clause which is about to be cleared. q.set_group_by(allow_aliases=False) q.clear_select_clause() q.clear_ordering(True) q.set_limits(high=1) compiler = q.get_compiler(using=using) return compiler.has_results() def explain(self, using, format=None, **options): q = self.clone() q.explain_query = True q.explain_format = format q.explain_options = options compiler = q.get_compiler(using=using) return '\n'.join(compiler.explain_query()) def combine(self, rhs, connector): """ Merge the 'rhs' query into the current one (with any 'rhs' effects being applied *after* (that is, "to the right of") anything in the current query. 'rhs' is not modified during a call to this function. The 'connector' parameter describes how to connect filters from the 'rhs' query. """ assert self.model == rhs.model, \ "Cannot combine queries on two different base models." assert not self.is_sliced, \ "Cannot combine queries once a slice has been taken." assert self.distinct == rhs.distinct, \ "Cannot combine a unique query with a non-unique query." assert self.distinct_fields == rhs.distinct_fields, \ "Cannot combine queries with different distinct fields." # Work out how to relabel the rhs aliases, if necessary. change_map = {} conjunction = (connector == AND) # Determine which existing joins can be reused. When combining the # query with AND we must recreate all joins for m2m filters. When # combining with OR we can reuse joins. The reason is that in AND # case a single row can't fulfill a condition like: # revrel__col=1 & revrel__col=2 # But, there might be two different related rows matching this # condition. In OR case a single True is enough, so single row is # enough, too. # # Note that we will be creating duplicate joins for non-m2m joins in # the AND case. The results will be correct but this creates too many # joins. This is something that could be fixed later on. reuse = set() if conjunction else set(self.alias_map) # Base table must be present in the query - this is the same # table on both sides. 
        self.get_initial_alias()
        joinpromoter = JoinPromoter(connector, 2, False)
        joinpromoter.add_votes(
            j for j in self.alias_map if self.alias_map[j].join_type == INNER)
        rhs_votes = set()
        # Now, add the joins from rhs query into the new query (skipping base
        # table).
        rhs_tables = list(rhs.alias_map)[1:]
        for alias in rhs_tables:
            join = rhs.alias_map[alias]
            # If the left side of the join was already relabeled, use the
            # updated alias.
            join = join.relabeled_clone(change_map)
            new_alias = self.join(join, reuse=reuse)
            if join.join_type == INNER:
                rhs_votes.add(new_alias)
            # We can't reuse the same join again in the query. If we have two
            # distinct joins for the same connection in rhs query, then the
            # combined query must have two joins, too.
            reuse.discard(new_alias)
            if alias != new_alias:
                change_map[alias] = new_alias
            if not rhs.alias_refcount[alias]:
                # The alias was unused in the rhs query. Unref it so that it
                # will be unused in the new query, too. We have to add and
                # unref the alias so that join promotion has information of
                # the join type for the unused alias.
                self.unref_alias(new_alias)
        joinpromoter.add_votes(rhs_votes)
        joinpromoter.update_join_types(self)

        # Now relabel a copy of the rhs where-clause and add it to the current
        # one.
        w = rhs.where.clone()
        w.relabel_aliases(change_map)
        self.where.add(w, connector)

        # Selection columns and extra extensions are those provided by 'rhs'.
        if rhs.select:
            self.set_select([col.relabeled_clone(change_map) for col in rhs.select])
        else:
            self.select = ()

        if connector == OR:
            # It would be nice to be able to handle this, but the queries don't
            # really make sense (or return consistent value sets). Not worth
            # the extra complexity when you can write a real query instead.
            if self.extra and rhs.extra:
                raise ValueError("When merging querysets using 'or', you cannot have extra(select=...) on both sides.")
        self.extra.update(rhs.extra)
        extra_select_mask = set()
        if self.extra_select_mask is not None:
            extra_select_mask.update(self.extra_select_mask)
        if rhs.extra_select_mask is not None:
            extra_select_mask.update(rhs.extra_select_mask)
        if extra_select_mask:
            self.set_extra_mask(extra_select_mask)
        self.extra_tables += rhs.extra_tables

        # Ordering uses the 'rhs' ordering, unless it has none, in which case
        # the current ordering is used.
        self.order_by = rhs.order_by or self.order_by
        self.extra_order_by = rhs.extra_order_by or self.extra_order_by

    def deferred_to_data(self, target, callback):
        """
        Convert the self.deferred_loading data structure to an alternate data
        structure, describing the fields that *will* be loaded. This is used
        to compute the columns to select from the database and also by the
        QuerySet class to work out which fields are being initialized on each
        model. Models that have all their fields included aren't mentioned in
        the result, only those that have field restrictions in place.

        The "target" parameter is the instance that is populated (in place).
        The "callback" is a function that is called whenever a (model, field)
        pair needs to be added to "target". It accepts three parameters:
        "target", and the model and list of fields being added for that model.
""" field_names, defer = self.deferred_loading if not field_names: return orig_opts = self.get_meta() seen = {} must_include = {orig_opts.concrete_model: {orig_opts.pk}} for field_name in field_names: parts = field_name.split(LOOKUP_SEP) cur_model = self.model._meta.concrete_model opts = orig_opts for name in parts[:-1]: old_model = cur_model if name in self._filtered_relations: name = self._filtered_relations[name].relation_name source = opts.get_field(name) if is_reverse_o2o(source): cur_model = source.related_model else: cur_model = source.remote_field.model opts = cur_model._meta # Even if we're "just passing through" this model, we must add # both the current model's pk and the related reference field # (if it's not a reverse relation) to the things we select. if not is_reverse_o2o(source): must_include[old_model].add(source) add_to_dict(must_include, cur_model, opts.pk) field = opts.get_field(parts[-1]) is_reverse_object = field.auto_created and not field.concrete model = field.related_model if is_reverse_object else field.model model = model._meta.concrete_model if model == opts.model: model = cur_model if not is_reverse_o2o(field): add_to_dict(seen, model, field) if defer: # We need to load all fields for each model, except those that # appear in "seen" (for all models that appear in "seen"). The only # slight complexity here is handling fields that exist on parent # models. workset = {} for model, values in seen.items(): for field in model._meta.local_fields: if field not in values: m = field.model._meta.concrete_model add_to_dict(workset, m, field) for model, values in must_include.items(): # If we haven't included a model in workset, we don't add the # corresponding must_include fields for that model, since an # empty set means "include all fields". That's why there's no # "else" branch here. if model in workset: workset[model].update(values) for model, values in workset.items(): callback(target, model, values) else: for model, values in must_include.items(): if model in seen: seen[model].update(values) else: # As we've passed through this model, but not explicitly # included any fields, we have to make sure it's mentioned # so that only the "must include" fields are pulled in. seen[model] = values # Now ensure that every model in the inheritance chain is mentioned # in the parent list. Again, it must be mentioned to ensure that # only "must include" fields are pulled in. for model in orig_opts.get_parent_list(): seen.setdefault(model, set()) for model, values in seen.items(): callback(target, model, values) def table_alias(self, table_name, create=False, filtered_relation=None): """ Return a table alias for the given table_name and whether this is a new alias or not. If 'create' is true, a new alias is always created. Otherwise, the most recently created alias for the table (if one exists) is reused. """ alias_list = self.table_map.get(table_name) if not create and alias_list: alias = alias_list[0] self.alias_refcount[alias] += 1 return alias, False # Create a new alias for this table. if alias_list: alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1) alias_list.append(alias) else: # The first occurrence of a table uses the table name directly. 
            alias = filtered_relation.alias if filtered_relation is not None else table_name
            self.table_map[table_name] = [alias]
        self.alias_refcount[alias] = 1
        return alias, True

    def ref_alias(self, alias):
        """Increases the reference count for this alias."""
        self.alias_refcount[alias] += 1

    def unref_alias(self, alias, amount=1):
        """Decreases the reference count for this alias."""
        self.alias_refcount[alias] -= amount

    def promote_joins(self, aliases):
        """
        Recursively promote the join type of the given aliases and their
        children to an outer join. Only promote a join if it is nullable or
        the parent join is an outer join.

        The children promotion is done to avoid join chains that contain a
        LOUTER b INNER c. So, if we have currently a INNER b INNER c and a->b
        is promoted, then we must also promote b->c automatically, or
        otherwise the promotion of a->b doesn't actually change anything in
        the query results.
        """
        aliases = list(aliases)
        while aliases:
            alias = aliases.pop(0)
            if self.alias_map[alias].join_type is None:
                # This is the base table (first FROM entry) - this table
                # isn't really joined at all in the query, so we should not
                # alter its join type.
                continue
            # Only the first alias (skipped above) should have None join_type
            assert self.alias_map[alias].join_type is not None
            parent_alias = self.alias_map[alias].parent_alias
            parent_louter = parent_alias and self.alias_map[parent_alias].join_type == LOUTER
            already_louter = self.alias_map[alias].join_type == LOUTER
            if ((self.alias_map[alias].nullable or parent_louter) and
                    not already_louter):
                self.alias_map[alias] = self.alias_map[alias].promote()
                # Join type of 'alias' changed, so re-examine all aliases that
                # refer to this one.
                aliases.extend(
                    join for join in self.alias_map
                    if self.alias_map[join].parent_alias == alias and join not in aliases
                )

    def demote_joins(self, aliases):
        """
        Change join type from LOUTER to INNER for all joins in aliases.

        Similarly to promote_joins(), this method must ensure no join chains
        containing first an outer, then an inner join are generated. If we
        are demoting b->c join in chain a LOUTER b LOUTER c then we must
        demote a->b automatically, or otherwise the demotion of b->c doesn't
        actually change anything in the query results.
        """
        aliases = list(aliases)
        while aliases:
            alias = aliases.pop(0)
            if self.alias_map[alias].join_type == LOUTER:
                self.alias_map[alias] = self.alias_map[alias].demote()
                parent_alias = self.alias_map[alias].parent_alias
                if self.alias_map[parent_alias].join_type == INNER:
                    aliases.append(parent_alias)

    def reset_refcounts(self, to_counts):
        """
        Reset reference counts for aliases so that they match the value passed
        in `to_counts`.
        """
        for alias, cur_refcount in self.alias_refcount.copy().items():
            unref_amount = cur_refcount - to_counts.get(alias, 0)
            self.unref_alias(alias, unref_amount)

    def change_aliases(self, change_map):
        """
        Change the aliases in change_map (which maps old-alias -> new-alias),
        relabelling any references to them in select columns and the where
        clause.
        """
        assert set(change_map).isdisjoint(change_map.values())

        # 1. Update references in "select" (normal columns plus aliases),
        # "group by" and "where".
        self.where.relabel_aliases(change_map)
        if isinstance(self.group_by, tuple):
            self.group_by = tuple([col.relabeled_clone(change_map) for col in self.group_by])
        self.select = tuple([col.relabeled_clone(change_map) for col in self.select])
        self.annotations = self.annotations and {
            key: col.relabeled_clone(change_map) for key, col in self.annotations.items()
        }

        # 2. 
Rename the alias in the internal table/alias datastructures. for old_alias, new_alias in change_map.items(): if old_alias not in self.alias_map: continue alias_data = self.alias_map[old_alias].relabeled_clone(change_map) self.alias_map[new_alias] = alias_data self.alias_refcount[new_alias] = self.alias_refcount[old_alias] del self.alias_refcount[old_alias] del self.alias_map[old_alias] table_aliases = self.table_map[alias_data.table_name] for pos, alias in enumerate(table_aliases): if alias == old_alias: table_aliases[pos] = new_alias break self.external_aliases = { # Table is aliased or it's being changed and thus is aliased. change_map.get(alias, alias): (aliased or alias in change_map) for alias, aliased in self.external_aliases.items() } def bump_prefix(self, outer_query): """ Change the alias prefix to the next letter in the alphabet in a way that the outer query's aliases and this query's aliases will not conflict. Even tables that previously had no alias will get an alias after this call. """ def prefix_gen(): """ Generate a sequence of characters in alphabetical order: -> 'A', 'B', 'C', ... When the alphabet is finished, the sequence will continue with the Cartesian product: -> 'AA', 'AB', 'AC', ... """ alphabet = ascii_uppercase prefix = chr(ord(self.alias_prefix) + 1) yield prefix for n in count(1): seq = alphabet[alphabet.index(prefix):] if prefix else alphabet for s in product(seq, repeat=n): yield ''.join(s) prefix = None if self.alias_prefix != outer_query.alias_prefix: # No clashes between self and outer query should be possible. return # Explicitly avoid infinite loop. The constant divider is based on how # much depth recursive subquery references add to the stack. This value # might need to be adjusted when adding or removing function calls from # the code path in charge of performing these operations. local_recursion_limit = sys.getrecursionlimit() // 16 for pos, prefix in enumerate(prefix_gen()): if prefix not in self.subq_aliases: self.alias_prefix = prefix break if pos > local_recursion_limit: raise RecursionError( 'Maximum recursion depth exceeded: too many subqueries.' ) self.subq_aliases = self.subq_aliases.union([self.alias_prefix]) outer_query.subq_aliases = outer_query.subq_aliases.union(self.subq_aliases) self.change_aliases({ alias: '%s%d' % (self.alias_prefix, pos) for pos, alias in enumerate(self.alias_map) }) def get_initial_alias(self): """ Return the first alias for this query, after increasing its reference count. """ if self.alias_map: alias = self.base_table self.ref_alias(alias) else: alias = self.join(BaseTable(self.get_meta().db_table, None)) return alias def count_active_tables(self): """ Return the number of tables in this query with a non-zero reference count. After execution, the reference counts are zeroed, so tables added in compiler will not be seen by this method. """ return len([1 for count in self.alias_refcount.values() if count]) def join(self, join, reuse=None, reuse_with_filtered_relation=False): """ Return an alias for the 'join', either reusing an existing alias for that join or creating a new one. 'join' is either a sql.datastructures.BaseTable or Join. The 'reuse' parameter can be either None which means all joins are reusable, or it can be a set containing the aliases that can be reused. The 'reuse_with_filtered_relation' parameter is used when computing FilteredRelation instances. A join is always created as LOUTER if the lhs alias is LOUTER to make sure chains like t1 LOUTER t2 INNER t3 aren't generated. 
        All new joins are created as LOUTER if the join is nullable.
        """
        if reuse_with_filtered_relation and reuse:
            reuse_aliases = [
                a for a, j in self.alias_map.items()
                if a in reuse and j.equals(join, with_filtered_relation=False)
            ]
        else:
            reuse_aliases = [
                a for a, j in self.alias_map.items()
                if (reuse is None or a in reuse) and j == join
            ]
        if reuse_aliases:
            if join.table_alias in reuse_aliases:
                reuse_alias = join.table_alias
            else:
                # Reuse the most recent alias of the joined table
                # (a many-to-many relation may be joined multiple times).
                reuse_alias = reuse_aliases[-1]
            self.ref_alias(reuse_alias)
            return reuse_alias

        # No reuse is possible, so we need a new alias.
        alias, _ = self.table_alias(join.table_name, create=True, filtered_relation=join.filtered_relation)
        if join.join_type:
            if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:
                join_type = LOUTER
            else:
                join_type = INNER
            join.join_type = join_type
        join.table_alias = alias
        self.alias_map[alias] = join
        return alias

    def join_parent_model(self, opts, model, alias, seen):
        """
        Make sure the given 'model' is joined in the query. If 'model' isn't
        a parent of 'opts' or if it is None this method is a no-op.

        The 'alias' is the root alias for starting the join, 'seen' is a dict
        of model -> alias of existing joins. It must also contain a mapping
        of None -> some alias. This will be returned in the no-op case.
        """
        if model in seen:
            return seen[model]
        chain = opts.get_base_chain(model)
        if not chain:
            return alias
        curr_opts = opts
        for int_model in chain:
            if int_model in seen:
                curr_opts = int_model._meta
                alias = seen[int_model]
                continue
            # Proxy models have elements in the base chain with no parents;
            # assign the new options object and skip to the next base in that
            # case.
            if not curr_opts.parents[int_model]:
                curr_opts = int_model._meta
                continue
            link_field = curr_opts.get_ancestor_link(int_model)
            join_info = self.setup_joins([link_field.name], curr_opts, alias)
            curr_opts = int_model._meta
            alias = seen[int_model] = join_info.joins[-1]
        return alias or seen[None]

    def add_annotation(self, annotation, alias, is_summary=False):
        """Add a single annotation expression to the Query."""
        annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None,
                                                   summarize=is_summary)
        self.append_annotation_mask([alias])
        self.annotations[alias] = annotation

    def resolve_expression(self, query, *args, **kwargs):
        clone = self.clone()
        # Subqueries need to use a different set of aliases than the outer query.
        clone.bump_prefix(query)
        clone.subquery = True
        # It's safe to drop ordering if the queryset isn't using slicing,
        # distinct(*fields) or select_for_update().
        if (self.low_mark == 0 and self.high_mark is None and
                not self.distinct_fields and
                not self.select_for_update):
            clone.clear_ordering(True)
        clone.where.resolve_expression(query, *args, **kwargs)
        for key, value in clone.annotations.items():
            resolved = value.resolve_expression(query, *args, **kwargs)
            if hasattr(resolved, 'external_aliases'):
                resolved.external_aliases.update(clone.external_aliases)
            clone.annotations[key] = resolved
        # Outer query's aliases are considered external.
        for alias, table in query.alias_map.items():
            clone.external_aliases[alias] = (
                (isinstance(table, Join) and table.join_field.related_model._meta.db_table != alias) or
                (isinstance(table, BaseTable) and table.table_name != table.table_alias)
            )
        return clone

    def get_external_cols(self):
        exprs = chain(self.annotations.values(), self.where.children)
        return [
            col for col in self._gen_cols(exprs)
            if col.alias in self.external_aliases
        ]

    def as_sql(self, compiler, connection):
        sql, params = self.get_compiler(connection=connection).as_sql()
        if self.subquery:
            sql = '(%s)' % sql
        return sql, params

    def resolve_lookup_value(self, value, can_reuse, allow_joins):
        if hasattr(value, 'resolve_expression'):
            value = value.resolve_expression(
                self, reuse=can_reuse, allow_joins=allow_joins,
            )
        elif isinstance(value, (list, tuple)):
            # The items of the iterable may be expressions and therefore need
            # to be resolved independently.
            return type(value)(
                self.resolve_lookup_value(sub_value, can_reuse, allow_joins)
                for sub_value in value
            )
        return value

    def solve_lookup_type(self, lookup):
        """
        Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains').
        """
        lookup_splitted = lookup.split(LOOKUP_SEP)
        if self.annotations:
            expression, expression_lookups = refs_expression(lookup_splitted, self.annotations)
            if expression:
                return expression_lookups, (), expression
        _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
        field_parts = lookup_splitted[0:len(lookup_splitted) - len(lookup_parts)]
        if len(lookup_parts) > 1 and not field_parts:
            raise FieldError(
                'Invalid lookup "%s" for model %s.' %
                (lookup, self.get_meta().model.__name__)
            )
        return lookup_parts, field_parts, False

    def check_query_object_type(self, value, opts, field):
        """
        Check whether the object passed while querying is of the correct type.
        If not, raise a ValueError specifying the wrong object.
        """
        if hasattr(value, '_meta'):
            if not check_rel_lookup_compatibility(value._meta.model, opts, field):
                raise ValueError(
                    'Cannot query "%s": Must be "%s" instance.' %
                    (value, opts.object_name))

    def check_related_objects(self, field, value, opts):
        """Check the type of object passed to query relations."""
        if field.is_relation:
            # Check that the field and the queryset use the same model in a
            # query like .filter(author=Author.objects.all()). For example, the
            # opts would be Author's (from the author field) and value.model
            # would be Author.objects.all() queryset's .model (Author also).
            # The field is the related field on the lhs side.
            if (isinstance(value, Query) and not value.has_select_fields and
                    not check_rel_lookup_compatibility(value.model, opts, field)):
                raise ValueError(
                    'Cannot use QuerySet for "%s": Use a QuerySet for "%s".' %
                    (value.model._meta.object_name, opts.object_name)
                )
            elif hasattr(value, '_meta'):
                self.check_query_object_type(value, opts, field)
            elif hasattr(value, '__iter__'):
                for v in value:
                    self.check_query_object_type(v, opts, field)

    def check_filterable(self, expression):
        """Raise an error if expression cannot be used in a WHERE clause."""
        if (
            hasattr(expression, 'resolve_expression') and
            not getattr(expression, 'filterable', True)
        ):
            raise NotSupportedError(
                expression.__class__.__name__ + ' is disallowed in the filter '
                'clause.'
            )
        if hasattr(expression, 'get_source_expressions'):
            for expr in expression.get_source_expressions():
                self.check_filterable(expr)

    def build_lookup(self, lookups, lhs, rhs):
        """
        Try to extract transforms and lookup from given lhs.

        The lhs value is something that works like SQLExpression.
        The rhs value is what the lookup is going to compare against.
        'lookups' is a list of names to extract using get_lookup()
        and get_transform().
        """
        # __exact is the default lookup if one isn't given.
        *transforms, lookup_name = lookups or ['exact']
        for name in transforms:
            lhs = self.try_transform(lhs, name)
        # First try get_lookup() so that the lookup takes precedence if the lhs
        # supports both transform and lookup for the name.
        lookup_class = lhs.get_lookup(lookup_name)
        if not lookup_class:
            if lhs.field.is_relation:
                raise FieldError('Related Field got invalid lookup: {}'.format(lookup_name))
            # A lookup wasn't found. Try to interpret the name as a transform
            # and do an Exact lookup against it.
            lhs = self.try_transform(lhs, lookup_name)
            lookup_name = 'exact'
            lookup_class = lhs.get_lookup(lookup_name)
            if not lookup_class:
                return

        lookup = lookup_class(lhs, rhs)
        # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
        # uses of None as a query value unless the lookup supports it.
        if lookup.rhs is None and not lookup.can_use_none_as_rhs:
            if lookup_name not in ('exact', 'iexact'):
                raise ValueError("Cannot use None as a query value")
            return lhs.get_lookup('isnull')(lhs, True)

        # For Oracle '' is equivalent to null. The check must be done at this
        # stage because join promotion can't be done in the compiler. Using
        # DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here.
        # A similar thing is done in is_nullable(), too.
        if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and
                lookup_name == 'exact' and lookup.rhs == ''):
            return lhs.get_lookup('isnull')(lhs, True)

        return lookup

    def try_transform(self, lhs, name):
        """
        Helper method for build_lookup(). Try to fetch and initialize
        a transform for name parameter from lhs.
        """
        transform_class = lhs.get_transform(name)
        if transform_class:
            return transform_class(lhs)
        else:
            output_field = lhs.output_field.__class__
            suggested_lookups = difflib.get_close_matches(name, output_field.get_lookups())
            if suggested_lookups:
                suggestion = ', perhaps you meant %s?' % ' or '.join(suggested_lookups)
            else:
                suggestion = '.'
            raise FieldError(
                "Unsupported lookup '%s' for %s or join on the field not "
                "permitted%s" % (name, output_field.__name__, suggestion)
            )

    def build_filter(self, filter_expr, branch_negated=False, current_negated=False,
                     can_reuse=None, allow_joins=True, split_subq=True,
                     reuse_with_filtered_relation=False, check_filterable=True):
        """
        Build a WhereNode for a single filter clause but don't add it
        to this Query. Query.add_q() will then add this filter to the where
        Node.

        The 'branch_negated' tells us if the current branch contains any
        negations. This will be used to determine if subqueries are needed.

        The 'current_negated' is used to determine if the current filter is
        negated or not and this will be used to determine if IS NULL filtering
        is needed.

        The difference between current_negated and branch_negated is that
        branch_negated is set on first negation, but current_negated is
        flipped for each negation.

        Note that add_filter will not do any negating itself, that is done
        higher up in the code by add_q().

        The 'can_reuse' is a set of reusable joins for multijoins.

        If 'reuse_with_filtered_relation' is True, then only joins in can_reuse
        will be reused.

        The method will create a filter clause that can be added to the current
        query. However, if the filter isn't added to the query then the caller
        is responsible for unreffing the joins used.
""" if isinstance(filter_expr, dict): raise FieldError("Cannot parse keyword query as dict") if isinstance(filter_expr, Q): return self._add_q( filter_expr, branch_negated=branch_negated, current_negated=current_negated, used_aliases=can_reuse, allow_joins=allow_joins, split_subq=split_subq, check_filterable=check_filterable, ) if hasattr(filter_expr, 'resolve_expression'): if not getattr(filter_expr, 'conditional', False): raise TypeError('Cannot filter against a non-conditional expression.') condition = self.build_lookup( ['exact'], filter_expr.resolve_expression(self, allow_joins=allow_joins), True ) clause = self.where_class() clause.add(condition, AND) return clause, [] arg, value = filter_expr if not arg: raise FieldError("Cannot parse keyword query %r" % arg) lookups, parts, reffed_expression = self.solve_lookup_type(arg) if check_filterable: self.check_filterable(reffed_expression) if not allow_joins and len(parts) > 1: raise FieldError("Joined field references are not permitted in this query") pre_joins = self.alias_refcount.copy() value = self.resolve_lookup_value(value, can_reuse, allow_joins) used_joins = {k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)} if check_filterable: self.check_filterable(value) clause = self.where_class() if reffed_expression: condition = self.build_lookup(lookups, reffed_expression, value) clause.add(condition, AND) return clause, [] opts = self.get_meta() alias = self.get_initial_alias() allow_many = not branch_negated or not split_subq try: join_info = self.setup_joins( parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many, reuse_with_filtered_relation=reuse_with_filtered_relation, ) # Prevent iterator from being consumed by check_related_objects() if isinstance(value, Iterator): value = list(value) self.check_related_objects(join_info.final_field, value, join_info.opts) # split_exclude() needs to know which joins were generated for the # lookup parts self._lookup_joins = join_info.joins except MultiJoin as e: return self.split_exclude(filter_expr, can_reuse, e.names_with_path) # Update used_joins before trimming since they are reused to determine # which joins could be later promoted to INNER. used_joins.update(join_info.joins) targets, alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path) if can_reuse is not None: can_reuse.update(join_list) if join_info.final_field.is_relation: # No support for transforms for relational fields num_lookups = len(lookups) if num_lookups > 1: raise FieldError('Related Field got invalid lookup: {}'.format(lookups[0])) if len(targets) == 1: col = self._get_col(targets[0], join_info.final_field, alias) else: col = MultiColSource(alias, targets, join_info.targets, join_info.final_field) else: col = self._get_col(targets[0], join_info.final_field, alias) condition = self.build_lookup(lookups, col, value) lookup_type = condition.lookup_name clause.add(condition, AND) require_outer = lookup_type == 'isnull' and condition.rhs is True and not current_negated if current_negated and (lookup_type != 'isnull' or condition.rhs is False) and condition.rhs is not None: require_outer = True if lookup_type != 'isnull': # The condition added here will be SQL like this: # NOT (col IS NOT NULL), where the first NOT is added in # upper layers of code. The reason for addition is that if col # is null, then col != someval will result in SQL "unknown" # which isn't the same as in Python. 
The Python None handling # is wanted, and it can be gotten by # (col IS NULL OR col != someval) # <=> # NOT (col IS NOT NULL AND col = someval). if ( self.is_nullable(targets[0]) or self.alias_map[join_list[-1]].join_type == LOUTER ): lookup_class = targets[0].get_lookup('isnull') col = self._get_col(targets[0], join_info.targets[0], alias) clause.add(lookup_class(col, False), AND) # If someval is a nullable column, someval IS NOT NULL is # added. if isinstance(value, Col) and self.is_nullable(value.target): lookup_class = value.target.get_lookup('isnull') clause.add(lookup_class(value, False), AND) return clause, used_joins if not require_outer else () def add_filter(self, filter_clause): self.add_q(Q(**{filter_clause[0]: filter_clause[1]})) def add_q(self, q_object): """ A preprocessor for the internal _add_q(). Responsible for doing final join promotion. """ # For join promotion this case is doing an AND for the added q_object # and existing conditions. So, any existing inner join forces the join # type to remain inner. Existing outer joins can however be demoted. # (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if # rel_a doesn't produce any rows, then the whole condition must fail. # So, demotion is OK. existing_inner = {a for a in self.alias_map if self.alias_map[a].join_type == INNER} clause, _ = self._add_q(q_object, self.used_aliases) if clause: self.where.add(clause, AND) self.demote_joins(existing_inner) def build_where(self, filter_expr): return self.build_filter(filter_expr, allow_joins=False)[0] def _add_q(self, q_object, used_aliases, branch_negated=False, current_negated=False, allow_joins=True, split_subq=True, check_filterable=True): """Add a Q-object to the current filter.""" connector = q_object.connector current_negated = current_negated ^ q_object.negated branch_negated = branch_negated or q_object.negated target_clause = self.where_class(connector=connector, negated=q_object.negated) joinpromoter = JoinPromoter(q_object.connector, len(q_object.children), current_negated) for child in q_object.children: child_clause, needed_inner = self.build_filter( child, can_reuse=used_aliases, branch_negated=branch_negated, current_negated=current_negated, allow_joins=allow_joins, split_subq=split_subq, check_filterable=check_filterable, ) joinpromoter.add_votes(needed_inner) if child_clause: target_clause.add(child_clause, connector) needed_inner = joinpromoter.update_join_types(self) return target_clause, needed_inner def build_filtered_relation_q(self, q_object, reuse, branch_negated=False, current_negated=False): """Add a FilteredRelation object to the current filter.""" connector = q_object.connector current_negated ^= q_object.negated branch_negated = branch_negated or q_object.negated target_clause = self.where_class(connector=connector, negated=q_object.negated) for child in q_object.children: if isinstance(child, Node): child_clause = self.build_filtered_relation_q( child, reuse=reuse, branch_negated=branch_negated, current_negated=current_negated, ) else: child_clause, _ = self.build_filter( child, can_reuse=reuse, branch_negated=branch_negated, current_negated=current_negated, allow_joins=True, split_subq=False, reuse_with_filtered_relation=True, ) target_clause.add(child_clause, connector) return target_clause def add_filtered_relation(self, filtered_relation, alias): filtered_relation.alias = alias lookups = dict(get_children_from_q(filtered_relation.condition)) relation_lookup_parts, relation_field_parts, _ = 
self.solve_lookup_type(filtered_relation.relation_name) if relation_lookup_parts: raise ValueError( "FilteredRelation's relation_name cannot contain lookups " "(got %r)." % filtered_relation.relation_name ) for lookup in chain(lookups): lookup_parts, lookup_field_parts, _ = self.solve_lookup_type(lookup) shift = 2 if not lookup_parts else 1 lookup_field_path = lookup_field_parts[:-shift] for idx, lookup_field_part in enumerate(lookup_field_path): if len(relation_field_parts) > idx: if relation_field_parts[idx] != lookup_field_part: raise ValueError( "FilteredRelation's condition doesn't support " "relations outside the %r (got %r)." % (filtered_relation.relation_name, lookup) ) else: raise ValueError( "FilteredRelation's condition doesn't support nested " "relations deeper than the relation_name (got %r for " "%r)." % (lookup, filtered_relation.relation_name) ) self._filtered_relations[filtered_relation.alias] = filtered_relation def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False): """ Walk the list of names and turns them into PathInfo tuples. A single name in 'names' can generate multiple PathInfos (m2m, for example). 'names' is the path of names to travel, 'opts' is the model Options we start the name resolving from, 'allow_many' is as for setup_joins(). If fail_on_missing is set to True, then a name that can't be resolved will generate a FieldError. Return a list of PathInfo tuples. In addition return the final field (the last used join field) and target (which is a field guaranteed to contain the same value as the final field). Finally, return those names that weren't found (which are likely transforms and the final lookup). """ path, names_with_path = [], [] for pos, name in enumerate(names): cur_names_with_path = (name, []) if name == 'pk': name = opts.pk.name field = None filtered_relation = None try: field = opts.get_field(name) except FieldDoesNotExist: if name in self.annotation_select: field = self.annotation_select[name].output_field elif name in self._filtered_relations and pos == 0: filtered_relation = self._filtered_relations[name] if LOOKUP_SEP in filtered_relation.relation_name: parts = filtered_relation.relation_name.split(LOOKUP_SEP) filtered_relation_path, field, _, _ = self.names_to_path( parts, opts, allow_many, fail_on_missing, ) path.extend(filtered_relation_path[:-1]) else: field = opts.get_field(filtered_relation.relation_name) if field is not None: # Fields that contain one-to-many relations with a generic # model (like a GenericForeignKey) cannot generate reverse # relations and therefore cannot be used for reverse querying. if field.is_relation and not field.related_model: raise FieldError( "Field %r does not generate an automatic reverse " "relation and therefore cannot be used for reverse " "querying. If it is a GenericForeignKey, consider " "adding a GenericRelation." % name ) try: model = field.model._meta.concrete_model except AttributeError: # QuerySet.annotate() may introduce fields that aren't # attached to a model. model = None else: # We didn't find the current field, so move position back # one step. pos -= 1 if pos == -1 or fail_on_missing: available = sorted([ *get_field_names_from_opts(opts), *self.annotation_select, *self._filtered_relations, ]) raise FieldError("Cannot resolve keyword '%s' into field. 
" "Choices are: %s" % (name, ", ".join(available))) break # Check if we need any joins for concrete inheritance cases (the # field lives in parent, but we are currently in one of its # children) if model is not opts.model: path_to_parent = opts.get_path_to_parent(model) if path_to_parent: path.extend(path_to_parent) cur_names_with_path[1].extend(path_to_parent) opts = path_to_parent[-1].to_opts if hasattr(field, 'get_path_info'): pathinfos = field.get_path_info(filtered_relation) if not allow_many: for inner_pos, p in enumerate(pathinfos): if p.m2m: cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1]) names_with_path.append(cur_names_with_path) raise MultiJoin(pos + 1, names_with_path) last = pathinfos[-1] path.extend(pathinfos) final_field = last.join_field opts = last.to_opts targets = last.target_fields cur_names_with_path[1].extend(pathinfos) names_with_path.append(cur_names_with_path) else: # Local non-relational field. final_field = field targets = (field,) if fail_on_missing and pos + 1 != len(names): raise FieldError( "Cannot resolve keyword %r into field. Join on '%s'" " not permitted." % (names[pos + 1], name)) break return path, final_field, targets, names[pos + 1:] def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True, reuse_with_filtered_relation=False): """ Compute the necessary table joins for the passage through the fields given in 'names'. 'opts' is the Options class for the current model (which gives the table we are starting from), 'alias' is the alias for the table to start the joining from. The 'can_reuse' defines the reverse foreign key joins we can reuse. It can be None in which case all joins are reusable or a set of aliases that can be reused. Note that non-reverse foreign keys are always reusable when using setup_joins(). The 'reuse_with_filtered_relation' can be used to force 'can_reuse' parameter and force the relation on the given connections. If 'allow_many' is False, then any reverse foreign key seen will generate a MultiJoin exception. Return the final field involved in the joins, the target field (used for any 'where' constraint), the final 'opts' value, the joins, the field path traveled to generate the joins, and a transform function that takes a field and alias and is equivalent to `field.get_col(alias)` in the simple case but wraps field transforms if they were included in names. The target field is the field containing the concrete value. Final field can be something different, for example foreign key pointing to that value. Final field is needed for example in some value conversions (convert 'obj' in fk__id=obj to pk val using the foreign key field for example). """ joins = [alias] # The transform can't be applied yet, as joins must be trimmed later. # To avoid making every caller of this method look up transforms # directly, compute transforms here and create a partial that converts # fields to the appropriate wrapped version. def final_transformer(field, alias): return field.get_col(alias) # Try resolving all the names as fields first. If there's an error, # treat trailing names as lookups until a field can be resolved. last_field_exception = None for pivot in range(len(names), 0, -1): try: path, final_field, targets, rest = self.names_to_path( names[:pivot], opts, allow_many, fail_on_missing=True, ) except FieldError as exc: if pivot == 1: # The first item cannot be a lookup, so it's safe # to raise the field error here. 
raise else: last_field_exception = exc else: # The transforms are the remaining items that couldn't be # resolved into fields. transforms = names[pivot:] break for name in transforms: def transform(field, alias, *, name, previous): try: wrapped = previous(field, alias) return self.try_transform(wrapped, name) except FieldError: # FieldError is raised if the transform doesn't exist. if isinstance(final_field, Field) and last_field_exception: raise last_field_exception else: raise final_transformer = functools.partial(transform, name=name, previous=final_transformer) # Then, add the path to the query's joins. Note that we can't trim # joins at this stage - we will need the information about join type # of the trimmed joins. for join in path: if join.filtered_relation: filtered_relation = join.filtered_relation.clone() table_alias = filtered_relation.alias else: filtered_relation = None table_alias = None opts = join.to_opts if join.direct: nullable = self.is_nullable(join.join_field) else: nullable = True connection = Join( opts.db_table, alias, table_alias, INNER, join.join_field, nullable, filtered_relation=filtered_relation, ) reuse = can_reuse if join.m2m or reuse_with_filtered_relation else None alias = self.join( connection, reuse=reuse, reuse_with_filtered_relation=reuse_with_filtered_relation, ) joins.append(alias) if filtered_relation: filtered_relation.path = joins[:] return JoinInfo(final_field, targets, opts, joins, path, final_transformer) def trim_joins(self, targets, joins, path): """ The 'target' parameter is the final field being joined to, 'joins' is the full list of join aliases. The 'path' contain the PathInfos used to create the joins. Return the final target field and table alias and the new active joins. Always trim any direct join if the target column is already in the previous table. Can't trim reverse joins as it's unknown if there's anything on the other side of the join. """ joins = joins[:] for pos, info in enumerate(reversed(path)): if len(joins) == 1 or not info.direct: break if info.filtered_relation: break join_targets = {t.column for t in info.join_field.foreign_related_fields} cur_targets = {t.column for t in targets} if not cur_targets.issubset(join_targets): break targets_dict = {r[1].column: r[0] for r in info.join_field.related_fields if r[1].column in cur_targets} targets = tuple(targets_dict[t.column] for t in targets) self.unref_alias(joins.pop()) return targets, joins[-1], joins @classmethod def _gen_cols(cls, exprs): for expr in exprs: if isinstance(expr, Col): yield expr else: yield from cls._gen_cols(expr.get_source_expressions()) @classmethod def _gen_col_aliases(cls, exprs): yield from (expr.alias for expr in cls._gen_cols(exprs)) def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False): if not allow_joins and LOOKUP_SEP in name: raise FieldError("Joined field references are not permitted in this query") annotation = self.annotations.get(name) if annotation is not None: if not allow_joins: for alias in self._gen_col_aliases([annotation]): if isinstance(self.alias_map[alias], Join): raise FieldError( 'Joined field references are not permitted in ' 'this query' ) if summarize: # Summarize currently means we are doing an aggregate() query # which is executed as a wrapped subquery if any of the # aggregate() elements reference an existing annotation. In # that case we need to return a Ref to the subquery's annotation. 
return Ref(name, self.annotation_select[name]) else: return annotation else: field_list = name.split(LOOKUP_SEP) join_info = self.setup_joins(field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse) targets, final_alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path) if not allow_joins and len(join_list) > 1: raise FieldError('Joined field references are not permitted in this query') if len(targets) > 1: raise FieldError("Referencing multicolumn fields with F() objects " "isn't supported") # Verify that the last lookup in name is a field or a transform: # transform_function() raises FieldError if not. join_info.transform_function(targets[0], final_alias) if reuse is not None: reuse.update(join_list) return self._get_col(targets[0], join_info.targets[0], join_list[-1]) def split_exclude(self, filter_expr, can_reuse, names_with_path): """ When doing an exclude against any kind of N-to-many relation, we need to use a subquery. This method constructs the nested query, given the original exclude filter (filter_expr) and the portion up to the first N-to-many relation field. For example, if the origin filter is ~Q(child__name='foo'), filter_expr is ('child__name', 'foo') and can_reuse is a set of joins usable for filters in the original query. We will turn this into equivalent of: WHERE NOT (pk IN (SELECT parent_id FROM thetable WHERE name = 'foo' AND parent_id IS NOT NULL)) It might be worth it to consider using WHERE NOT EXISTS as that has saner null handling, and is easier for the backend's optimizer to handle. """ filter_lhs, filter_rhs = filter_expr if isinstance(filter_rhs, OuterRef): filter_expr = (filter_lhs, OuterRef(filter_rhs)) elif isinstance(filter_rhs, F): filter_expr = (filter_lhs, OuterRef(filter_rhs.name)) # Generate the inner query. query = Query(self.model) query._filtered_relations = self._filtered_relations query.add_filter(filter_expr) query.clear_ordering(True) # Try to have as simple as possible subquery -> trim leading joins from # the subquery. trimmed_prefix, contains_louter = query.trim_start(names_with_path) # Add extra check to make sure the selected field will not be null # since we are adding an IN <subquery> clause. This prevents the # database from tripping over IN (...,NULL,...) selects and returning # nothing col = query.select[0] select_field = col.target alias = col.alias if self.is_nullable(select_field): lookup_class = select_field.get_lookup('isnull') lookup = lookup_class(select_field.get_col(alias), False) query.where.add(lookup, AND) if alias in can_reuse: pk = select_field.model._meta.pk # Need to add a restriction so that outer query's filters are in effect for # the subquery, too. query.bump_prefix(self) lookup_class = select_field.get_lookup('exact') # Note that the query.select[0].alias is different from alias # due to bump_prefix above. lookup = lookup_class(pk.get_col(query.select[0].alias), pk.get_col(alias)) query.where.add(lookup, AND) query.external_aliases[alias] = True condition, needed_inner = self.build_filter( ('%s__in' % trimmed_prefix, query), current_negated=True, branch_negated=True, can_reuse=can_reuse) if contains_louter: or_null_condition, _ = self.build_filter( ('%s__isnull' % trimmed_prefix, True), current_negated=True, branch_negated=True, can_reuse=can_reuse) condition.add(or_null_condition, OR) # Note that the end result will be: # (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL. # This might look crazy but due to how IN works, this seems to be # correct. 
If the IS NOT NULL check is removed then outercol NOT # IN will return UNKNOWN. If the IS NULL check is removed, then if # outercol IS NULL we will not match the row. return condition, needed_inner def set_empty(self): self.where.add(NothingNode(), AND) for query in self.combined_queries: query.set_empty() def is_empty(self): return any(isinstance(c, NothingNode) for c in self.where.children) def set_limits(self, low=None, high=None): """ Adjust the limits on the rows retrieved. Use low/high to set these, as it makes it more Pythonic to read and write. When the SQL query is created, convert them to the appropriate offset and limit values. Apply any limits passed in here to the existing constraints. Add low to the current low value and clamp both to any existing high value. """ if high is not None: if self.high_mark is not None: self.high_mark = min(self.high_mark, self.low_mark + high) else: self.high_mark = self.low_mark + high if low is not None: if self.high_mark is not None: self.low_mark = min(self.high_mark, self.low_mark + low) else: self.low_mark = self.low_mark + low if self.low_mark == self.high_mark: self.set_empty() def clear_limits(self): """Clear any existing limits.""" self.low_mark, self.high_mark = 0, None @property def is_sliced(self): return self.low_mark != 0 or self.high_mark is not None def has_limit_one(self): return self.high_mark is not None and (self.high_mark - self.low_mark) == 1 def can_filter(self): """ Return True if adding filters to this instance is still possible. Typically, this means no limits or offsets have been put on the results. """ return not self.is_sliced def clear_select_clause(self): """Remove all fields from SELECT clause.""" self.select = () self.default_cols = False self.select_related = False self.set_extra_mask(()) self.set_annotation_mask(()) def clear_select_fields(self): """ Clear the list of fields to select (but not extra_select columns). Some queryset types completely replace any existing list of select columns. """ self.select = () self.values_select = () def add_select_col(self, col): self.select += col, self.values_select += col.output_field.name, def set_select(self, cols): self.default_cols = False self.select = tuple(cols) def add_distinct_fields(self, *field_names): """ Add and resolve the given fields to the query's "distinct on" clause. """ self.distinct_fields = field_names self.distinct = True def add_fields(self, field_names, allow_m2m=True): """ Add the given (model) fields to the select set. Add the field names in the order specified. """ alias = self.get_initial_alias() opts = self.get_meta() try: cols = [] for name in field_names: # Join promotion note - we must not remove any rows here, so # if there is no existing joins, use outer join. join_info = self.setup_joins(name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m) targets, final_alias, joins = self.trim_joins( join_info.targets, join_info.joins, join_info.path, ) for target in targets: cols.append(join_info.transform_function(target, final_alias)) if cols: self.set_select(cols) except MultiJoin: raise FieldError("Invalid field name: '%s'" % name) except FieldError: if LOOKUP_SEP in name: # For lookups spanning over relationships, show the error # from the model on which the lookup failed. raise else: names = sorted([ *get_field_names_from_opts(opts), *self.extra, *self.annotation_select, *self._filtered_relations ]) raise FieldError("Cannot resolve keyword %r into field. 
" "Choices are: %s" % (name, ", ".join(names))) def add_ordering(self, *ordering): """ Add items from the 'ordering' sequence to the query's "order by" clause. These items are either field names (not column names) -- possibly with a direction prefix ('-' or '?') -- or OrderBy expressions. If 'ordering' is empty, clear all ordering from the query. """ errors = [] for item in ordering: if isinstance(item, str): if '.' in item: warnings.warn( 'Passing column raw column aliases to order_by() is ' 'deprecated. Wrap %r in a RawSQL expression before ' 'passing it to order_by().' % item, category=RemovedInDjango40Warning, stacklevel=3, ) continue if item == '?': continue if item.startswith('-'): item = item[1:] if item in self.annotations: continue if self.extra and item in self.extra: continue # names_to_path() validates the lookup. A descriptive # FieldError will be raise if it's not. self.names_to_path(item.split(LOOKUP_SEP), self.model._meta) elif not hasattr(item, 'resolve_expression'): errors.append(item) if getattr(item, 'contains_aggregate', False): raise FieldError( 'Using an aggregate in order_by() without also including ' 'it in annotate() is not allowed: %s' % item ) if errors: raise FieldError('Invalid order_by arguments: %s' % errors) if ordering: self.order_by += ordering else: self.default_ordering = False def clear_ordering(self, force_empty): """ Remove any ordering settings. If 'force_empty' is True, there will be no ordering in the resulting query (not even the model's default). """ self.order_by = () self.extra_order_by = () if force_empty: self.default_ordering = False def set_group_by(self, allow_aliases=True): """ Expand the GROUP BY clause required by the query. This will usually be the set of all non-aggregate fields in the return data. If the database backend supports grouping by the primary key, and the query would be equivalent, the optimization will be made automatically. """ # Column names from JOINs to check collisions with aliases. if allow_aliases: column_names = set() seen_models = set() for join in list(self.alias_map.values())[1:]: # Skip base table. model = join.join_field.related_model if model not in seen_models: column_names.update({ field.column for field in model._meta.local_concrete_fields }) seen_models.add(model) group_by = list(self.select) if self.annotation_select: for alias, annotation in self.annotation_select.items(): signature = inspect.signature(annotation.get_group_by_cols) if 'alias' not in signature.parameters: annotation_class = annotation.__class__ msg = ( '`alias=None` must be added to the signature of ' '%s.%s.get_group_by_cols().' ) % (annotation_class.__module__, annotation_class.__qualname__) warnings.warn(msg, category=RemovedInDjango40Warning) group_by_cols = annotation.get_group_by_cols() else: if not allow_aliases or alias in column_names: alias = None group_by_cols = annotation.get_group_by_cols(alias=alias) group_by.extend(group_by_cols) self.group_by = tuple(group_by) def add_select_related(self, fields): """ Set up the select_related data structure so that we only select certain related models (as opposed to all models, when self.select_related=True). 
""" if isinstance(self.select_related, bool): field_dict = {} else: field_dict = self.select_related for field in fields: d = field_dict for part in field.split(LOOKUP_SEP): d = d.setdefault(part, {}) self.select_related = field_dict def add_extra(self, select, select_params, where, params, tables, order_by): """ Add data to the various extra_* attributes for user-created additions to the query. """ if select: # We need to pair any placeholder markers in the 'select' # dictionary with their parameters in 'select_params' so that # subsequent updates to the select dictionary also adjust the # parameters appropriately. select_pairs = {} if select_params: param_iter = iter(select_params) else: param_iter = iter([]) for name, entry in select.items(): entry = str(entry) entry_params = [] pos = entry.find("%s") while pos != -1: if pos == 0 or entry[pos - 1] != '%': entry_params.append(next(param_iter)) pos = entry.find("%s", pos + 2) select_pairs[name] = (entry, entry_params) self.extra.update(select_pairs) if where or params: self.where.add(ExtraWhere(where, params), AND) if tables: self.extra_tables += tuple(tables) if order_by: self.extra_order_by = order_by def clear_deferred_loading(self): """Remove any fields from the deferred loading set.""" self.deferred_loading = (frozenset(), True) def add_deferred_loading(self, field_names): """ Add the given list of model field names to the set of fields to exclude from loading from the database when automatic column selection is done. Add the new field names to any existing field names that are deferred (or removed from any existing field names that are marked as the only ones for immediate loading). """ # Fields on related models are stored in the literal double-underscore # format, so that we can use a set datastructure. We do the foo__bar # splitting and handling when computing the SQL column names (as part of # get_columns()). existing, defer = self.deferred_loading if defer: # Add to existing deferred names. self.deferred_loading = existing.union(field_names), True else: # Remove names from the set of any existing "immediate load" names. self.deferred_loading = existing.difference(field_names), False def add_immediate_loading(self, field_names): """ Add the given list of model field names to the set of fields to retrieve when the SQL is executed ("immediate loading" fields). The field names replace any existing immediate loading field names. If there are field names already specified for deferred loading, remove those names from the new field_names before storing the new names for immediate loading. (That is, immediate loading overrides any existing immediate values, but respects existing deferrals.) """ existing, defer = self.deferred_loading field_names = set(field_names) if 'pk' in field_names: field_names.remove('pk') field_names.add(self.get_meta().pk.name) if defer: # Remove any existing deferred names from the current set before # setting the new names. self.deferred_loading = field_names.difference(existing), False else: # Replace any existing "immediate load" field names. self.deferred_loading = frozenset(field_names), False def get_loaded_field_names(self): """ If any fields are marked to be deferred, return a dictionary mapping models to a set of names in those fields that will be loaded. If a model is not in the returned dictionary, none of its fields are deferred. If no fields are marked for deferral, return an empty dictionary. 
""" # We cache this because we call this function multiple times # (compiler.fill_related_selections, query.iterator) try: return self._loaded_field_names_cache except AttributeError: collection = {} self.deferred_to_data(collection, self.get_loaded_field_names_cb) self._loaded_field_names_cache = collection return collection def get_loaded_field_names_cb(self, target, model, fields): """Callback used by get_deferred_field_names().""" target[model] = {f.attname for f in fields} def set_annotation_mask(self, names): """Set the mask of annotations that will be returned by the SELECT.""" if names is None: self.annotation_select_mask = None else: self.annotation_select_mask = set(names) self._annotation_select_cache = None def append_annotation_mask(self, names): if self.annotation_select_mask is not None: self.set_annotation_mask(self.annotation_select_mask.union(names)) def set_extra_mask(self, names): """ Set the mask of extra select items that will be returned by SELECT. Don't remove them from the Query since they might be used later. """ if names is None: self.extra_select_mask = None else: self.extra_select_mask = set(names) self._extra_select_cache = None def set_values(self, fields): self.select_related = False self.clear_deferred_loading() self.clear_select_fields() if fields: field_names = [] extra_names = [] annotation_names = [] if not self.extra and not self.annotations: # Shortcut - if there are no extra or annotations, then # the values() clause must be just field names. field_names = list(fields) else: self.default_cols = False for f in fields: if f in self.extra_select: extra_names.append(f) elif f in self.annotation_select: annotation_names.append(f) else: field_names.append(f) self.set_extra_mask(extra_names) self.set_annotation_mask(annotation_names) else: field_names = [f.attname for f in self.model._meta.concrete_fields] # Selected annotations must be known before setting the GROUP BY # clause. if self.group_by is True: self.add_fields((f.attname for f in self.model._meta.concrete_fields), False) # Disable GROUP BY aliases to avoid orphaning references to the # SELECT clause which is about to be cleared. self.set_group_by(allow_aliases=False) self.clear_select_fields() elif self.group_by: # Resolve GROUP BY annotation references if they are not part of # the selected fields anymore. group_by = [] for expr in self.group_by: if isinstance(expr, Ref) and expr.refs not in field_names: expr = self.annotations[expr.refs] group_by.append(expr) self.group_by = tuple(group_by) self.values_select = tuple(field_names) self.add_fields(field_names, True) @property def annotation_select(self): """ Return the dictionary of aggregate columns that are not masked and should be used in the SELECT clause. Cache this result for performance. 
""" if self._annotation_select_cache is not None: return self._annotation_select_cache elif not self.annotations: return {} elif self.annotation_select_mask is not None: self._annotation_select_cache = { k: v for k, v in self.annotations.items() if k in self.annotation_select_mask } return self._annotation_select_cache else: return self.annotations @property def extra_select(self): if self._extra_select_cache is not None: return self._extra_select_cache if not self.extra: return {} elif self.extra_select_mask is not None: self._extra_select_cache = { k: v for k, v in self.extra.items() if k in self.extra_select_mask } return self._extra_select_cache else: return self.extra def trim_start(self, names_with_path): """ Trim joins from the start of the join path. The candidates for trim are the PathInfos in names_with_path structure that are m2m joins. Also set the select column so the start matches the join. This method is meant to be used for generating the subquery joins & cols in split_exclude(). Return a lookup usable for doing outerq.filter(lookup=self) and a boolean indicating if the joins in the prefix contain a LEFT OUTER join. _""" all_paths = [] for _, paths in names_with_path: all_paths.extend(paths) contains_louter = False # Trim and operate only on tables that were generated for # the lookup part of the query. That is, avoid trimming # joins generated for F() expressions. lookup_tables = [ t for t in self.alias_map if t in self._lookup_joins or t == self.base_table ] for trimmed_paths, path in enumerate(all_paths): if path.m2m: break if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER: contains_louter = True alias = lookup_tables[trimmed_paths] self.unref_alias(alias) # The path.join_field is a Rel, lets get the other side's field join_field = path.join_field.field # Build the filter prefix. paths_in_prefix = trimmed_paths trimmed_prefix = [] for name, path in names_with_path: if paths_in_prefix - len(path) < 0: break trimmed_prefix.append(name) paths_in_prefix -= len(path) trimmed_prefix.append( join_field.foreign_related_fields[0].name) trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix) # Lets still see if we can trim the first join from the inner query # (that is, self). We can't do this for: # - LEFT JOINs because we would miss those rows that have nothing on # the outer side, # - INNER JOINs from filtered relations because we would miss their # filters. first_join = self.alias_map[lookup_tables[trimmed_paths + 1]] if first_join.join_type != LOUTER and not first_join.filtered_relation: select_fields = [r[0] for r in join_field.related_fields] select_alias = lookup_tables[trimmed_paths + 1] self.unref_alias(lookup_tables[trimmed_paths]) extra_restriction = join_field.get_extra_restriction( self.where_class, None, lookup_tables[trimmed_paths + 1]) if extra_restriction: self.where.add(extra_restriction, AND) else: # TODO: It might be possible to trim more joins from the start of the # inner query if it happens to have a longer join chain containing the # values in select_fields. Lets punt this one for now. select_fields = [r[1] for r in join_field.related_fields] select_alias = lookup_tables[trimmed_paths] # The found starting point is likely a Join instead of a BaseTable reference. # But the first entry in the query's FROM clause must not be a JOIN. 
for table in self.alias_map: if self.alias_refcount[table] > 0: self.alias_map[table] = BaseTable(self.alias_map[table].table_name, table) break self.set_select([f.get_col(select_alias) for f in select_fields]) return trimmed_prefix, contains_louter def is_nullable(self, field): """ Check if the given field should be treated as nullable. Some backends treat '' as null and Django treats such fields as nullable for those backends. In such situations field.null can be False even if we should treat the field as nullable. """ # We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have # (nor should it have) knowledge of which connection is going to be # used. The proper fix would be to defer all decisions where # is_nullable() is needed to the compiler stage, but that is not easy # to do currently. return ( connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and field.empty_strings_allowed ) or field.null def get_order_dir(field, default='ASC'): """ Return the field name and direction for an order specification. For example, '-foo' is returned as ('foo', 'DESC'). The 'default' param is used to indicate which way no prefix (or a '+' prefix) should sort. The '-' prefix always sorts the opposite way. """ dirn = ORDER_DIR[default] if field[0] == '-': return field[1:], dirn[1] return field, dirn[0] def add_to_dict(data, key, value): """ Add "value" to the set of values for "key", whether or not "key" already exists. """ if key in data: data[key].add(value) else: data[key] = {value} def is_reverse_o2o(field): """ Check if the given field is reverse-o2o. The field is expected to be some sort of relation field or related object. """ return field.is_relation and field.one_to_one and not field.concrete class JoinPromoter: """ A class to abstract away join promotion problems for complex filter conditions. """ def __init__(self, connector, num_children, negated): self.connector = connector self.negated = negated if self.negated: if connector == AND: self.effective_connector = OR else: self.effective_connector = AND else: self.effective_connector = self.connector self.num_children = num_children # Maps of table alias to how many times it is seen as required for # inner and/or outer joins. self.votes = Counter() def add_votes(self, votes): """ Add single vote per item to self.votes. Parameter can be any iterable. """ self.votes.update(votes) def update_join_types(self, query): """ Change join types so that the generated query is as efficient as possible, but still correct. So, change as many joins as possible to INNER, but don't make OUTER joins INNER if that could remove results from the query. """ to_promote = set() to_demote = set() # The effective_connector is used so that NOT (a AND b) is treated # similarly to (a OR b) for join promotion. for table, votes in self.votes.items(): # We must use outer joins in OR case when the join isn't contained # in all of the joins. Otherwise the INNER JOIN itself could remove # valid results. Consider the case where a model with rel_a and # rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now, # if rel_a join doesn't produce any results is null (for example # reverse foreign key or null value in direct foreign key), and # there is a matching row in rel_b with col=2, then an INNER join # to rel_a would remove a valid match from the query. So, we need # to promote any existing INNER to LOUTER (it is possible this # promotion in turn will be demoted later on). 
if self.effective_connector == 'OR' and votes < self.num_children: to_promote.add(table) # If connector is AND and there is a filter that can match only # when there is a joinable row, then use INNER. For example, in # rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL # as join output, then the col=1 or col=2 can't match (as # NULL=anything is always false). # For the OR case, if all children voted for a join to be inner, # then we can use INNER for the join. For example: # (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell) # then if rel_a doesn't produce any rows, the whole condition # can't match. Hence we can safely use INNER join. if self.effective_connector == 'AND' or ( self.effective_connector == 'OR' and votes == self.num_children): to_demote.add(table) # Finally, what happens in cases where we have: # (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0 # Now, we first generate the OR clause, and promote joins for it # in the first if branch above. Both rel_a and rel_b are promoted # to LOUTER joins. After that we do the AND case. The OR case # voted no inner joins but the rel_a__col__gte=0 votes inner join # for rel_a. We demote it back to INNER join (in AND case a single # vote is enough). The demotion is OK, if rel_a doesn't produce # rows, then the rel_a__col__gte=0 clause can't be true, and thus # the whole clause must be false. So, it is safe to use INNER # join. # Note that in this example we could just as well have the __gte # clause and the OR clause swapped. Or we could replace the __gte # clause with an OR clause containing rel_a__col=1|rel_a__col=2, # and again we could safely demote to INNER. query.promote_joins(to_promote) query.demote_joins(to_demote) return to_demote
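# Illustrative usage sketch (an editorial addition, not part of Django's
# source): the JoinPromoter votes above are what decide between INNER and
# LEFT OUTER joins when Q objects are combined. The models below are
# hypothetical and assume a configured app.
from django.db import models
from django.db.models import Q


class RelA(models.Model):
    col = models.IntegerField()


class RelB(models.Model):
    col = models.IntegerField()


class Item(models.Model):
    rel_a = models.ForeignKey(RelA, null=True, on_delete=models.SET_NULL)
    rel_b = models.ForeignKey(RelB, null=True, on_delete=models.SET_NULL)


# OR across two relations: each join is voted for by only one child, so any
# existing INNER join is promoted to LEFT OUTER; otherwise a row matching
# rel_b__col=2 but lacking a rel_a row would be dropped.
or_qs = Item.objects.filter(Q(rel_a__col=1) | Q(rel_b__col=2))

# AND: a NULL join row can never satisfy both children, so both joins can be
# demoted to INNER without losing valid results.
and_qs = Item.objects.filter(Q(rel_a__col=1) & Q(rel_b__col=2))

# str(queryset.query) exposes the join types that were chosen.
print(or_qs.query)   # ... LEFT OUTER JOIN ...
print(and_qs.query)  # ... INNER JOIN ...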
a3d9b9ad90f41254369b960c12621f88045c9042f8d4273d48509a04ce16f80b
import operator

from django.db.backends.base.features import BaseDatabaseFeatures
from django.utils.functional import cached_property


class DatabaseFeatures(BaseDatabaseFeatures):
    empty_fetchmany_value = ()
    allows_group_by_pk = True
    related_fields_match_type = True
    # MySQL doesn't support sliced subqueries with IN/ALL/ANY/SOME.
    allow_sliced_subqueries_with_in = False
    has_select_for_update = True
    supports_forward_references = False
    supports_regex_backreferencing = False
    supports_date_lookup_using_string = False
    supports_index_column_ordering = False
    supports_timezones = False
    requires_explicit_null_ordering_when_grouping = True
    allows_auto_pk_0 = False
    can_release_savepoints = True
    atomic_transactions = False
    can_clone_databases = True
    supports_temporal_subtraction = True
    supports_select_intersection = False
    supports_select_difference = False
    supports_slicing_ordering_in_compound = True
    supports_index_on_text_field = False
    has_case_insensitive_like = False
    create_test_procedure_without_params_sql = """
        CREATE PROCEDURE test_procedure ()
        BEGIN
            DECLARE V_I INTEGER;
            SET V_I = 1;
        END;
    """
    create_test_procedure_with_int_param_sql = """
        CREATE PROCEDURE test_procedure (P_I INTEGER)
        BEGIN
            DECLARE V_I INTEGER;
            SET V_I = P_I;
        END;
    """
    # Neither MySQL nor MariaDB support partial indexes.
    supports_partial_indexes = False
    supports_order_by_nulls_modifier = False
    order_by_nulls_first = True

    @cached_property
    def _mysql_storage_engine(self):
        "Internal method used in Django tests. Don't rely on this from your code"
        return self.connection.mysql_server_data['default_storage_engine']

    @cached_property
    def update_can_self_select(self):
        return self.connection.mysql_is_mariadb and self.connection.mysql_version >= (10, 3, 2)

    @cached_property
    def can_introspect_foreign_keys(self):
        "Confirm support for introspected foreign keys"
        return self._mysql_storage_engine != 'MyISAM'

    @cached_property
    def introspected_field_types(self):
        return {
            **super().introspected_field_types,
            'BinaryField': 'TextField',
            'BooleanField': 'IntegerField',
            'DurationField': 'BigIntegerField',
            'GenericIPAddressField': 'CharField',
        }

    @cached_property
    def can_return_columns_from_insert(self):
        return self.connection.mysql_is_mariadb and self.connection.mysql_version >= (10, 5, 0)

    can_return_rows_from_bulk_insert = property(operator.attrgetter('can_return_columns_from_insert'))

    @cached_property
    def has_zoneinfo_database(self):
        return self.connection.mysql_server_data['has_zoneinfo_database']

    @cached_property
    def is_sql_auto_is_null_enabled(self):
        return self.connection.mysql_server_data['sql_auto_is_null']

    @cached_property
    def supports_over_clause(self):
        if self.connection.mysql_is_mariadb:
            return True
        return self.connection.mysql_version >= (8, 0, 2)

    supports_frame_range_fixed_distance = property(operator.attrgetter('supports_over_clause'))

    @cached_property
    def supports_column_check_constraints(self):
        if self.connection.mysql_is_mariadb:
            return self.connection.mysql_version >= (10, 2, 1)
        return self.connection.mysql_version >= (8, 0, 16)

    supports_table_check_constraints = property(operator.attrgetter('supports_column_check_constraints'))

    @cached_property
    def can_introspect_check_constraints(self):
        if self.connection.mysql_is_mariadb:
            version = self.connection.mysql_version
            return (version >= (10, 2, 22) and version < (10, 3)) or version >= (10, 3, 10)
        return self.connection.mysql_version >= (8, 0, 16)

    @cached_property
    def has_select_for_update_skip_locked(self):
        return not self.connection.mysql_is_mariadb and self.connection.mysql_version >= (8, 0, 1)

    @cached_property
    def has_select_for_update_nowait(self):
        if self.connection.mysql_is_mariadb:
            return self.connection.mysql_version >= (10, 3, 0)
        return self.connection.mysql_version >= (8, 0, 1)

    @cached_property
    def has_select_for_update_of(self):
        return not self.connection.mysql_is_mariadb and self.connection.mysql_version >= (8, 0, 1)

    @cached_property
    def supports_explain_analyze(self):
        return self.connection.mysql_is_mariadb or self.connection.mysql_version >= (8, 0, 18)

    @cached_property
    def supported_explain_formats(self):
        # Alias MySQL's TRADITIONAL to TEXT for consistency with other
        # backends.
        formats = {'JSON', 'TEXT', 'TRADITIONAL'}
        if not self.connection.mysql_is_mariadb and self.connection.mysql_version >= (8, 0, 16):
            formats.add('TREE')
        return formats

    @cached_property
    def supports_transactions(self):
        """
        All storage engines except MyISAM support transactions.
        """
        return self._mysql_storage_engine != 'MyISAM'

    @cached_property
    def ignores_table_name_case(self):
        return self.connection.mysql_server_data['lower_case_table_names']

    @cached_property
    def supports_default_in_lead_lag(self):
        # To be added in https://jira.mariadb.org/browse/MDEV-12981.
        return not self.connection.mysql_is_mariadb

    @cached_property
    def supports_json_field(self):
        if self.connection.mysql_is_mariadb:
            return self.connection.mysql_version >= (10, 2, 7)
        return self.connection.mysql_version >= (5, 7, 8)

    @cached_property
    def can_introspect_json_field(self):
        if self.connection.mysql_is_mariadb:
            return self.supports_json_field and self.can_introspect_check_constraints
        return self.supports_json_field
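# Illustrative usage sketch (an editorial addition, not part of Django's
# source): the flags above are read through ``connection.features``, both by
# the ORM and by the test framework's skip decorators.
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature


def pick_explain_format():
    # Branch on a capability flag instead of scattering version checks.
    if 'TREE' in connection.features.supported_explain_formats:
        return 'TREE'
    return 'TEXT'


class JSONFieldTests(TestCase):
    # Runs only where the backend reports JSON support, e.g. MySQL >= 5.7.8
    # or MariaDB >= 10.2.7 per supports_json_field above.
    @skipUnlessDBFeature('supports_json_field')
    def test_json_roundtrip(self):
        ...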
165f5d4f3f2ba63f0cd7c388772fb302b5f9e0c7f721cc4b32875a4452425a1e
from django.core.exceptions import FieldError
from django.db.models.sql import compiler


class SQLCompiler(compiler.SQLCompiler):
    def as_subquery_condition(self, alias, columns, compiler):
        qn = compiler.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        sql, params = self.as_sql()
        return '(%s) IN (%s)' % (', '.join('%s.%s' % (qn(alias), qn2(column)) for column in columns), sql), params


class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler):
    pass


class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
    def as_sql(self):
        if self.connection.features.update_can_self_select or self.single_alias:
            return super().as_sql()
        # MySQL and MariaDB < 10.3.2 don't support deletion with a subquery,
        # which is what the default implementation of SQLDeleteCompiler uses
        # when multiple tables are involved. Use the MySQL/MariaDB specific
        # DELETE table FROM table syntax instead to avoid performing the
        # operation in two queries.
        result = [
            'DELETE %s FROM' % self.quote_name_unless_alias(
                self.query.get_initial_alias()
            )
        ]
        from_sql, from_params = self.get_from_clause()
        result.extend(from_sql)
        where, params = self.compile(self.query.where)
        if where:
            result.append('WHERE %s' % where)
        return ' '.join(result), tuple(from_params) + tuple(params)


class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
    def as_sql(self):
        update_query, update_params = super().as_sql()
        # MySQL and MariaDB support UPDATE ... ORDER BY syntax.
        if self.query.order_by:
            order_by_sql = []
            order_by_params = []
            try:
                for _, (sql, params, _) in self.get_order_by():
                    order_by_sql.append(sql)
                    order_by_params.extend(params)
                update_query += ' ORDER BY ' + ', '.join(order_by_sql)
                update_params += tuple(order_by_params)
            except FieldError:
                # Ignore ordering if it contains annotations, because they're
                # removed in .update() and cannot be resolved.
                pass
        return update_query, update_params


class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler):
    pass
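# Illustrative usage sketch (an editorial addition, not part of Django's
# source; ``Book``/``Author`` are hypothetical models): on MariaDB < 10.3.2,
# where update_can_self_select is False, a delete whose filter joins another
# table may be compiled by the SQLDeleteCompiler above into a single
# ``DELETE tbl FROM tbl ...`` statement rather than the default
# ``DELETE ... WHERE id IN (subquery)`` form:
deleted, _ = Book.objects.filter(author__name='Anon').delete()
# Roughly: DELETE `book` FROM `book`
#          INNER JOIN `author` ON (`book`.`author_id` = `author`.`id`)
#          WHERE `author`.`name` = 'Anon'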
9ca9a1cfcdd332e5677356de43d5164ffc2480b1eed4b0bcc992fba9ba3a580b
from psycopg2.extras import Inet from django.conf import settings from django.db.backends.base.operations import BaseDatabaseOperations class DatabaseOperations(BaseDatabaseOperations): cast_char_field_without_max_length = 'varchar' explain_prefix = 'EXPLAIN' cast_data_types = { 'AutoField': 'integer', 'BigAutoField': 'bigint', 'SmallAutoField': 'smallint', } def unification_cast_sql(self, output_field): internal_type = output_field.get_internal_type() if internal_type in ("GenericIPAddressField", "IPAddressField", "TimeField", "UUIDField"): # PostgreSQL will resolve a union as type 'text' if input types are # 'unknown'. # https://www.postgresql.org/docs/current/typeconv-union-case.html # These fields cannot be implicitly cast back in the default # PostgreSQL configuration so we need to explicitly cast them. # We must also remove components of the type within brackets: # varchar(255) -> varchar. return 'CAST(%%s AS %s)' % output_field.db_type(self.connection).split('(')[0] return '%s' def date_extract_sql(self, lookup_type, field_name): # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT if lookup_type == 'week_day': # For consistency across backends, we return Sunday=1, Saturday=7. return "EXTRACT('dow' FROM %s) + 1" % field_name elif lookup_type == 'iso_week_day': return "EXTRACT('isodow' FROM %s)" % field_name elif lookup_type == 'iso_year': return "EXTRACT('isoyear' FROM %s)" % field_name else: return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name) def date_trunc_sql(self, lookup_type, field_name): # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name) def _prepare_tzname_delta(self, tzname): if '+' in tzname: return tzname.replace('+', '-') elif '-' in tzname: return tzname.replace('-', '+') return tzname def _convert_field_to_tz(self, field_name, tzname): if settings.USE_TZ: field_name = "%s AT TIME ZONE '%s'" % (field_name, self._prepare_tzname_delta(tzname)) return field_name def datetime_cast_date_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return '(%s)::date' % field_name def datetime_cast_time_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return '(%s)::time' % field_name def datetime_extract_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return self.date_extract_sql(lookup_type, field_name) def datetime_trunc_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name) def time_trunc_sql(self, lookup_type, field_name): return "DATE_TRUNC('%s', %s)::time" % (lookup_type, field_name) def json_cast_text_sql(self, field_name): return '(%s)::text' % field_name def deferrable_sql(self): return " DEFERRABLE INITIALLY DEFERRED" def fetch_returned_insert_rows(self, cursor): """ Given a cursor object that has just performed an INSERT...RETURNING statement into a table, return the tuple of returned data. 
""" return cursor.fetchall() def lookup_cast(self, lookup_type, internal_type=None): lookup = '%s' # Cast text lookups to text to allow things like filter(x__contains=4) if lookup_type in ('iexact', 'contains', 'icontains', 'startswith', 'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'): if internal_type in ('IPAddressField', 'GenericIPAddressField'): lookup = "HOST(%s)" elif internal_type in ('CICharField', 'CIEmailField', 'CITextField'): lookup = '%s::citext' else: lookup = "%s::text" # Use UPPER(x) for case-insensitive lookups; it's faster. if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'): lookup = 'UPPER(%s)' % lookup return lookup def no_limit_value(self): return None def prepare_sql_script(self, sql): return [sql] def quote_name(self, name): if name.startswith('"') and name.endswith('"'): return name # Quoting once is enough. return '"%s"' % name def set_time_zone_sql(self): return "SET TIME ZONE %s" def sql_flush(self, style, tables, *, reset_sequences=False, allow_cascade=False): if not tables: return [] # Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows us # to truncate tables referenced by a foreign key in any other table. sql_parts = [ style.SQL_KEYWORD('TRUNCATE'), ', '.join(style.SQL_FIELD(self.quote_name(table)) for table in tables), ] if reset_sequences: sql_parts.append(style.SQL_KEYWORD('RESTART IDENTITY')) if allow_cascade: sql_parts.append(style.SQL_KEYWORD('CASCADE')) return ['%s;' % ' '.join(sql_parts)] def sequence_reset_by_name_sql(self, style, sequences): # 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements # to reset sequence indices sql = [] for sequence_info in sequences: table_name = sequence_info['table'] # 'id' will be the case if it's an m2m using an autogenerated # intermediate table (see BaseDatabaseIntrospection.sequence_list). column_name = sequence_info['column'] or 'id' sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" % ( style.SQL_KEYWORD('SELECT'), style.SQL_TABLE(self.quote_name(table_name)), style.SQL_FIELD(column_name), )) return sql def tablespace_sql(self, tablespace, inline=False): if inline: return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace) else: return "TABLESPACE %s" % self.quote_name(tablespace) def sequence_reset_sql(self, style, model_list): from django.db import models output = [] qn = self.quote_name for model in model_list: # Use `coalesce` to set the sequence for each model to the max pk value if there are records, # or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true # if there are records (as the max pk value is already in use), otherwise set it to false. # Use pg_get_serial_sequence to get the underlying sequence name from the table name # and column name (available since PostgreSQL 8) for f in model._meta.local_fields: if isinstance(f, models.AutoField): output.append( "%s setval(pg_get_serial_sequence('%s','%s'), " "coalesce(max(%s), 1), max(%s) %s null) %s %s;" % ( style.SQL_KEYWORD('SELECT'), style.SQL_TABLE(qn(model._meta.db_table)), style.SQL_FIELD(f.column), style.SQL_FIELD(qn(f.column)), style.SQL_FIELD(qn(f.column)), style.SQL_KEYWORD('IS NOT'), style.SQL_KEYWORD('FROM'), style.SQL_TABLE(qn(model._meta.db_table)), ) ) break # Only one AutoField is allowed per model, so don't bother continuing. 
for f in model._meta.many_to_many: if not f.remote_field.through: output.append( "%s setval(pg_get_serial_sequence('%s','%s'), " "coalesce(max(%s), 1), max(%s) %s null) %s %s;" % ( style.SQL_KEYWORD('SELECT'), style.SQL_TABLE(qn(f.m2m_db_table())), style.SQL_FIELD('id'), style.SQL_FIELD(qn('id')), style.SQL_FIELD(qn('id')), style.SQL_KEYWORD('IS NOT'), style.SQL_KEYWORD('FROM'), style.SQL_TABLE(qn(f.m2m_db_table())) ) ) return output def prep_for_iexact_query(self, x): return x def max_name_length(self): """ Return the maximum length of an identifier. The maximum length of an identifier is 63 by default, but can be changed by recompiling PostgreSQL after editing the NAMEDATALEN macro in src/include/pg_config_manual.h. This implementation returns 63, but can be overridden by a custom database backend that inherits most of its behavior from this one. """ return 63 def distinct_sql(self, fields, params): if fields: params = [param for param_list in params for param in param_list] return (['DISTINCT ON (%s)' % ', '.join(fields)], params) else: return ['DISTINCT'], [] def last_executed_query(self, cursor, sql, params): # https://www.psycopg.org/docs/cursor.html#cursor.query # The query attribute is a Psycopg extension to the DB API 2.0. if cursor.query is not None: return cursor.query.decode() return None def return_insert_columns(self, fields): if not fields: return '', () columns = [ '%s.%s' % ( self.quote_name(field.model._meta.db_table), self.quote_name(field.column), ) for field in fields ] return 'RETURNING %s' % ', '.join(columns), () def bulk_insert_sql(self, fields, placeholder_rows): placeholder_rows_sql = (", ".join(row) for row in placeholder_rows) values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql) return "VALUES " + values_sql def adapt_datefield_value(self, value): return value def adapt_datetimefield_value(self, value): return value def adapt_timefield_value(self, value): return value def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None): return value def adapt_ipaddressfield_value(self, value): if value: return Inet(value) return None def subtract_temporals(self, internal_type, lhs, rhs): if internal_type == 'DateField': lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs params = (*lhs_params, *rhs_params) return "(interval '1 day' * (%s - %s))" % (lhs_sql, rhs_sql), params return super().subtract_temporals(internal_type, lhs, rhs) def explain_query_prefix(self, format=None, **options): prefix = super().explain_query_prefix(format) extra = {} if format: extra['FORMAT'] = format if options: extra.update({ name.upper(): 'true' if value else 'false' for name, value in options.items() }) if extra: prefix += ' (%s)' % ', '.join('%s %s' % i for i in extra.items()) return prefix def ignore_conflicts_suffix_sql(self, ignore_conflicts=None): return 'ON CONFLICT DO NOTHING' if ignore_conflicts else super().ignore_conflicts_suffix_sql(ignore_conflicts)
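# Illustrative usage sketch (an editorial addition, not part of Django's
# source; ``Entry`` is a hypothetical model): explain_query_prefix() above is
# what QuerySet.explain() calls to build the EXPLAIN prefix; option names are
# upper-cased and boolean values are rendered as true/false.
plan = Entry.objects.filter(rating__gt=3).explain(format='JSON', analyze=True)
# Executes roughly: EXPLAIN (FORMAT JSON, ANALYZE true) SELECT ...
print(plan)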
b16df910ef642de1145e8c0c9ed1036a4e054c488d43c93c8d14a190379d2f25
import datetime
import json

from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange, Range

from django.contrib.postgres import forms, lookups
from django.db import models
from django.db.models.lookups import PostgresOperatorLookup

from .utils import AttributeSetter

__all__ = [
    'RangeField', 'IntegerRangeField', 'BigIntegerRangeField',
    'DecimalRangeField', 'DateTimeRangeField', 'DateRangeField',
    'RangeBoundary', 'RangeOperators',
]


class RangeBoundary(models.Expression):
    """A class that represents range boundaries."""
    def __init__(self, inclusive_lower=True, inclusive_upper=False):
        self.lower = '[' if inclusive_lower else '('
        self.upper = ']' if inclusive_upper else ')'

    def as_sql(self, compiler, connection):
        return "'%s%s'" % (self.lower, self.upper), []


class RangeOperators:
    # https://www.postgresql.org/docs/current/functions-range.html#RANGE-OPERATORS-TABLE
    EQUAL = '='
    NOT_EQUAL = '<>'
    CONTAINS = '@>'
    CONTAINED_BY = '<@'
    OVERLAPS = '&&'
    FULLY_LT = '<<'
    FULLY_GT = '>>'
    NOT_LT = '&>'
    NOT_GT = '&<'
    ADJACENT_TO = '-|-'


class RangeField(models.Field):
    empty_strings_allowed = False

    def __init__(self, *args, **kwargs):
        # Initializing base_field here ensures that its model matches the model for self.
        if hasattr(self, 'base_field'):
            self.base_field = self.base_field()
        super().__init__(*args, **kwargs)

    @property
    def model(self):
        try:
            return self.__dict__['model']
        except KeyError:
            raise AttributeError("'%s' object has no attribute 'model'" % self.__class__.__name__)

    @model.setter
    def model(self, model):
        self.__dict__['model'] = model
        self.base_field.model = model

    @classmethod
    def _choices_is_value(cls, value):
        return isinstance(value, (list, tuple)) or super()._choices_is_value(value)

    def get_prep_value(self, value):
        if value is None:
            return None
        elif isinstance(value, Range):
            return value
        elif isinstance(value, (list, tuple)):
            return self.range_type(value[0], value[1])
        return value

    def to_python(self, value):
        if isinstance(value, str):
            # Assume we're deserializing
            vals = json.loads(value)
            for end in ('lower', 'upper'):
                if end in vals:
                    vals[end] = self.base_field.to_python(vals[end])
            value = self.range_type(**vals)
        elif isinstance(value, (list, tuple)):
            value = self.range_type(value[0], value[1])
        return value

    def set_attributes_from_name(self, name):
        super().set_attributes_from_name(name)
        self.base_field.set_attributes_from_name(name)

    def value_to_string(self, obj):
        value = self.value_from_object(obj)
        if value is None:
            return None
        if value.isempty:
            return json.dumps({"empty": True})
        base_field = self.base_field
        result = {"bounds": value._bounds}
        for end in ('lower', 'upper'):
            val = getattr(value, end)
            if val is None:
                result[end] = None
            else:
                obj = AttributeSetter(base_field.attname, val)
                result[end] = base_field.value_to_string(obj)
        return json.dumps(result)

    def formfield(self, **kwargs):
        kwargs.setdefault('form_class', self.form_field)
        return super().formfield(**kwargs)


class IntegerRangeField(RangeField):
    base_field = models.IntegerField
    range_type = NumericRange
    form_field = forms.IntegerRangeField

    def db_type(self, connection):
        return 'int4range'


class BigIntegerRangeField(RangeField):
    base_field = models.BigIntegerField
    range_type = NumericRange
    form_field = forms.IntegerRangeField

    def db_type(self, connection):
        return 'int8range'


class DecimalRangeField(RangeField):
    base_field = models.DecimalField
    range_type = NumericRange
    form_field = forms.DecimalRangeField

    def db_type(self, connection):
        return 'numrange'


class DateTimeRangeField(RangeField):
    base_field = models.DateTimeField
    range_type = DateTimeTZRange
    form_field = forms.DateTimeRangeField

    def db_type(self, connection):
        return 'tstzrange'


class DateRangeField(RangeField):
    base_field = models.DateField
    range_type = DateRange
    form_field = forms.DateRangeField

    def db_type(self, connection):
        return 'daterange'


RangeField.register_lookup(lookups.DataContains)
RangeField.register_lookup(lookups.ContainedBy)
RangeField.register_lookup(lookups.Overlap)


class DateTimeRangeContains(PostgresOperatorLookup):
    """
    Lookup for Date/DateTimeRange containment to cast the rhs to the correct
    type.
    """
    lookup_name = 'contains'
    postgres_operator = RangeOperators.CONTAINS

    def process_rhs(self, compiler, connection):
        # Transform rhs value for db lookup.
        if isinstance(self.rhs, datetime.date):
            value = models.Value(self.rhs)
            self.rhs = value.resolve_expression(compiler.query)
        return super().process_rhs(compiler, connection)

    def as_postgresql(self, compiler, connection):
        sql, params = super().as_postgresql(compiler, connection)
        # Cast the rhs if needed.
        cast_sql = ''
        if (
            isinstance(self.rhs, models.Expression) and
            self.rhs._output_field_or_none and
            # Skip cast if rhs has a matching range type.
            not isinstance(self.rhs._output_field_or_none, self.lhs.output_field.__class__)
        ):
            cast_internal_type = self.lhs.output_field.base_field.get_internal_type()
            cast_sql = '::{}'.format(connection.data_types.get(cast_internal_type))
        return '%s%s' % (sql, cast_sql), params


DateRangeField.register_lookup(DateTimeRangeContains)
DateTimeRangeField.register_lookup(DateTimeRangeContains)


class RangeContainedBy(PostgresOperatorLookup):
    lookup_name = 'contained_by'
    type_mapping = {
        'smallint': 'int4range',
        'integer': 'int4range',
        'bigint': 'int8range',
        'double precision': 'numrange',
        'numeric': 'numrange',
        'date': 'daterange',
        'timestamp with time zone': 'tstzrange',
    }
    postgres_operator = RangeOperators.CONTAINED_BY

    def process_rhs(self, compiler, connection):
        rhs, rhs_params = super().process_rhs(compiler, connection)
        # Ignore precision for DecimalFields.
        db_type = self.lhs.output_field.cast_db_type(connection).split('(')[0]
        cast_type = self.type_mapping[db_type]
        return '%s::%s' % (rhs, cast_type), rhs_params

    def process_lhs(self, compiler, connection):
        lhs, lhs_params = super().process_lhs(compiler, connection)
        if isinstance(self.lhs.output_field, models.FloatField):
            lhs = '%s::numeric' % lhs
        elif isinstance(self.lhs.output_field, models.SmallIntegerField):
            lhs = '%s::integer' % lhs
        return lhs, lhs_params

    def get_prep_lookup(self):
        return RangeField().get_prep_value(self.rhs)


models.DateField.register_lookup(RangeContainedBy)
models.DateTimeField.register_lookup(RangeContainedBy)
models.IntegerField.register_lookup(RangeContainedBy)
models.FloatField.register_lookup(RangeContainedBy)
models.DecimalField.register_lookup(RangeContainedBy)


@RangeField.register_lookup
class FullyLessThan(PostgresOperatorLookup):
    lookup_name = 'fully_lt'
    postgres_operator = RangeOperators.FULLY_LT


@RangeField.register_lookup
class FullGreaterThan(PostgresOperatorLookup):
    lookup_name = 'fully_gt'
    postgres_operator = RangeOperators.FULLY_GT


@RangeField.register_lookup
class NotLessThan(PostgresOperatorLookup):
    lookup_name = 'not_lt'
    postgres_operator = RangeOperators.NOT_LT


@RangeField.register_lookup
class NotGreaterThan(PostgresOperatorLookup):
    lookup_name = 'not_gt'
    postgres_operator = RangeOperators.NOT_GT


@RangeField.register_lookup
class AdjacentToLookup(PostgresOperatorLookup):
    lookup_name = 'adjacent_to'
    postgres_operator = RangeOperators.ADJACENT_TO


@RangeField.register_lookup
class RangeStartsWith(models.Transform):
    lookup_name = 'startswith'
    function = 'lower'

    @property
    def output_field(self):
        return self.lhs.output_field.base_field


@RangeField.register_lookup
class RangeEndsWith(models.Transform):
    lookup_name = 'endswith'
    function = 'upper'

    @property
    def output_field(self):
        return self.lhs.output_field.base_field


@RangeField.register_lookup
class IsEmpty(models.Transform):
    lookup_name = 'isempty'
    function = 'isempty'
    output_field = models.BooleanField()


@RangeField.register_lookup
class LowerInclusive(models.Transform):
    lookup_name = 'lower_inc'
    function = 'LOWER_INC'
    output_field = models.BooleanField()


@RangeField.register_lookup
class LowerInfinite(models.Transform):
    lookup_name = 'lower_inf'
    function = 'LOWER_INF'
    output_field = models.BooleanField()


@RangeField.register_lookup
class UpperInclusive(models.Transform):
    lookup_name = 'upper_inc'
    function = 'UPPER_INC'
    output_field = models.BooleanField()


@RangeField.register_lookup
class UpperInfinite(models.Transform):
    lookup_name = 'upper_inf'
    function = 'UPPER_INF'
    output_field = models.BooleanField()
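# Usage sketch (editorial illustration, not part of the module above). It
# shows how the fields and lookups defined in this module are typically used
# from model code. The `Event` model and the literal values are hypothetical;
# only the field classes, lookup names, and psycopg2 range types come from
# the code above.
#
#     from django.contrib.postgres.fields import (
#         DateTimeRangeField, IntegerRangeField,
#     )
#     from django.db import models
#     from psycopg2.extras import DateTimeTZRange, NumericRange
#
#     class Event(models.Model):
#         ages = IntegerRangeField()        # stored as int4range
#         timeslot = DateTimeRangeField()   # stored as tstzrange
#
#     Event.objects.filter(ages__contains=25)   # RangeOperators.CONTAINS ('@>')
#     Event.objects.filter(ages__contained_by=NumericRange(0, 99))
#     Event.objects.filter(timeslot__overlap=DateTimeTZRange(start, end))  # start/end: hypothetical datetimes
#     Event.objects.filter(ages__startswith=21)  # RangeStartsWith: lower(ages) = 21
#     Event.objects.filter(ages__isempty=False)  # IsEmpty transform
#
# Note that RangeContainedBy is also registered on plain scalar fields above,
# so `Model.objects.filter(int_field__contained_by=NumericRange(1, 10))`
# works in the other direction as well.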
from decimal import Decimal

from django.contrib.gis.db.models.fields import BaseSpatialField, GeometryField
from django.contrib.gis.db.models.sql import AreaField, DistanceField
from django.contrib.gis.geos import GEOSGeometry
from django.core.exceptions import FieldError
from django.db import NotSupportedError
from django.db.models import (
    BinaryField, BooleanField, FloatField, Func, IntegerField, TextField,
    Transform, Value,
)
from django.db.models.functions import Cast
from django.utils.functional import cached_property

NUMERIC_TYPES = (int, float, Decimal)


class GeoFuncMixin:
    function = None
    geom_param_pos = (0,)

    def __init__(self, *expressions, **extra):
        super().__init__(*expressions, **extra)

        # Ensure that value expressions are geometric.
        for pos in self.geom_param_pos:
            expr = self.source_expressions[pos]
            if not isinstance(expr, Value):
                continue
            try:
                output_field = expr.output_field
            except FieldError:
                output_field = None
            geom = expr.value
            if not isinstance(geom, GEOSGeometry) or output_field and not isinstance(output_field, GeometryField):
                raise TypeError("%s function requires a geometric argument in position %d." % (self.name, pos + 1))
            if not geom.srid and not output_field:
                raise ValueError("SRID is required for all geometries.")
            if not output_field:
                self.source_expressions[pos] = Value(geom, output_field=GeometryField(srid=geom.srid))

    @property
    def name(self):
        return self.__class__.__name__

    @cached_property
    def geo_field(self):
        return self.source_expressions[self.geom_param_pos[0]].field

    def as_sql(self, compiler, connection, function=None, **extra_context):
        if self.function is None and function is None:
            function = connection.ops.spatial_function_name(self.name)
        return super().as_sql(compiler, connection, function=function, **extra_context)

    def resolve_expression(self, *args, **kwargs):
        res = super().resolve_expression(*args, **kwargs)

        # Ensure that expressions are geometric.
        source_fields = res.get_source_fields()
        for pos in self.geom_param_pos:
            field = source_fields[pos]
            if not isinstance(field, GeometryField):
                raise TypeError(
                    "%s function requires a GeometryField in position %s, got %s." % (
                        self.name, pos + 1, type(field).__name__,
                    )
                )

        base_srid = res.geo_field.srid
        for pos in self.geom_param_pos[1:]:
            expr = res.source_expressions[pos]
            expr_srid = expr.output_field.srid
            if expr_srid != base_srid:
                # Automatic SRID conversion so objects are comparable.
                res.source_expressions[pos] = Transform(expr, base_srid).resolve_expression(*args, **kwargs)
        return res

    def _handle_param(self, value, param_name='', check_types=None):
        if not hasattr(value, 'resolve_expression'):
            if check_types and not isinstance(value, check_types):
                raise TypeError(
                    "The %s parameter has the wrong type: should be %s." % (
                        param_name, check_types)
                )
        return value


class GeoFunc(GeoFuncMixin, Func):
    pass


class GeomOutputGeoFunc(GeoFunc):
    @cached_property
    def output_field(self):
        return GeometryField(srid=self.geo_field.srid)


class SQLiteDecimalToFloatMixin:
    """
    By default, Decimal values are converted to str by the SQLite backend,
    which is not acceptable by the GIS functions expecting numeric values.
    """
    def as_sqlite(self, compiler, connection, **extra_context):
        copy = self.copy()
        copy.set_source_expressions([
            Value(float(expr.value)) if hasattr(expr, 'value') and isinstance(expr.value, Decimal) else expr
            for expr in copy.get_source_expressions()
        ])
        return copy.as_sql(compiler, connection, **extra_context)


class OracleToleranceMixin:
    tolerance = 0.05

    def as_oracle(self, compiler, connection, **extra_context):
        tolerance = Value(self._handle_param(
            self.extra.get('tolerance', self.tolerance),
            'tolerance',
            NUMERIC_TYPES,
        ))
        clone = self.copy()
        clone.set_source_expressions([*self.get_source_expressions(), tolerance])
        return clone.as_sql(compiler, connection, **extra_context)


class Area(OracleToleranceMixin, GeoFunc):
    arity = 1

    @cached_property
    def output_field(self):
        return AreaField(self.geo_field)

    def as_sql(self, compiler, connection, **extra_context):
        if not connection.features.supports_area_geodetic and self.geo_field.geodetic(connection):
            raise NotSupportedError('Area on geodetic coordinate systems not supported.')
        return super().as_sql(compiler, connection, **extra_context)

    def as_sqlite(self, compiler, connection, **extra_context):
        if self.geo_field.geodetic(connection):
            extra_context['template'] = '%(function)s(%(expressions)s, %(spheroid)d)'
            extra_context['spheroid'] = True
        return self.as_sql(compiler, connection, **extra_context)


class Azimuth(GeoFunc):
    output_field = FloatField()
    arity = 2
    geom_param_pos = (0, 1)


class AsGeoJSON(GeoFunc):
    output_field = TextField()

    def __init__(self, expression, bbox=False, crs=False, precision=8, **extra):
        expressions = [expression]
        if precision is not None:
            expressions.append(self._handle_param(precision, 'precision', int))
        options = 0
        if crs and bbox:
            options = 3
        elif bbox:
            options = 1
        elif crs:
            options = 2
        if options:
            expressions.append(options)
        super().__init__(*expressions, **extra)

    def as_oracle(self, compiler, connection, **extra_context):
        source_expressions = self.get_source_expressions()
        clone = self.copy()
        clone.set_source_expressions(source_expressions[:1])
        return super(AsGeoJSON, clone).as_sql(compiler, connection, **extra_context)


class AsGML(GeoFunc):
    geom_param_pos = (1,)
    output_field = TextField()

    def __init__(self, expression, version=2, precision=8, **extra):
        expressions = [version, expression]
        if precision is not None:
            expressions.append(self._handle_param(precision, 'precision', int))
        super().__init__(*expressions, **extra)

    def as_oracle(self, compiler, connection, **extra_context):
        source_expressions = self.get_source_expressions()
        version = source_expressions[0]
        clone = self.copy()
        clone.set_source_expressions([source_expressions[1]])
        extra_context['function'] = 'SDO_UTIL.TO_GML311GEOMETRY' if version.value == 3 else 'SDO_UTIL.TO_GMLGEOMETRY'
        return super(AsGML, clone).as_sql(compiler, connection, **extra_context)


class AsKML(GeoFunc):
    output_field = TextField()

    def __init__(self, expression, precision=8, **extra):
        expressions = [expression]
        if precision is not None:
            expressions.append(self._handle_param(precision, 'precision', int))
        super().__init__(*expressions, **extra)


class AsSVG(GeoFunc):
    output_field = TextField()

    def __init__(self, expression, relative=False, precision=8, **extra):
        relative = relative if hasattr(relative, 'resolve_expression') else int(relative)
        expressions = [
            expression,
            relative,
            self._handle_param(precision, 'precision', int),
        ]
        super().__init__(*expressions, **extra)


class AsWKB(GeoFunc):
    output_field = BinaryField()
    arity = 1


class AsWKT(GeoFunc):
    output_field = TextField()
    arity = 1


class BoundingCircle(OracleToleranceMixin, GeomOutputGeoFunc):
    def __init__(self, expression, num_seg=48, **extra):
        super().__init__(expression, num_seg, **extra)

    def as_oracle(self, compiler, connection, **extra_context):
        clone = self.copy()
        clone.set_source_expressions([self.get_source_expressions()[0]])
        return super(BoundingCircle, clone).as_oracle(compiler, connection, **extra_context)


class Centroid(OracleToleranceMixin, GeomOutputGeoFunc):
    arity = 1


class Difference(OracleToleranceMixin, GeomOutputGeoFunc):
    arity = 2
    geom_param_pos = (0, 1)


class DistanceResultMixin:
    @cached_property
    def output_field(self):
        return DistanceField(self.geo_field)

    def source_is_geography(self):
        return self.geo_field.geography and self.geo_field.srid == 4326


class Distance(DistanceResultMixin, OracleToleranceMixin, GeoFunc):
    geom_param_pos = (0, 1)
    spheroid = None

    def __init__(self, expr1, expr2, spheroid=None, **extra):
        expressions = [expr1, expr2]
        if spheroid is not None:
            self.spheroid = self._handle_param(spheroid, 'spheroid', bool)
        super().__init__(*expressions, **extra)

    def as_postgresql(self, compiler, connection, **extra_context):
        clone = self.copy()
        function = None
        expr2 = clone.source_expressions[1]
        geography = self.source_is_geography()
        if expr2.output_field.geography != geography:
            if isinstance(expr2, Value):
                expr2.output_field.geography = geography
            else:
                clone.source_expressions[1] = Cast(
                    expr2,
                    GeometryField(srid=expr2.output_field.srid, geography=geography),
                )

        if not geography and self.geo_field.geodetic(connection):
            # Geometry fields with geodetic (lon/lat) coordinates need special distance functions
            if self.spheroid:
                # DistanceSpheroid is more accurate and resource intensive than DistanceSphere
                function = connection.ops.spatial_function_name('DistanceSpheroid')
                # Replace boolean param by the real spheroid of the base field
                clone.source_expressions.append(Value(self.geo_field.spheroid(connection)))
            else:
                function = connection.ops.spatial_function_name('DistanceSphere')
        return super(Distance, clone).as_sql(compiler, connection, function=function, **extra_context)

    def as_sqlite(self, compiler, connection, **extra_context):
        if self.geo_field.geodetic(connection):
            # SpatiaLite returns NULL instead of zero on geodetic coordinates
            extra_context['template'] = 'COALESCE(%(function)s(%(expressions)s, %(spheroid)s), 0)'
            extra_context['spheroid'] = int(bool(self.spheroid))
        return super().as_sql(compiler, connection, **extra_context)


class Envelope(GeomOutputGeoFunc):
    arity = 1


class ForcePolygonCW(GeomOutputGeoFunc):
    arity = 1


class GeoHash(GeoFunc):
    output_field = TextField()

    def __init__(self, expression, precision=None, **extra):
        expressions = [expression]
        if precision is not None:
            expressions.append(self._handle_param(precision, 'precision', int))
        super().__init__(*expressions, **extra)

    def as_mysql(self, compiler, connection, **extra_context):
        clone = self.copy()
        # If no precision is provided, set it to the maximum.
        if len(clone.source_expressions) < 2:
            clone.source_expressions.append(Value(100))
        return clone.as_sql(compiler, connection, **extra_context)


class GeometryDistance(GeoFunc):
    output_field = FloatField()
    arity = 2
    function = ''
    arg_joiner = ' <-> '
    geom_param_pos = (0, 1)


class Intersection(OracleToleranceMixin, GeomOutputGeoFunc):
    arity = 2
    geom_param_pos = (0, 1)


@BaseSpatialField.register_lookup
class IsValid(OracleToleranceMixin, GeoFuncMixin, Transform):
    lookup_name = 'isvalid'
    output_field = BooleanField()

    def as_oracle(self, compiler, connection, **extra_context):
        sql, params = super().as_oracle(compiler, connection, **extra_context)
        return "CASE %s WHEN 'TRUE' THEN 1 ELSE 0 END" % sql, params


class Length(DistanceResultMixin, OracleToleranceMixin, GeoFunc):
    def __init__(self, expr1, spheroid=True, **extra):
        self.spheroid = spheroid
        super().__init__(expr1, **extra)

    def as_sql(self, compiler, connection, **extra_context):
        if self.geo_field.geodetic(connection) and not connection.features.supports_length_geodetic:
            raise NotSupportedError("This backend doesn't support Length on geodetic fields")
        return super().as_sql(compiler, connection, **extra_context)

    def as_postgresql(self, compiler, connection, **extra_context):
        clone = self.copy()
        function = None
        if self.source_is_geography():
            clone.source_expressions.append(Value(self.spheroid))
        elif self.geo_field.geodetic(connection):
            # Geometry fields with geodetic (lon/lat) coordinates need length_spheroid
            function = connection.ops.spatial_function_name('LengthSpheroid')
            clone.source_expressions.append(Value(self.geo_field.spheroid(connection)))
        else:
            dim = min(f.dim for f in self.get_source_fields() if f)
            if dim > 2:
                function = connection.ops.length3d
        return super(Length, clone).as_sql(compiler, connection, function=function, **extra_context)

    def as_sqlite(self, compiler, connection, **extra_context):
        function = None
        if self.geo_field.geodetic(connection):
            function = 'GeodesicLength' if self.spheroid else 'GreatCircleLength'
        return super().as_sql(compiler, connection, function=function, **extra_context)


class LineLocatePoint(GeoFunc):
    output_field = FloatField()
    arity = 2
    geom_param_pos = (0, 1)


class MakeValid(GeomOutputGeoFunc):
    pass


class MemSize(GeoFunc):
    output_field = IntegerField()
    arity = 1


class NumGeometries(GeoFunc):
    output_field = IntegerField()
    arity = 1


class NumPoints(GeoFunc):
    output_field = IntegerField()
    arity = 1


class Perimeter(DistanceResultMixin, OracleToleranceMixin, GeoFunc):
    arity = 1

    def as_postgresql(self, compiler, connection, **extra_context):
        function = None
        if self.geo_field.geodetic(connection) and not self.source_is_geography():
            raise NotSupportedError("ST_Perimeter cannot use a non-projected non-geography field.")
        dim = min(f.dim for f in self.get_source_fields())
        if dim > 2:
            function = connection.ops.perimeter3d
        return super().as_sql(compiler, connection, function=function, **extra_context)

    def as_sqlite(self, compiler, connection, **extra_context):
        if self.geo_field.geodetic(connection):
            raise NotSupportedError("Perimeter cannot use a non-projected field.")
        return super().as_sql(compiler, connection, **extra_context)


class PointOnSurface(OracleToleranceMixin, GeomOutputGeoFunc):
    arity = 1


class Reverse(GeoFunc):
    arity = 1


class Scale(SQLiteDecimalToFloatMixin, GeomOutputGeoFunc):
    def __init__(self, expression, x, y, z=0.0, **extra):
        expressions = [
            expression,
            self._handle_param(x, 'x', NUMERIC_TYPES),
            self._handle_param(y, 'y', NUMERIC_TYPES),
        ]
        if z != 0.0:
            expressions.append(self._handle_param(z, 'z', NUMERIC_TYPES))
        super().__init__(*expressions, **extra)


class SnapToGrid(SQLiteDecimalToFloatMixin, GeomOutputGeoFunc):
    def __init__(self, expression, *args, **extra):
        nargs = len(args)
        expressions = [expression]
        if nargs in (1, 2):
            expressions.extend(
                [self._handle_param(arg, '', NUMERIC_TYPES) for arg in args]
            )
        elif nargs == 4:
            # Reverse origin and size param ordering
            expressions += [
                *(self._handle_param(arg, '', NUMERIC_TYPES) for arg in args[2:]),
                *(self._handle_param(arg, '', NUMERIC_TYPES) for arg in args[0:2]),
            ]
        else:
            raise ValueError('Must provide 1, 2, or 4 arguments to `SnapToGrid`.')
        super().__init__(*expressions, **extra)


class SymDifference(OracleToleranceMixin, GeomOutputGeoFunc):
    arity = 2
    geom_param_pos = (0, 1)


class Transform(GeomOutputGeoFunc):
    def __init__(self, expression, srid, **extra):
        expressions = [
            expression,
            self._handle_param(srid, 'srid', int),
        ]
        if 'output_field' not in extra:
            extra['output_field'] = GeometryField(srid=srid)
        super().__init__(*expressions, **extra)


class Translate(Scale):
    def as_sqlite(self, compiler, connection, **extra_context):
        clone = self.copy()
        if len(self.source_expressions) < 4:
            # Always provide the z parameter for ST_Translate
            clone.source_expressions.append(Value(0))
        return super(Translate, clone).as_sqlite(compiler, connection, **extra_context)


class Union(OracleToleranceMixin, GeomOutputGeoFunc):
    arity = 2
    geom_param_pos = (0, 1)
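# Usage sketch (editorial illustration, not part of the module above). These
# classes are queryset expressions; the `City` model with a `point`
# GeometryField is hypothetical, and the SRIDs are only examples.
#
#     from django.contrib.gis.db.models.functions import Distance, Transform
#     from django.contrib.gis.geos import Point
#
#     pnt = Point(5, 23, srid=4326)
#     City.objects.annotate(d=Distance('point', pnt))       # DistanceField output
#     City.objects.annotate(utm=Transform('point', 32140))  # reprojected geometry
#
# Note that GeoFuncMixin.resolve_expression() above wraps any extra geometry
# argument whose SRID differs from the first one in a Transform(), so
# mixed-SRID arguments are converted automatically before the SQL is built.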
import json
import os
import sys
import uuid
from ctypes import (
    addressof, byref, c_buffer, c_char_p, c_double, c_int, c_void_p,
    string_at,
)

from django.contrib.gis.gdal.driver import Driver
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import raster as capi
from django.contrib.gis.gdal.raster.band import BandList
from django.contrib.gis.gdal.raster.base import GDALRasterBase
from django.contrib.gis.gdal.raster.const import (
    GDAL_RESAMPLE_ALGORITHMS, VSI_DELETE_BUFFER_ON_READ,
    VSI_FILESYSTEM_BASE_PATH, VSI_TAKE_BUFFER_OWNERSHIP,
)
from django.contrib.gis.gdal.srs import SpatialReference, SRSException
from django.contrib.gis.geometry import json_regex
from django.utils.encoding import force_bytes, force_str
from django.utils.functional import cached_property


class TransformPoint(list):
    indices = {
        'origin': (0, 3),
        'scale': (1, 5),
        'skew': (2, 4),
    }

    def __init__(self, raster, prop):
        x = raster.geotransform[self.indices[prop][0]]
        y = raster.geotransform[self.indices[prop][1]]
        super().__init__([x, y])
        self._raster = raster
        self._prop = prop

    @property
    def x(self):
        return self[0]

    @x.setter
    def x(self, value):
        gtf = self._raster.geotransform
        gtf[self.indices[self._prop][0]] = value
        self._raster.geotransform = gtf

    @property
    def y(self):
        return self[1]

    @y.setter
    def y(self, value):
        gtf = self._raster.geotransform
        gtf[self.indices[self._prop][1]] = value
        self._raster.geotransform = gtf


class GDALRaster(GDALRasterBase):
    """
    Wrap a raster GDAL Data Source object.
    """
    destructor = capi.close_ds

    def __init__(self, ds_input, write=False):
        self._write = 1 if write else 0
        Driver.ensure_registered()

        # Preprocess json inputs. This converts json strings to dictionaries,
        # which are parsed below the same way as direct dictionary inputs.
        if isinstance(ds_input, str) and json_regex.match(ds_input):
            ds_input = json.loads(ds_input)

        # If input is a valid file path, try setting file as source.
        if isinstance(ds_input, str):
            if (
                not ds_input.startswith(VSI_FILESYSTEM_BASE_PATH) and
                not os.path.exists(ds_input)
            ):
                raise GDALException(
                    'Unable to read raster source input "%s".' % ds_input
                )
            try:
                # GDALOpen will auto-detect the data source type.
                self._ptr = capi.open_ds(force_bytes(ds_input), self._write)
            except GDALException as err:
                raise GDALException('Could not open the datasource at "{}" ({}).'.format(ds_input, err))
        elif isinstance(ds_input, bytes):
            # Create a new raster in write mode.
            self._write = 1
            # Get size of buffer.
            size = sys.getsizeof(ds_input)
            # Pass data to ctypes, keeping a reference to the ctypes object so
            # that the vsimem file remains available until the GDALRaster is
            # deleted.
            self._ds_input = c_buffer(ds_input)
            # Create random name to reference in vsimem filesystem.
            vsi_path = os.path.join(VSI_FILESYSTEM_BASE_PATH, str(uuid.uuid4()))
            # Create vsimem file from buffer.
            capi.create_vsi_file_from_mem_buffer(
                force_bytes(vsi_path),
                byref(self._ds_input),
                size,
                VSI_TAKE_BUFFER_OWNERSHIP,
            )
            # Open the new vsimem file as a GDALRaster.
            try:
                self._ptr = capi.open_ds(force_bytes(vsi_path), self._write)
            except GDALException:
                # Remove the broken file from the VSI filesystem.
                capi.unlink_vsi_file(force_bytes(vsi_path))
                raise GDALException('Failed creating VSI raster from the input buffer.')
        elif isinstance(ds_input, dict):
            # A new raster needs to be created in write mode
            self._write = 1

            # Create driver (in memory by default)
            driver = Driver(ds_input.get('driver', 'MEM'))

            # For out of memory drivers, check filename argument
            if driver.name != 'MEM' and 'name' not in ds_input:
                raise GDALException('Specify name for creation of raster with driver "{}".'.format(driver.name))

            # Check if width and height were specified
            if 'width' not in ds_input or 'height' not in ds_input:
                raise GDALException('Specify width and height attributes for JSON or dict input.')

            # Check if srid was specified
            if 'srid' not in ds_input:
                raise GDALException('Specify srid for JSON or dict input.')

            # Create null terminated gdal options array.
            papsz_options = []
            for key, val in ds_input.get('papsz_options', {}).items():
                option = '{}={}'.format(key, val)
                papsz_options.append(option.upper().encode())
            papsz_options.append(None)

            # Convert papszlist to ctypes array.
            papsz_options = (c_char_p * len(papsz_options))(*papsz_options)

            # Create GDAL Raster
            self._ptr = capi.create_ds(
                driver._ptr,
                force_bytes(ds_input.get('name', '')),
                ds_input['width'],
                ds_input['height'],
                ds_input.get('nr_of_bands', len(ds_input.get('bands', []))),
                ds_input.get('datatype', 6),
                byref(papsz_options),
            )

            # Set band data if provided
            for i, band_input in enumerate(ds_input.get('bands', [])):
                band = self.bands[i]
                if 'nodata_value' in band_input:
                    band.nodata_value = band_input['nodata_value']
                    # Instantiate band filled with nodata values if only
                    # partial input data has been provided.
                    if band.nodata_value is not None and (
                            'data' not in band_input or
                            'size' in band_input or
                            'shape' in band_input):
                        band.data(data=(band.nodata_value,), shape=(1, 1))
                # Set band data values from input.
                band.data(
                    data=band_input.get('data'),
                    size=band_input.get('size'),
                    shape=band_input.get('shape'),
                    offset=band_input.get('offset'),
                )

            # Set SRID
            self.srs = ds_input.get('srid')

            # Set additional properties if provided
            if 'origin' in ds_input:
                self.origin.x, self.origin.y = ds_input['origin']

            if 'scale' in ds_input:
                self.scale.x, self.scale.y = ds_input['scale']

            if 'skew' in ds_input:
                self.skew.x, self.skew.y = ds_input['skew']
        elif isinstance(ds_input, c_void_p):
            # Instantiate the object using an existing pointer to a gdal raster.
            self._ptr = ds_input
        else:
            raise GDALException('Invalid data source input type: "{}".'.format(type(ds_input)))

    def __del__(self):
        if self.is_vsi_based:
            # Remove the temporary file from the VSI in-memory filesystem.
            capi.unlink_vsi_file(force_bytes(self.name))
        super().__del__()

    def __str__(self):
        return self.name

    def __repr__(self):
        """
        Short-hand representation because WKB may be very large.
        """
        return '<Raster object at %s>' % hex(addressof(self._ptr))

    def _flush(self):
        """
        Flush all data from memory into the source file if it exists.
        The data that needs flushing are geotransforms, coordinate systems,
        nodata_values and pixel values. This function will be called
        automatically wherever it is needed.
        """
        # Raise an Exception if the value is being changed in read mode.
        if not self._write:
            raise GDALException('Raster needs to be opened in write mode to change values.')
        capi.flush_ds(self._ptr)

    @property
    def vsi_buffer(self):
        if not self.is_vsi_based:
            return None
        # Prepare an integer that will contain the buffer length.
        out_length = c_int()
        # Get the data using the vsi file name.
        dat = capi.get_mem_buffer_from_vsi_file(
            force_bytes(self.name),
            byref(out_length),
            VSI_DELETE_BUFFER_ON_READ,
        )
        # Read the full buffer pointer.
        return string_at(dat, out_length.value)

    @cached_property
    def is_vsi_based(self):
        return self._ptr and self.name.startswith(VSI_FILESYSTEM_BASE_PATH)

    @property
    def name(self):
        """
        Return the name of this raster. Corresponds to filename
        for file-based rasters.
        """
        return force_str(capi.get_ds_description(self._ptr))

    @cached_property
    def driver(self):
        """
        Return the GDAL Driver used for this raster.
        """
        ds_driver = capi.get_ds_driver(self._ptr)
        return Driver(ds_driver)

    @property
    def width(self):
        """
        Width (X axis) in pixels.
        """
        return capi.get_ds_xsize(self._ptr)

    @property
    def height(self):
        """
        Height (Y axis) in pixels.
        """
        return capi.get_ds_ysize(self._ptr)

    @property
    def srs(self):
        """
        Return the SpatialReference used in this GDALRaster.
        """
        try:
            wkt = capi.get_ds_projection_ref(self._ptr)
            if not wkt:
                return None
            return SpatialReference(wkt, srs_type='wkt')
        except SRSException:
            return None

    @srs.setter
    def srs(self, value):
        """
        Set the spatial reference used in this GDALRaster. The input can be
        a SpatialReference or any parameter accepted by the SpatialReference
        constructor.
        """
        if isinstance(value, SpatialReference):
            srs = value
        elif isinstance(value, (int, str)):
            srs = SpatialReference(value)
        else:
            raise ValueError('Could not create a SpatialReference from input.')
        capi.set_ds_projection_ref(self._ptr, srs.wkt.encode())
        self._flush()

    @property
    def srid(self):
        """
        Shortcut to access the srid of this GDALRaster.
        """
        return self.srs.srid

    @srid.setter
    def srid(self, value):
        """
        Shortcut to set this GDALRaster's srs from an srid.
        """
        self.srs = value

    @property
    def geotransform(self):
        """
        Return the geotransform of the data source.
        Return the default geotransform if it does not exist or has not been
        set previously. The default is [0.0, 1.0, 0.0, 0.0, 0.0, -1.0].
        """
        # Create empty ctypes double array for data
        gtf = (c_double * 6)()
        capi.get_ds_geotransform(self._ptr, byref(gtf))
        return list(gtf)

    @geotransform.setter
    def geotransform(self, values):
        "Set the geotransform for the data source."
        if len(values) != 6 or not all(isinstance(x, (int, float)) for x in values):
            raise ValueError('Geotransform must consist of 6 numeric values.')
        # Create ctypes double array with input and write data
        values = (c_double * 6)(*values)
        capi.set_ds_geotransform(self._ptr, byref(values))
        self._flush()

    @property
    def origin(self):
        """
        Coordinates of the raster origin.
        """
        return TransformPoint(self, 'origin')

    @property
    def scale(self):
        """
        Pixel scale in units of the raster projection.
        """
        return TransformPoint(self, 'scale')

    @property
    def skew(self):
        """
        Skew of pixels (rotation parameters).
        """
        return TransformPoint(self, 'skew')

    @property
    def extent(self):
        """
        Return the extent as a 4-tuple (xmin, ymin, xmax, ymax).
        """
        # Calculate boundary values based on scale and size
        xval = self.origin.x + self.scale.x * self.width
        yval = self.origin.y + self.scale.y * self.height
        # Calculate min and max values
        xmin = min(xval, self.origin.x)
        xmax = max(xval, self.origin.x)
        ymin = min(yval, self.origin.y)
        ymax = max(yval, self.origin.y)
        return xmin, ymin, xmax, ymax

    @property
    def bands(self):
        return BandList(self)

    def warp(self, ds_input, resampling='NearestNeighbour', max_error=0.0):
        """
        Return a warped GDALRaster with the given input characteristics.

        The input is expected to be a dictionary containing the parameters
        of the target raster. Allowed values are width, height, SRID, origin,
        scale, skew, datatype, driver, and name (filename).

        By default, the warp function keeps all parameters equal to the values
        of the original source raster. For the name of the target raster, the
        name of the source raster will be used and appended with
        _copy. + source_driver_name.

        In addition, the resampling algorithm can be specified with the
        "resampling" input parameter. The default is NearestNeighbour. For a
        list of all options consult the GDAL_RESAMPLE_ALGORITHMS constant.
        """
        # Get the parameters defining the geotransform, srid, and size of the raster
        ds_input.setdefault('width', self.width)
        ds_input.setdefault('height', self.height)
        ds_input.setdefault('srid', self.srs.srid)
        ds_input.setdefault('origin', self.origin)
        ds_input.setdefault('scale', self.scale)
        ds_input.setdefault('skew', self.skew)
        # Get the driver, name, and datatype of the target raster
        ds_input.setdefault('driver', self.driver.name)

        if 'name' not in ds_input:
            ds_input['name'] = self.name + '_copy.' + self.driver.name

        if 'datatype' not in ds_input:
            ds_input['datatype'] = self.bands[0].datatype()

        # Instantiate raster bands filled with nodata values.
        ds_input['bands'] = [{'nodata_value': bnd.nodata_value} for bnd in self.bands]

        # Create target raster
        target = GDALRaster(ds_input, write=True)

        # Select resampling algorithm
        algorithm = GDAL_RESAMPLE_ALGORITHMS[resampling]

        # Reproject image
        capi.reproject_image(
            self._ptr, self.srs.wkt.encode(),
            target._ptr, target.srs.wkt.encode(),
            algorithm, 0.0, max_error,
            c_void_p(), c_void_p(), c_void_p()
        )

        # Make sure all data is written to file
        target._flush()

        return target

    def transform(self, srs, driver=None, name=None, resampling='NearestNeighbour', max_error=0.0):
        """
        Return a copy of this raster reprojected into the given spatial
        reference system.
        """
        # Convert the resampling algorithm name into an algorithm id
        algorithm = GDAL_RESAMPLE_ALGORITHMS[resampling]

        if isinstance(srs, SpatialReference):
            target_srs = srs
        elif isinstance(srs, (int, str)):
            target_srs = SpatialReference(srs)
        else:
            raise TypeError(
                'Transform only accepts SpatialReference, string, and integer '
                'objects.'
            )

        # Create warped virtual dataset in the target reference system
        target = capi.auto_create_warped_vrt(
            self._ptr, self.srs.wkt.encode(), target_srs.wkt.encode(),
            algorithm, max_error, c_void_p()
        )
        target = GDALRaster(target)

        # Construct the target warp dictionary from the virtual raster
        data = {
            'srid': target_srs.srid,
            'width': target.width,
            'height': target.height,
            'origin': [target.origin.x, target.origin.y],
            'scale': [target.scale.x, target.scale.y],
            'skew': [target.skew.x, target.skew.y],
        }

        # Set the driver and filepath if provided
        if driver:
            data['driver'] = driver

        if name:
            data['name'] = name

        # Warp the raster into new srid
        return self.warp(data, resampling=resampling, max_error=max_error)

    @property
    def info(self):
        """
        Return information about this raster in a string format equivalent
        to the output of the gdalinfo command line utility.
        """
        if not capi.get_ds_info:
            raise ValueError('GDAL ≥ 2.1 is required for using the info property.')
        return capi.get_ds_info(self.ptr, None).decode()
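# Usage sketch (editorial illustration, not part of the module above). The
# dict keys mirror the ones parsed in GDALRaster.__init__(); the concrete
# sizes, SRIDs, and band values below are invented for the example.
#
#     from django.contrib.gis.gdal import GDALRaster
#
#     rst = GDALRaster({
#         'srid': 4326,
#         'width': 16, 'height': 16,
#         'scale': [0.1, -0.1],
#         'origin': [0.0, 1.6],
#         'bands': [{'data': range(256), 'nodata_value': 255}],
#     })
#     reprojected = rst.transform(3086)                # warped copy in EPSG:3086
#     resized = rst.warp({'width': 32, 'height': 32})  # in-memory resampled copy
#
# With no 'driver' key, the raster is created with the in-memory 'MEM' driver,
# which is why no filename is required in this sketch.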
import datetime import importlib import io import os import sys from unittest import mock from django.apps import apps from django.core.management import CommandError, call_command from django.db import ( ConnectionHandler, DatabaseError, OperationalError, connection, connections, models, ) from django.db.backends.base.schema import BaseDatabaseSchemaEditor from django.db.backends.utils import truncate_name from django.db.migrations.exceptions import InconsistentMigrationHistory from django.db.migrations.recorder import MigrationRecorder from django.test import TestCase, override_settings, skipUnlessDBFeature from .models import UnicodeModel, UnserializableModel from .routers import TestRouter from .test_base import MigrationTestBase class MigrateTests(MigrationTestBase): """ Tests running the migrate command. """ databases = {'default', 'other'} @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"}) def test_migrate(self): """ Tests basic usage of the migrate command. """ # No tables are created self.assertTableNotExists("migrations_author") self.assertTableNotExists("migrations_tribble") self.assertTableNotExists("migrations_book") # Run the migrations to 0001 only stdout = io.StringIO() call_command('migrate', 'migrations', '0001', verbosity=1, stdout=stdout, no_color=True) stdout = stdout.getvalue() self.assertIn('Target specific migration: 0001_initial, from migrations', stdout) self.assertIn('Applying migrations.0001_initial... OK', stdout) # The correct tables exist self.assertTableExists("migrations_author") self.assertTableExists("migrations_tribble") self.assertTableNotExists("migrations_book") # Run migrations all the way call_command("migrate", verbosity=0) # The correct tables exist self.assertTableExists("migrations_author") self.assertTableNotExists("migrations_tribble") self.assertTableExists("migrations_book") # Unmigrate everything stdout = io.StringIO() call_command('migrate', 'migrations', 'zero', verbosity=1, stdout=stdout, no_color=True) stdout = stdout.getvalue() self.assertIn('Unapply all migrations: migrations', stdout) self.assertIn('Unapplying migrations.0002_second... OK', stdout) # Tables are gone self.assertTableNotExists("migrations_author") self.assertTableNotExists("migrations_tribble") self.assertTableNotExists("migrations_book") @override_settings(INSTALLED_APPS=[ 'django.contrib.auth', 'django.contrib.contenttypes', 'migrations.migrations_test_apps.migrated_app', ]) def test_migrate_with_system_checks(self): out = io.StringIO() call_command('migrate', skip_checks=False, no_color=True, stdout=out) self.assertIn('Apply all migrations: migrated_app', out.getvalue()) @override_settings(INSTALLED_APPS=['migrations', 'migrations.migrations_test_apps.unmigrated_app_syncdb']) def test_app_without_migrations(self): msg = "App 'unmigrated_app_syncdb' does not have migrations." with self.assertRaisesMessage(CommandError, msg): call_command('migrate', app_label='unmigrated_app_syncdb') @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_clashing_prefix'}) def test_ambiguous_prefix(self): msg = ( "More than one migration matches 'a' in app 'migrations'. Please " "be more specific." ) with self.assertRaisesMessage(CommandError, msg): call_command('migrate', app_label='migrations', migration_name='a') @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations'}) def test_unknown_prefix(self): msg = "Cannot find a migration matching 'nonexistent' from app 'migrations'." 
with self.assertRaisesMessage(CommandError, msg): call_command('migrate', app_label='migrations', migration_name='nonexistent') @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_initial_false"}) def test_migrate_initial_false(self): """ `Migration.initial = False` skips fake-initial detection. """ # Make sure no tables are created self.assertTableNotExists("migrations_author") self.assertTableNotExists("migrations_tribble") # Run the migrations to 0001 only call_command("migrate", "migrations", "0001", verbosity=0) # Fake rollback call_command("migrate", "migrations", "zero", fake=True, verbosity=0) # Make sure fake-initial detection does not run with self.assertRaises(DatabaseError): call_command("migrate", "migrations", "0001", fake_initial=True, verbosity=0) call_command("migrate", "migrations", "0001", fake=True, verbosity=0) # Real rollback call_command("migrate", "migrations", "zero", verbosity=0) # Make sure it's all gone self.assertTableNotExists("migrations_author") self.assertTableNotExists("migrations_tribble") self.assertTableNotExists("migrations_book") @override_settings( MIGRATION_MODULES={"migrations": "migrations.test_migrations"}, DATABASE_ROUTERS=['migrations.routers.TestRouter'], ) def test_migrate_fake_initial(self): """ --fake-initial only works if all tables created in the initial migration of an app exists. Database routers must be obeyed when doing that check. """ # Make sure no tables are created for db in self.databases: self.assertTableNotExists("migrations_author", using=db) self.assertTableNotExists("migrations_tribble", using=db) # Run the migrations to 0001 only call_command("migrate", "migrations", "0001", verbosity=0) call_command("migrate", "migrations", "0001", verbosity=0, database="other") # Make sure the right tables exist self.assertTableExists("migrations_author") self.assertTableNotExists("migrations_tribble") # Also check the "other" database self.assertTableNotExists("migrations_author", using="other") self.assertTableExists("migrations_tribble", using="other") # Fake a roll-back call_command("migrate", "migrations", "zero", fake=True, verbosity=0) call_command("migrate", "migrations", "zero", fake=True, verbosity=0, database="other") # Make sure the tables still exist self.assertTableExists("migrations_author") self.assertTableExists("migrations_tribble", using="other") # Try to run initial migration with self.assertRaises(DatabaseError): call_command("migrate", "migrations", "0001", verbosity=0) # Run initial migration with an explicit --fake-initial out = io.StringIO() with mock.patch('django.core.management.color.supports_color', lambda *args: False): call_command("migrate", "migrations", "0001", fake_initial=True, stdout=out, verbosity=1) call_command("migrate", "migrations", "0001", fake_initial=True, verbosity=0, database="other") self.assertIn( "migrations.0001_initial... faked", out.getvalue().lower() ) try: # Run migrations all the way. call_command('migrate', verbosity=0) call_command('migrate', verbosity=0, database="other") self.assertTableExists('migrations_author') self.assertTableNotExists('migrations_tribble') self.assertTableExists('migrations_book') self.assertTableNotExists('migrations_author', using='other') self.assertTableNotExists('migrations_tribble', using='other') self.assertTableNotExists('migrations_book', using='other') # Fake a roll-back. 
call_command('migrate', 'migrations', 'zero', fake=True, verbosity=0) call_command('migrate', 'migrations', 'zero', fake=True, verbosity=0, database='other') self.assertTableExists('migrations_author') self.assertTableNotExists('migrations_tribble') self.assertTableExists('migrations_book') # Run initial migration. with self.assertRaises(DatabaseError): call_command('migrate', 'migrations', verbosity=0) # Run initial migration with an explicit --fake-initial. with self.assertRaises(DatabaseError): # Fails because "migrations_tribble" does not exist but needs # to in order to make --fake-initial work. call_command('migrate', 'migrations', fake_initial=True, verbosity=0) # Fake an apply. call_command('migrate', 'migrations', fake=True, verbosity=0) call_command('migrate', 'migrations', fake=True, verbosity=0, database='other') finally: # Unmigrate everything. call_command('migrate', 'migrations', 'zero', verbosity=0) call_command('migrate', 'migrations', 'zero', verbosity=0, database='other') # Make sure it's all gone for db in self.databases: self.assertTableNotExists("migrations_author", using=db) self.assertTableNotExists("migrations_tribble", using=db) self.assertTableNotExists("migrations_book", using=db) @skipUnlessDBFeature('ignores_table_name_case') def test_migrate_fake_initial_case_insensitive(self): with override_settings(MIGRATION_MODULES={ 'migrations': 'migrations.test_fake_initial_case_insensitive.initial', }): call_command('migrate', 'migrations', '0001', verbosity=0) call_command('migrate', 'migrations', 'zero', fake=True, verbosity=0) with override_settings(MIGRATION_MODULES={ 'migrations': 'migrations.test_fake_initial_case_insensitive.fake_initial', }): out = io.StringIO() call_command( 'migrate', 'migrations', '0001', fake_initial=True, stdout=out, verbosity=1, no_color=True, ) self.assertIn( 'migrations.0001_initial... faked', out.getvalue().lower(), ) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_fake_split_initial"}) def test_migrate_fake_split_initial(self): """ Split initial migrations can be faked with --fake-initial. """ call_command("migrate", "migrations", "0002", verbosity=0) call_command("migrate", "migrations", "zero", fake=True, verbosity=0) out = io.StringIO() with mock.patch('django.core.management.color.supports_color', lambda *args: False): call_command("migrate", "migrations", "0002", fake_initial=True, stdout=out, verbosity=1) value = out.getvalue().lower() self.assertIn("migrations.0001_initial... faked", value) self.assertIn("migrations.0002_second... faked", value) # Fake an apply call_command("migrate", "migrations", fake=True, verbosity=0) # Unmigrate everything call_command("migrate", "migrations", "zero", verbosity=0) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_conflict"}) def test_migrate_conflict_exit(self): """ migrate exits if it detects a conflict. 
""" msg = ( "Conflicting migrations detected; multiple leaf nodes in the " "migration graph: (0002_conflicting_second, 0002_second in " "migrations).\n" "To fix them run 'python manage.py makemigrations --merge'" ) with self.assertRaisesMessage(CommandError, msg): call_command("migrate", "migrations") @override_settings(MIGRATION_MODULES={ 'migrations': 'migrations.test_migrations', }) def test_migrate_check(self): with self.assertRaises(SystemExit): call_command('migrate', 'migrations', '0001', check_unapplied=True) self.assertTableNotExists('migrations_author') self.assertTableNotExists('migrations_tribble') self.assertTableNotExists('migrations_book') @override_settings(MIGRATION_MODULES={ 'migrations': 'migrations.test_migrations_plan', }) def test_migrate_check_plan(self): out = io.StringIO() with self.assertRaises(SystemExit): call_command( 'migrate', 'migrations', '0001', check_unapplied=True, plan=True, stdout=out, no_color=True, ) self.assertEqual( 'Planned operations:\n' 'migrations.0001_initial\n' ' Create model Salamander\n' ' Raw Python operation -> Grow salamander tail.\n', out.getvalue(), ) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"}) def test_showmigrations_list(self): """ showmigrations --list displays migrations and whether or not they're applied. """ out = io.StringIO() with mock.patch('django.core.management.color.supports_color', lambda *args: True): call_command("showmigrations", format='list', stdout=out, verbosity=0, no_color=False) self.assertEqual( '\x1b[1mmigrations\n\x1b[0m' ' [ ] 0001_initial\n' ' [ ] 0002_second\n', out.getvalue().lower() ) call_command("migrate", "migrations", "0001", verbosity=0) out = io.StringIO() # Giving the explicit app_label tests for selective `show_list` in the command call_command("showmigrations", "migrations", format='list', stdout=out, verbosity=0, no_color=True) self.assertEqual( 'migrations\n' ' [x] 0001_initial\n' ' [ ] 0002_second\n', out.getvalue().lower() ) out = io.StringIO() # Applied datetimes are displayed at verbosity 2+. call_command('showmigrations', 'migrations', stdout=out, verbosity=2, no_color=True) migration1 = MigrationRecorder(connection).migration_qs.get(app='migrations', name='0001_initial') self.assertEqual( 'migrations\n' ' [x] 0001_initial (applied at %s)\n' ' [ ] 0002_second\n' % migration1.applied.strftime('%Y-%m-%d %H:%M:%S'), out.getvalue().lower() ) # Cleanup by unmigrating everything call_command("migrate", "migrations", "zero", verbosity=0) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_run_before"}) def test_showmigrations_plan(self): """ Tests --plan output of showmigrations command """ out = io.StringIO() call_command("showmigrations", format='plan', stdout=out) self.assertEqual( "[ ] migrations.0001_initial\n" "[ ] migrations.0003_third\n" "[ ] migrations.0002_second\n", out.getvalue().lower() ) out = io.StringIO() call_command("showmigrations", format='plan', stdout=out, verbosity=2) self.assertEqual( "[ ] migrations.0001_initial\n" "[ ] migrations.0003_third ... (migrations.0001_initial)\n" "[ ] migrations.0002_second ... 
(migrations.0001_initial, migrations.0003_third)\n", out.getvalue().lower() ) call_command("migrate", "migrations", "0003", verbosity=0) out = io.StringIO() call_command("showmigrations", format='plan', stdout=out) self.assertEqual( "[x] migrations.0001_initial\n" "[x] migrations.0003_third\n" "[ ] migrations.0002_second\n", out.getvalue().lower() ) out = io.StringIO() call_command("showmigrations", format='plan', stdout=out, verbosity=2) self.assertEqual( "[x] migrations.0001_initial\n" "[x] migrations.0003_third ... (migrations.0001_initial)\n" "[ ] migrations.0002_second ... (migrations.0001_initial, migrations.0003_third)\n", out.getvalue().lower() ) # Cleanup by unmigrating everything call_command("migrate", "migrations", "zero", verbosity=0) @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_plan'}) def test_migrate_plan(self): """Tests migrate --plan output.""" out = io.StringIO() # Show the plan up to the third migration. call_command('migrate', 'migrations', '0003', plan=True, stdout=out, no_color=True) self.assertEqual( 'Planned operations:\n' 'migrations.0001_initial\n' ' Create model Salamander\n' ' Raw Python operation -> Grow salamander tail.\n' 'migrations.0002_second\n' ' Create model Book\n' " Raw SQL operation -> ['SELECT * FROM migrations_book']\n" 'migrations.0003_third\n' ' Create model Author\n' " Raw SQL operation -> ['SELECT * FROM migrations_author']\n", out.getvalue() ) try: # Migrate to the third migration. call_command('migrate', 'migrations', '0003', verbosity=0) out = io.StringIO() # Show the plan for when there is nothing to apply. call_command('migrate', 'migrations', '0003', plan=True, stdout=out, no_color=True) self.assertEqual( 'Planned operations:\n' ' No planned migration operations.\n', out.getvalue() ) out = io.StringIO() # Show the plan for reverse migration back to 0001. call_command('migrate', 'migrations', '0001', plan=True, stdout=out, no_color=True) self.assertEqual( 'Planned operations:\n' 'migrations.0003_third\n' ' Undo Create model Author\n' " Raw SQL operation -> ['SELECT * FROM migrations_book']\n" 'migrations.0002_second\n' ' Undo Create model Book\n' " Raw SQL operation -> ['SELECT * FROM migrations_salamand…\n", out.getvalue() ) out = io.StringIO() # Show the migration plan to fourth, with truncated details. call_command('migrate', 'migrations', '0004', plan=True, stdout=out, no_color=True) self.assertEqual( 'Planned operations:\n' 'migrations.0004_fourth\n' ' Raw SQL operation -> SELECT * FROM migrations_author WHE…\n', out.getvalue() ) # Show the plan when an operation is irreversible. # Migrate to the fourth migration. call_command('migrate', 'migrations', '0004', verbosity=0) out = io.StringIO() call_command('migrate', 'migrations', '0003', plan=True, stdout=out, no_color=True) self.assertEqual( 'Planned operations:\n' 'migrations.0004_fourth\n' ' Raw SQL operation -> IRREVERSIBLE\n', out.getvalue() ) out = io.StringIO() call_command('migrate', 'migrations', '0005', plan=True, stdout=out, no_color=True) # Operation is marked as irreversible only in the revert plan. 
self.assertEqual( 'Planned operations:\n' 'migrations.0005_fifth\n' ' Raw Python operation\n' ' Raw Python operation\n' ' Raw Python operation -> Feed salamander.\n', out.getvalue() ) call_command('migrate', 'migrations', '0005', verbosity=0) out = io.StringIO() call_command('migrate', 'migrations', '0004', plan=True, stdout=out, no_color=True) self.assertEqual( 'Planned operations:\n' 'migrations.0005_fifth\n' ' Raw Python operation -> IRREVERSIBLE\n' ' Raw Python operation -> IRREVERSIBLE\n' ' Raw Python operation\n', out.getvalue() ) finally: # Cleanup by unmigrating everything: fake the irreversible, then # migrate all to zero. call_command('migrate', 'migrations', '0003', fake=True, verbosity=0) call_command('migrate', 'migrations', 'zero', verbosity=0) @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_empty'}) def test_showmigrations_no_migrations(self): out = io.StringIO() call_command('showmigrations', stdout=out, no_color=True) self.assertEqual('migrations\n (no migrations)\n', out.getvalue().lower()) @override_settings(INSTALLED_APPS=['migrations.migrations_test_apps.unmigrated_app']) def test_showmigrations_unmigrated_app(self): out = io.StringIO() call_command('showmigrations', 'unmigrated_app', stdout=out, no_color=True) self.assertEqual('unmigrated_app\n (no migrations)\n', out.getvalue().lower()) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_empty"}) def test_showmigrations_plan_no_migrations(self): """ Tests --plan output of showmigrations command without migrations """ out = io.StringIO() call_command('showmigrations', format='plan', stdout=out, no_color=True) self.assertEqual('(no migrations)\n', out.getvalue().lower()) out = io.StringIO() call_command('showmigrations', format='plan', stdout=out, verbosity=2, no_color=True) self.assertEqual('(no migrations)\n', out.getvalue().lower()) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_complex"}) def test_showmigrations_plan_squashed(self): """ Tests --plan output of showmigrations command with squashed migrations. """ out = io.StringIO() call_command("showmigrations", format='plan', stdout=out) self.assertEqual( "[ ] migrations.1_auto\n" "[ ] migrations.2_auto\n" "[ ] migrations.3_squashed_5\n" "[ ] migrations.6_auto\n" "[ ] migrations.7_auto\n", out.getvalue().lower() ) out = io.StringIO() call_command("showmigrations", format='plan', stdout=out, verbosity=2) self.assertEqual( "[ ] migrations.1_auto\n" "[ ] migrations.2_auto ... (migrations.1_auto)\n" "[ ] migrations.3_squashed_5 ... (migrations.2_auto)\n" "[ ] migrations.6_auto ... (migrations.3_squashed_5)\n" "[ ] migrations.7_auto ... (migrations.6_auto)\n", out.getvalue().lower() ) call_command("migrate", "migrations", "3_squashed_5", verbosity=0) out = io.StringIO() call_command("showmigrations", format='plan', stdout=out) self.assertEqual( "[x] migrations.1_auto\n" "[x] migrations.2_auto\n" "[x] migrations.3_squashed_5\n" "[ ] migrations.6_auto\n" "[ ] migrations.7_auto\n", out.getvalue().lower() ) out = io.StringIO() call_command("showmigrations", format='plan', stdout=out, verbosity=2) self.assertEqual( "[x] migrations.1_auto\n" "[x] migrations.2_auto ... (migrations.1_auto)\n" "[x] migrations.3_squashed_5 ... (migrations.2_auto)\n" "[ ] migrations.6_auto ... (migrations.3_squashed_5)\n" "[ ] migrations.7_auto ... 
(migrations.6_auto)\n", out.getvalue().lower() ) @override_settings(INSTALLED_APPS=[ 'migrations.migrations_test_apps.mutate_state_b', 'migrations.migrations_test_apps.alter_fk.author_app', 'migrations.migrations_test_apps.alter_fk.book_app', ]) def test_showmigrations_plan_single_app_label(self): """ `showmigrations --plan app_label` output with a single app_label. """ # Single app with no dependencies on other apps. out = io.StringIO() call_command('showmigrations', 'mutate_state_b', format='plan', stdout=out) self.assertEqual( '[ ] mutate_state_b.0001_initial\n' '[ ] mutate_state_b.0002_add_field\n', out.getvalue() ) # Single app with dependencies. out = io.StringIO() call_command('showmigrations', 'author_app', format='plan', stdout=out) self.assertEqual( '[ ] author_app.0001_initial\n' '[ ] book_app.0001_initial\n' '[ ] author_app.0002_alter_id\n', out.getvalue() ) # Some migrations already applied. call_command('migrate', 'author_app', '0001', verbosity=0) out = io.StringIO() call_command('showmigrations', 'author_app', format='plan', stdout=out) self.assertEqual( '[X] author_app.0001_initial\n' '[ ] book_app.0001_initial\n' '[ ] author_app.0002_alter_id\n', out.getvalue() ) # Cleanup by unmigrating author_app. call_command('migrate', 'author_app', 'zero', verbosity=0) @override_settings(INSTALLED_APPS=[ 'migrations.migrations_test_apps.mutate_state_b', 'migrations.migrations_test_apps.alter_fk.author_app', 'migrations.migrations_test_apps.alter_fk.book_app', ]) def test_showmigrations_plan_multiple_app_labels(self): """ `showmigrations --plan app_label` output with multiple app_labels. """ # Multiple apps: author_app depends on book_app; mutate_state_b doesn't # depend on other apps. out = io.StringIO() call_command('showmigrations', 'mutate_state_b', 'author_app', format='plan', stdout=out) self.assertEqual( '[ ] author_app.0001_initial\n' '[ ] book_app.0001_initial\n' '[ ] author_app.0002_alter_id\n' '[ ] mutate_state_b.0001_initial\n' '[ ] mutate_state_b.0002_add_field\n', out.getvalue() ) # Multiple apps: args order shouldn't matter (the same result is # expected as above). out = io.StringIO() call_command('showmigrations', 'author_app', 'mutate_state_b', format='plan', stdout=out) self.assertEqual( '[ ] author_app.0001_initial\n' '[ ] book_app.0001_initial\n' '[ ] author_app.0002_alter_id\n' '[ ] mutate_state_b.0001_initial\n' '[ ] mutate_state_b.0002_add_field\n', out.getvalue() ) @override_settings(INSTALLED_APPS=['migrations.migrations_test_apps.unmigrated_app']) def test_showmigrations_plan_app_label_no_migrations(self): out = io.StringIO() call_command('showmigrations', 'unmigrated_app', format='plan', stdout=out, no_color=True) self.assertEqual('(no migrations)\n', out.getvalue()) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"}) def test_sqlmigrate_forwards(self): """ sqlmigrate outputs forward looking SQL. 
""" out = io.StringIO() call_command("sqlmigrate", "migrations", "0001", stdout=out) output = out.getvalue().lower() index_tx_start = output.find(connection.ops.start_transaction_sql().lower()) index_op_desc_author = output.find('-- create model author') index_create_table = output.find('create table') index_op_desc_tribble = output.find('-- create model tribble') index_op_desc_unique_together = output.find('-- alter unique_together') index_tx_end = output.find(connection.ops.end_transaction_sql().lower()) if connection.features.can_rollback_ddl: self.assertGreater(index_tx_start, -1, "Transaction start not found") self.assertGreater( index_tx_end, index_op_desc_unique_together, "Transaction end not found or found before operation description (unique_together)" ) self.assertGreater( index_op_desc_author, index_tx_start, "Operation description (author) not found or found before transaction start" ) self.assertGreater( index_create_table, index_op_desc_author, "CREATE TABLE not found or found before operation description (author)" ) self.assertGreater( index_op_desc_tribble, index_create_table, "Operation description (tribble) not found or found before CREATE TABLE (author)" ) self.assertGreater( index_op_desc_unique_together, index_op_desc_tribble, "Operation description (unique_together) not found or found before operation description (tribble)" ) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"}) def test_sqlmigrate_backwards(self): """ sqlmigrate outputs reverse looking SQL. """ # Cannot generate the reverse SQL unless we've applied the migration. call_command("migrate", "migrations", verbosity=0) out = io.StringIO() call_command("sqlmigrate", "migrations", "0001", stdout=out, backwards=True) output = out.getvalue().lower() index_tx_start = output.find(connection.ops.start_transaction_sql().lower()) index_op_desc_unique_together = output.find('-- alter unique_together') index_op_desc_tribble = output.find('-- create model tribble') index_op_desc_author = output.find('-- create model author') index_drop_table = output.rfind('drop table') index_tx_end = output.find(connection.ops.end_transaction_sql().lower()) if connection.features.can_rollback_ddl: self.assertGreater(index_tx_start, -1, "Transaction start not found") self.assertGreater( index_tx_end, index_op_desc_unique_together, "Transaction end not found or found before DROP TABLE" ) self.assertGreater( index_op_desc_unique_together, index_tx_start, "Operation description (unique_together) not found or found before transaction start" ) self.assertGreater( index_op_desc_tribble, index_op_desc_unique_together, "Operation description (tribble) not found or found before operation description (unique_together)" ) self.assertGreater( index_op_desc_author, index_op_desc_tribble, "Operation description (author) not found or found before operation description (tribble)" ) self.assertGreater( index_drop_table, index_op_desc_author, "DROP TABLE not found or found before operation description (author)" ) # Cleanup by unmigrating everything call_command("migrate", "migrations", "zero", verbosity=0) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_non_atomic"}) def test_sqlmigrate_for_non_atomic_migration(self): """ Transaction wrappers aren't shown for non-atomic migrations. 
""" out = io.StringIO() call_command("sqlmigrate", "migrations", "0001", stdout=out) output = out.getvalue().lower() queries = [q.strip() for q in output.splitlines()] if connection.ops.start_transaction_sql(): self.assertNotIn(connection.ops.start_transaction_sql().lower(), queries) self.assertNotIn(connection.ops.end_transaction_sql().lower(), queries) @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations'}) def test_sqlmigrate_for_non_transactional_databases(self): """ Transaction wrappers aren't shown for databases that don't support transactional DDL. """ out = io.StringIO() with mock.patch.object(connection.features, 'can_rollback_ddl', False): call_command('sqlmigrate', 'migrations', '0001', stdout=out) output = out.getvalue().lower() queries = [q.strip() for q in output.splitlines()] start_transaction_sql = connection.ops.start_transaction_sql() if start_transaction_sql: self.assertNotIn(start_transaction_sql.lower(), queries) self.assertNotIn(connection.ops.end_transaction_sql().lower(), queries) @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_squashed'}) def test_sqlmigrate_ambiguous_prefix_squashed_migrations(self): msg = ( "More than one migration matches '0001' in app 'migrations'. " "Please be more specific." ) with self.assertRaisesMessage(CommandError, msg): call_command('sqlmigrate', 'migrations', '0001') @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_squashed'}) def test_sqlmigrate_squashed_migration(self): out = io.StringIO() call_command('sqlmigrate', 'migrations', '0001_squashed_0002', stdout=out) output = out.getvalue().lower() self.assertIn('-- create model author', output) self.assertIn('-- create model book', output) self.assertNotIn('-- create model tribble', output) @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_squashed'}) def test_sqlmigrate_replaced_migration(self): out = io.StringIO() call_command('sqlmigrate', 'migrations', '0001_initial', stdout=out) output = out.getvalue().lower() self.assertIn('-- create model author', output) self.assertIn('-- create model tribble', output) @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_no_operations'}) def test_migrations_no_operations(self): err = io.StringIO() call_command('sqlmigrate', 'migrations', '0001_initial', stderr=err) self.assertEqual(err.getvalue(), 'No operations found.\n') @override_settings( INSTALLED_APPS=[ "migrations.migrations_test_apps.migrated_app", "migrations.migrations_test_apps.migrated_unapplied_app", "migrations.migrations_test_apps.unmigrated_app", ], ) def test_regression_22823_unmigrated_fk_to_migrated_model(self): """ Assuming you have 3 apps, `A`, `B`, and `C`, such that: * `A` has migrations * `B` has a migration we want to apply * `C` has no migrations, but has an FK to `A` When we try to migrate "B", an exception occurs because the "B" was not included in the ProjectState that is used to detect soft-applied migrations (#22823). """ call_command('migrate', 'migrated_unapplied_app', verbosity=0) # unmigrated_app.SillyModel has a foreign key to 'migrations.Tribble', # but that model is only defined in a migration, so the global app # registry never sees it and the reference is left dangling. Remove it # to avoid problems in subsequent tests. 
del apps._pending_operations[('migrations', 'tribble')] @override_settings(INSTALLED_APPS=['migrations.migrations_test_apps.unmigrated_app_syncdb']) def test_migrate_syncdb_deferred_sql_executed_with_schemaeditor(self): """ For an app without migrations, editor.execute() is used for executing the syncdb deferred SQL. """ stdout = io.StringIO() with mock.patch.object(BaseDatabaseSchemaEditor, 'execute') as execute: call_command('migrate', run_syncdb=True, verbosity=1, stdout=stdout, no_color=True) create_table_count = len([call for call in execute.mock_calls if 'CREATE TABLE' in str(call)]) self.assertEqual(create_table_count, 2) # There's at least one deferred SQL for creating the foreign key # index. self.assertGreater(len(execute.mock_calls), 2) stdout = stdout.getvalue() self.assertIn('Synchronize unmigrated apps: unmigrated_app_syncdb', stdout) self.assertIn('Creating tables...', stdout) table_name = truncate_name('unmigrated_app_syncdb_classroom', connection.ops.max_name_length()) self.assertIn('Creating table %s' % table_name, stdout) @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations'}) def test_migrate_syncdb_app_with_migrations(self): msg = "Can't use run_syncdb with app 'migrations' as it has migrations." with self.assertRaisesMessage(CommandError, msg): call_command('migrate', 'migrations', run_syncdb=True, verbosity=0) @override_settings(INSTALLED_APPS=[ 'migrations.migrations_test_apps.unmigrated_app_syncdb', 'migrations.migrations_test_apps.unmigrated_app_simple', ]) def test_migrate_syncdb_app_label(self): """ Running migrate --run-syncdb with an app_label only creates tables for the specified app. """ stdout = io.StringIO() with mock.patch.object(BaseDatabaseSchemaEditor, 'execute') as execute: call_command('migrate', 'unmigrated_app_syncdb', run_syncdb=True, stdout=stdout) create_table_count = len([call for call in execute.mock_calls if 'CREATE TABLE' in str(call)]) self.assertEqual(create_table_count, 2) self.assertGreater(len(execute.mock_calls), 2) self.assertIn('Synchronize unmigrated app: unmigrated_app_syncdb', stdout.getvalue()) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}) def test_migrate_record_replaced(self): """ Running a single squashed migration should record all of the original replaced migrations as run. """ recorder = MigrationRecorder(connection) out = io.StringIO() call_command("migrate", "migrations", verbosity=0) call_command("showmigrations", "migrations", stdout=out, no_color=True) self.assertEqual( 'migrations\n' ' [x] 0001_squashed_0002 (2 squashed migrations)\n', out.getvalue().lower() ) applied_migrations = recorder.applied_migrations() self.assertIn(("migrations", "0001_initial"), applied_migrations) self.assertIn(("migrations", "0002_second"), applied_migrations) self.assertIn(("migrations", "0001_squashed_0002"), applied_migrations) # Rollback changes call_command("migrate", "migrations", "zero", verbosity=0) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}) def test_migrate_record_squashed(self): """ Running migrate for a squashed migration should record as run if all of the replaced migrations have been run (#25231). 
""" recorder = MigrationRecorder(connection) recorder.record_applied("migrations", "0001_initial") recorder.record_applied("migrations", "0002_second") out = io.StringIO() call_command("migrate", "migrations", verbosity=0) call_command("showmigrations", "migrations", stdout=out, no_color=True) self.assertEqual( 'migrations\n' ' [x] 0001_squashed_0002 (2 squashed migrations)\n', out.getvalue().lower() ) self.assertIn( ("migrations", "0001_squashed_0002"), recorder.applied_migrations() ) # No changes were actually applied so there is nothing to rollback @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations'}) def test_migrate_inconsistent_history(self): """ Running migrate with some migrations applied before their dependencies should not be allowed. """ recorder = MigrationRecorder(connection) recorder.record_applied("migrations", "0002_second") msg = "Migration migrations.0002_second is applied before its dependency migrations.0001_initial" with self.assertRaisesMessage(InconsistentMigrationHistory, msg): call_command("migrate") applied_migrations = recorder.applied_migrations() self.assertNotIn(("migrations", "0001_initial"), applied_migrations) @override_settings(INSTALLED_APPS=[ 'migrations.migrations_test_apps.migrated_unapplied_app', 'migrations.migrations_test_apps.migrated_app', ]) def test_migrate_not_reflected_changes(self): class NewModel1(models.Model): class Meta(): app_label = 'migrated_app' class NewModel2(models.Model): class Meta(): app_label = 'migrated_unapplied_app' out = io.StringIO() try: call_command('migrate', verbosity=0) call_command('migrate', stdout=out, no_color=True) self.assertEqual( "operations to perform:\n" " apply all migrations: migrated_app, migrated_unapplied_app\n" "running migrations:\n" " no migrations to apply.\n" " your models in app(s): 'migrated_app', " "'migrated_unapplied_app' have changes that are not yet " "reflected in a migration, and so won't be applied.\n" " run 'manage.py makemigrations' to make new migrations, and " "then re-run 'manage.py migrate' to apply them.\n", out.getvalue().lower(), ) finally: # Unmigrate everything. call_command('migrate', 'migrated_app', 'zero', verbosity=0) call_command('migrate', 'migrated_unapplied_app', 'zero', verbosity=0) class MakeMigrationsTests(MigrationTestBase): """ Tests running the makemigrations command. 
""" def setUp(self): super().setUp() self._old_models = apps.app_configs['migrations'].models.copy() def tearDown(self): apps.app_configs['migrations'].models = self._old_models apps.all_models['migrations'] = self._old_models apps.clear_cache() super().tearDown() def test_files_content(self): self.assertTableNotExists("migrations_unicodemodel") apps.register_model('migrations', UnicodeModel) with self.temporary_migration_module() as migration_dir: call_command("makemigrations", "migrations", verbosity=0) # Check for empty __init__.py file in migrations folder init_file = os.path.join(migration_dir, "__init__.py") self.assertTrue(os.path.exists(init_file)) with open(init_file) as fp: content = fp.read() self.assertEqual(content, '') # Check for existing 0001_initial.py file in migration folder initial_file = os.path.join(migration_dir, "0001_initial.py") self.assertTrue(os.path.exists(initial_file)) with open(initial_file, encoding='utf-8') as fp: content = fp.read() self.assertIn('migrations.CreateModel', content) self.assertIn('initial = True', content) self.assertIn('úñí©óðé µóðéø', content) # Meta.verbose_name self.assertIn('úñí©óðé µóðéøß', content) # Meta.verbose_name_plural self.assertIn('ÚÑÍ¢ÓÐÉ', content) # title.verbose_name self.assertIn('“Ðjáñgó”', content) # title.default def test_makemigrations_order(self): """ makemigrations should recognize number-only migrations (0001.py). """ module = 'migrations.test_migrations_order' with self.temporary_migration_module(module=module) as migration_dir: if hasattr(importlib, 'invalidate_caches'): # importlib caches os.listdir() on some platforms like macOS # (#23850). importlib.invalidate_caches() call_command('makemigrations', 'migrations', '--empty', '-n', 'a', '-v', '0') self.assertTrue(os.path.exists(os.path.join(migration_dir, '0002_a.py'))) def test_makemigrations_empty_connections(self): empty_connections = ConnectionHandler({'default': {}}) with mock.patch('django.core.management.commands.makemigrations.connections', new=empty_connections): # with no apps out = io.StringIO() call_command('makemigrations', stdout=out) self.assertIn('No changes detected', out.getvalue()) # with an app with self.temporary_migration_module() as migration_dir: call_command('makemigrations', 'migrations', verbosity=0) init_file = os.path.join(migration_dir, '__init__.py') self.assertTrue(os.path.exists(init_file)) @override_settings(INSTALLED_APPS=['migrations', 'migrations2']) def test_makemigrations_consistency_checks_respect_routers(self): """ The history consistency checks in makemigrations respect settings.DATABASE_ROUTERS. """ def patched_has_table(migration_recorder): if migration_recorder.connection is connections['other']: raise Exception('Other connection') else: return mock.DEFAULT self.assertTableNotExists('migrations_unicodemodel') apps.register_model('migrations', UnicodeModel) with mock.patch.object( MigrationRecorder, 'has_table', autospec=True, side_effect=patched_has_table) as has_table: with self.temporary_migration_module() as migration_dir: call_command("makemigrations", "migrations", verbosity=0) initial_file = os.path.join(migration_dir, "0001_initial.py") self.assertTrue(os.path.exists(initial_file)) self.assertEqual(has_table.call_count, 1) # 'default' is checked # Router says not to migrate 'other' so consistency shouldn't # be checked. 
with self.settings(DATABASE_ROUTERS=['migrations.routers.TestRouter']): call_command('makemigrations', 'migrations', verbosity=0) self.assertEqual(has_table.call_count, 2) # 'default' again # With a router that doesn't prohibit migrating 'other', # consistency is checked. with self.settings(DATABASE_ROUTERS=['migrations.routers.DefaultOtherRouter']): with self.assertRaisesMessage(Exception, 'Other connection'): call_command('makemigrations', 'migrations', verbosity=0) self.assertEqual(has_table.call_count, 4) # 'default' and 'other' # With a router that doesn't allow migrating on any database, # no consistency checks are made. with self.settings(DATABASE_ROUTERS=['migrations.routers.TestRouter']): with mock.patch.object(TestRouter, 'allow_migrate', return_value=False) as allow_migrate: call_command('makemigrations', 'migrations', verbosity=0) allow_migrate.assert_any_call('other', 'migrations', model_name='UnicodeModel') # allow_migrate() is called with the correct arguments. self.assertGreater(len(allow_migrate.mock_calls), 0) called_aliases = set() for mock_call in allow_migrate.mock_calls: _, call_args, call_kwargs = mock_call connection_alias, app_name = call_args called_aliases.add(connection_alias) # Raises an error if invalid app_name/model_name occurs. apps.get_app_config(app_name).get_model(call_kwargs['model_name']) self.assertEqual(called_aliases, set(connections)) self.assertEqual(has_table.call_count, 4) def test_failing_migration(self): # If a migration fails to serialize, it shouldn't generate an empty file. #21280 apps.register_model('migrations', UnserializableModel) with self.temporary_migration_module() as migration_dir: with self.assertRaisesMessage(ValueError, 'Cannot serialize'): call_command("makemigrations", "migrations", verbosity=0) initial_file = os.path.join(migration_dir, "0001_initial.py") self.assertFalse(os.path.exists(initial_file)) def test_makemigrations_conflict_exit(self): """ makemigrations exits if it detects a conflict. """ with self.temporary_migration_module(module="migrations.test_migrations_conflict"): with self.assertRaises(CommandError) as context: call_command("makemigrations") self.assertEqual( str(context.exception), "Conflicting migrations detected; multiple leaf nodes in the " "migration graph: (0002_conflicting_second, 0002_second in " "migrations).\n" "To fix them run 'python manage.py makemigrations --merge'" ) def test_makemigrations_merge_no_conflict(self): """ makemigrations exits if in merge mode with no conflicts. """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations"): call_command("makemigrations", merge=True, stdout=out) self.assertIn("No conflicts detected to merge.", out.getvalue()) def test_makemigrations_empty_no_app_specified(self): """ makemigrations exits if no app is specified with 'empty' mode. """ msg = 'You must supply at least one app label when using --empty.' with self.assertRaisesMessage(CommandError, msg): call_command("makemigrations", empty=True) def test_makemigrations_empty_migration(self): """ makemigrations properly constructs an empty migration. 
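
        The generated file should look roughly like this sketch (the
        whitespace-insensitive assertions below check the empty lists):

            class Migration(migrations.Migration):

                dependencies = [
                ]

                operations = [
                ]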
""" with self.temporary_migration_module() as migration_dir: call_command("makemigrations", "migrations", empty=True, verbosity=0) # Check for existing 0001_initial.py file in migration folder initial_file = os.path.join(migration_dir, "0001_initial.py") self.assertTrue(os.path.exists(initial_file)) with open(initial_file, encoding='utf-8') as fp: content = fp.read() # Remove all whitespace to check for empty dependencies and operations content = content.replace(' ', '') self.assertIn('dependencies=[\n]', content) self.assertIn('operations=[\n]', content) @override_settings(MIGRATION_MODULES={"migrations": None}) def test_makemigrations_disabled_migrations_for_app(self): """ makemigrations raises a nice error when migrations are disabled for an app. """ msg = ( "Django can't create migrations for app 'migrations' because migrations " "have been disabled via the MIGRATION_MODULES setting." ) with self.assertRaisesMessage(ValueError, msg): call_command("makemigrations", "migrations", empty=True, verbosity=0) def test_makemigrations_no_changes_no_apps(self): """ makemigrations exits when there are no changes and no apps are specified. """ out = io.StringIO() call_command("makemigrations", stdout=out) self.assertIn("No changes detected", out.getvalue()) def test_makemigrations_no_changes(self): """ makemigrations exits when there are no changes to an app. """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_no_changes"): call_command("makemigrations", "migrations", stdout=out) self.assertIn("No changes detected in app 'migrations'", out.getvalue()) def test_makemigrations_no_apps_initial(self): """ makemigrations should detect initial is needed on empty migration modules if no app provided. """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_empty"): call_command("makemigrations", stdout=out) self.assertIn("0001_initial.py", out.getvalue()) def test_makemigrations_no_init(self): """Migration directories without an __init__.py file are allowed.""" out = io.StringIO() with self.temporary_migration_module(module='migrations.test_migrations_no_init'): call_command('makemigrations', stdout=out) self.assertIn('0001_initial.py', out.getvalue()) def test_makemigrations_migrations_announce(self): """ makemigrations announces the migration at the default verbosity level. """ out = io.StringIO() with self.temporary_migration_module(): call_command("makemigrations", "migrations", stdout=out) self.assertIn("Migrations for 'migrations'", out.getvalue()) def test_makemigrations_no_common_ancestor(self): """ makemigrations fails to merge migrations with no common ancestor. """ with self.assertRaises(ValueError) as context: with self.temporary_migration_module(module="migrations.test_migrations_no_ancestor"): call_command("makemigrations", "migrations", merge=True) exception_message = str(context.exception) self.assertIn("Could not find common ancestor of", exception_message) self.assertIn("0002_second", exception_message) self.assertIn("0002_conflicting_second", exception_message) def test_makemigrations_interactive_reject(self): """ makemigrations enters and exits interactive mode properly. 
""" # Monkeypatch interactive questioner to auto reject with mock.patch('builtins.input', mock.Mock(return_value='N')): with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir: call_command("makemigrations", "migrations", name="merge", merge=True, interactive=True, verbosity=0) merge_file = os.path.join(migration_dir, '0003_merge.py') self.assertFalse(os.path.exists(merge_file)) def test_makemigrations_interactive_accept(self): """ makemigrations enters interactive mode and merges properly. """ # Monkeypatch interactive questioner to auto accept with mock.patch('builtins.input', mock.Mock(return_value='y')): out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir: call_command("makemigrations", "migrations", name="merge", merge=True, interactive=True, stdout=out) merge_file = os.path.join(migration_dir, '0003_merge.py') self.assertTrue(os.path.exists(merge_file)) self.assertIn("Created new merge migration", out.getvalue()) @mock.patch('django.db.migrations.utils.datetime') def test_makemigrations_default_merge_name(self, mock_datetime): mock_datetime.datetime.now.return_value = datetime.datetime(2016, 1, 2, 3, 4) with mock.patch('builtins.input', mock.Mock(return_value='y')): out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir: call_command("makemigrations", "migrations", merge=True, interactive=True, stdout=out) merge_file = os.path.join(migration_dir, '0003_merge_20160102_0304.py') self.assertTrue(os.path.exists(merge_file)) self.assertIn("Created new merge migration", out.getvalue()) def test_makemigrations_non_interactive_not_null_addition(self): """ Non-interactive makemigrations fails when a default is missing on a new not-null field. """ class SillyModel(models.Model): silly_field = models.BooleanField(default=False) silly_int = models.IntegerField() class Meta: app_label = "migrations" with self.assertRaises(SystemExit): with self.temporary_migration_module(module="migrations.test_migrations_no_default"): call_command("makemigrations", "migrations", interactive=False) def test_makemigrations_non_interactive_not_null_alteration(self): """ Non-interactive makemigrations fails when a default is missing on a field changed to not-null. """ class Author(models.Model): name = models.CharField(max_length=255) slug = models.SlugField() age = models.IntegerField(default=0) class Meta: app_label = "migrations" out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations"): call_command("makemigrations", "migrations", interactive=False, stdout=out) self.assertIn("Alter field slug on author", out.getvalue()) def test_makemigrations_non_interactive_no_model_rename(self): """ makemigrations adds and removes a possible model rename in non-interactive mode. """ class RenamedModel(models.Model): silly_field = models.BooleanField(default=False) class Meta: app_label = "migrations" out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_no_default"): call_command("makemigrations", "migrations", interactive=False, stdout=out) self.assertIn("Delete model SillyModel", out.getvalue()) self.assertIn("Create model RenamedModel", out.getvalue()) def test_makemigrations_non_interactive_no_field_rename(self): """ makemigrations adds and removes a possible field rename in non-interactive mode. 
""" class SillyModel(models.Model): silly_rename = models.BooleanField(default=False) class Meta: app_label = "migrations" out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_no_default"): call_command("makemigrations", "migrations", interactive=False, stdout=out) self.assertIn("Remove field silly_field from sillymodel", out.getvalue()) self.assertIn("Add field silly_rename to sillymodel", out.getvalue()) def test_makemigrations_handle_merge(self): """ makemigrations properly merges the conflicting migrations with --noinput. """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir: call_command("makemigrations", "migrations", name="merge", merge=True, interactive=False, stdout=out) merge_file = os.path.join(migration_dir, '0003_merge.py') self.assertTrue(os.path.exists(merge_file)) output = out.getvalue() self.assertIn("Merging migrations", output) self.assertIn("Branch 0002_second", output) self.assertIn("Branch 0002_conflicting_second", output) self.assertIn("Created new merge migration", output) def test_makemigration_merge_dry_run(self): """ makemigrations respects --dry-run option when fixing migration conflicts (#24427). """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir: call_command( "makemigrations", "migrations", name="merge", dry_run=True, merge=True, interactive=False, stdout=out, ) merge_file = os.path.join(migration_dir, '0003_merge.py') self.assertFalse(os.path.exists(merge_file)) output = out.getvalue() self.assertIn("Merging migrations", output) self.assertIn("Branch 0002_second", output) self.assertIn("Branch 0002_conflicting_second", output) self.assertNotIn("Created new merge migration", output) def test_makemigration_merge_dry_run_verbosity_3(self): """ `makemigrations --merge --dry-run` writes the merge migration file to stdout with `verbosity == 3` (#24427). """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir: call_command( "makemigrations", "migrations", name="merge", dry_run=True, merge=True, interactive=False, stdout=out, verbosity=3, ) merge_file = os.path.join(migration_dir, '0003_merge.py') self.assertFalse(os.path.exists(merge_file)) output = out.getvalue() self.assertIn("Merging migrations", output) self.assertIn("Branch 0002_second", output) self.assertIn("Branch 0002_conflicting_second", output) self.assertNotIn("Created new merge migration", output) # Additional output caused by verbosity 3 # The complete merge migration file that would be written self.assertIn("class Migration(migrations.Migration):", output) self.assertIn("dependencies = [", output) self.assertIn("('migrations', '0002_second')", output) self.assertIn("('migrations', '0002_conflicting_second')", output) self.assertIn("operations = [", output) self.assertIn("]", output) def test_makemigrations_dry_run(self): """ `makemigrations --dry-run` should not ask for defaults. 
""" class SillyModel(models.Model): silly_field = models.BooleanField(default=False) silly_date = models.DateField() # Added field without a default class Meta: app_label = "migrations" out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_no_default"): call_command("makemigrations", "migrations", dry_run=True, stdout=out) # Output the expected changes directly, without asking for defaults self.assertIn("Add field silly_date to sillymodel", out.getvalue()) def test_makemigrations_dry_run_verbosity_3(self): """ Allow `makemigrations --dry-run` to output the migrations file to stdout (with verbosity == 3). """ class SillyModel(models.Model): silly_field = models.BooleanField(default=False) silly_char = models.CharField(default="") class Meta: app_label = "migrations" out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_no_default"): call_command("makemigrations", "migrations", dry_run=True, stdout=out, verbosity=3) # Normal --dry-run output self.assertIn("- Add field silly_char to sillymodel", out.getvalue()) # Additional output caused by verbosity 3 # The complete migrations file that would be written self.assertIn("class Migration(migrations.Migration):", out.getvalue()) self.assertIn("dependencies = [", out.getvalue()) self.assertIn("('migrations', '0001_initial'),", out.getvalue()) self.assertIn("migrations.AddField(", out.getvalue()) self.assertIn("model_name='sillymodel',", out.getvalue()) self.assertIn("name='silly_char',", out.getvalue()) def test_makemigrations_migrations_modules_path_not_exist(self): """ makemigrations creates migrations when specifying a custom location for migration files using MIGRATION_MODULES if the custom path doesn't already exist. """ class SillyModel(models.Model): silly_field = models.BooleanField(default=False) class Meta: app_label = "migrations" out = io.StringIO() migration_module = "migrations.test_migrations_path_doesnt_exist.foo.bar" with self.temporary_migration_module(module=migration_module) as migration_dir: call_command("makemigrations", "migrations", stdout=out) # Migrations file is actually created in the expected path. initial_file = os.path.join(migration_dir, "0001_initial.py") self.assertTrue(os.path.exists(initial_file)) # Command output indicates the migration is created. self.assertIn(" - Create model SillyModel", out.getvalue()) @override_settings(MIGRATION_MODULES={'migrations': 'some.nonexistent.path'}) def test_makemigrations_migrations_modules_nonexistent_toplevel_package(self): msg = ( 'Could not locate an appropriate location to create migrations ' 'package some.nonexistent.path. Make sure the toplevel package ' 'exists and can be imported.' ) with self.assertRaisesMessage(ValueError, msg): call_command('makemigrations', 'migrations', empty=True, verbosity=0) def test_makemigrations_interactive_by_default(self): """ The user is prompted to merge by default if there are conflicts and merge is True. Answer negative to differentiate it from behavior when --noinput is specified. 
""" # Monkeypatch interactive questioner to auto reject out = io.StringIO() with mock.patch('builtins.input', mock.Mock(return_value='N')): with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir: call_command("makemigrations", "migrations", name="merge", merge=True, stdout=out) merge_file = os.path.join(migration_dir, '0003_merge.py') # This will fail if interactive is False by default self.assertFalse(os.path.exists(merge_file)) self.assertNotIn("Created new merge migration", out.getvalue()) @override_settings( INSTALLED_APPS=[ "migrations", "migrations.migrations_test_apps.unspecified_app_with_conflict"]) def test_makemigrations_unspecified_app_with_conflict_no_merge(self): """ makemigrations does not raise a CommandError when an unspecified app has conflicting migrations. """ with self.temporary_migration_module(module="migrations.test_migrations_no_changes"): call_command("makemigrations", "migrations", merge=False, verbosity=0) @override_settings( INSTALLED_APPS=[ "migrations.migrations_test_apps.migrated_app", "migrations.migrations_test_apps.unspecified_app_with_conflict"]) def test_makemigrations_unspecified_app_with_conflict_merge(self): """ makemigrations does not create a merge for an unspecified app even if it has conflicting migrations. """ # Monkeypatch interactive questioner to auto accept with mock.patch('builtins.input', mock.Mock(return_value='y')): out = io.StringIO() with self.temporary_migration_module(app_label="migrated_app") as migration_dir: call_command("makemigrations", "migrated_app", name="merge", merge=True, interactive=True, stdout=out) merge_file = os.path.join(migration_dir, '0003_merge.py') self.assertFalse(os.path.exists(merge_file)) self.assertIn("No conflicts detected to merge.", out.getvalue()) @override_settings( INSTALLED_APPS=[ "migrations.migrations_test_apps.migrated_app", "migrations.migrations_test_apps.conflicting_app_with_dependencies"]) def test_makemigrations_merge_dont_output_dependency_operations(self): """ makemigrations --merge does not output any operations from apps that don't belong to a given app. """ # Monkeypatch interactive questioner to auto accept with mock.patch('builtins.input', mock.Mock(return_value='N')): out = io.StringIO() with mock.patch('django.core.management.color.supports_color', lambda *args: False): call_command( "makemigrations", "conflicting_app_with_dependencies", merge=True, interactive=True, stdout=out ) self.assertEqual( out.getvalue().lower(), 'merging conflicting_app_with_dependencies\n' ' branch 0002_conflicting_second\n' ' - create model something\n' ' branch 0002_second\n' ' - delete model tribble\n' ' - remove field silly_field from author\n' ' - add field rating to author\n' ' - create model book\n' ) def test_makemigrations_with_custom_name(self): """ makemigrations --name generate a custom migration name. 
""" with self.temporary_migration_module() as migration_dir: def cmd(migration_count, migration_name, *args): call_command("makemigrations", "migrations", "--verbosity", "0", "--name", migration_name, *args) migration_file = os.path.join(migration_dir, "%s_%s.py" % (migration_count, migration_name)) # Check for existing migration file in migration folder self.assertTrue(os.path.exists(migration_file)) with open(migration_file, encoding='utf-8') as fp: content = fp.read() content = content.replace(" ", "") return content # generate an initial migration migration_name_0001 = "my_initial_migration" content = cmd("0001", migration_name_0001) self.assertIn("dependencies=[\n]", content) # importlib caches os.listdir() on some platforms like macOS # (#23850). if hasattr(importlib, 'invalidate_caches'): importlib.invalidate_caches() # generate an empty migration migration_name_0002 = "my_custom_migration" content = cmd("0002", migration_name_0002, "--empty") self.assertIn("dependencies=[\n('migrations','0001_%s'),\n]" % migration_name_0001, content) self.assertIn("operations=[\n]", content) def test_makemigrations_with_invalid_custom_name(self): msg = 'The migration name must be a valid Python identifier.' with self.assertRaisesMessage(CommandError, msg): call_command('makemigrations', 'migrations', '--name', 'invalid name', '--empty') def test_makemigrations_check(self): """ makemigrations --check should exit with a non-zero status when there are changes to an app requiring migrations. """ with self.temporary_migration_module(): with self.assertRaises(SystemExit): call_command("makemigrations", "--check", "migrations", verbosity=0) with self.temporary_migration_module(module="migrations.test_migrations_no_changes"): call_command("makemigrations", "--check", "migrations", verbosity=0) def test_makemigrations_migration_path_output(self): """ makemigrations should print the relative paths to the migrations unless they are outside of the current tree, in which case the absolute path should be shown. """ out = io.StringIO() apps.register_model('migrations', UnicodeModel) with self.temporary_migration_module() as migration_dir: call_command("makemigrations", "migrations", stdout=out) self.assertIn(os.path.join(migration_dir, '0001_initial.py'), out.getvalue()) def test_makemigrations_migration_path_output_valueerror(self): """ makemigrations prints the absolute path if os.path.relpath() raises a ValueError when it's impossible to obtain a relative path, e.g. on Windows if Django is installed on a different drive than where the migration files are created. """ out = io.StringIO() with self.temporary_migration_module() as migration_dir: with mock.patch('os.path.relpath', side_effect=ValueError): call_command('makemigrations', 'migrations', stdout=out) self.assertIn(os.path.join(migration_dir, '0001_initial.py'), out.getvalue()) def test_makemigrations_inconsistent_history(self): """ makemigrations should raise InconsistentMigrationHistory exception if there are some migrations applied before their dependencies. 
""" recorder = MigrationRecorder(connection) recorder.record_applied('migrations', '0002_second') msg = "Migration migrations.0002_second is applied before its dependency migrations.0001_initial" with self.temporary_migration_module(module="migrations.test_migrations"): with self.assertRaisesMessage(InconsistentMigrationHistory, msg): call_command("makemigrations") def test_makemigrations_inconsistent_history_db_failure(self): msg = ( "Got an error checking a consistent migration history performed " "for database connection 'default': could not connect to server" ) with mock.patch( 'django.db.migrations.loader.MigrationLoader.check_consistent_history', side_effect=OperationalError('could not connect to server'), ): with self.temporary_migration_module(): with self.assertWarns(RuntimeWarning) as cm: call_command('makemigrations', verbosity=0) self.assertEqual(str(cm.warning), msg) @mock.patch('builtins.input', return_value='1') @mock.patch('django.db.migrations.questioner.sys.stdin', mock.MagicMock(encoding=sys.getdefaultencoding())) def test_makemigrations_auto_now_add_interactive(self, *args): """ makemigrations prompts the user when adding auto_now_add to an existing model. """ class Entry(models.Model): title = models.CharField(max_length=255) creation_date = models.DateTimeField(auto_now_add=True) class Meta: app_label = 'migrations' # Monkeypatch interactive questioner to auto accept with mock.patch('django.db.migrations.questioner.sys.stdout', new_callable=io.StringIO) as prompt_stdout: out = io.StringIO() with self.temporary_migration_module(module='migrations.test_auto_now_add'): call_command('makemigrations', 'migrations', interactive=True, stdout=out) output = out.getvalue() prompt_output = prompt_stdout.getvalue() self.assertIn("You can accept the default 'timezone.now' by pressing 'Enter'", prompt_output) self.assertIn("Add field creation_date to entry", output) class SquashMigrationsTests(MigrationTestBase): """ Tests running the squashmigrations command. """ def test_squashmigrations_squashes(self): """ squashmigrations squashes migrations. """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations") as migration_dir: call_command('squashmigrations', 'migrations', '0002', interactive=False, stdout=out, no_color=True) squashed_migration_file = os.path.join(migration_dir, "0001_squashed_0002_second.py") self.assertTrue(os.path.exists(squashed_migration_file)) self.assertEqual( out.getvalue(), 'Will squash the following migrations:\n' ' - 0001_initial\n' ' - 0002_second\n' 'Optimizing...\n' ' Optimized from 8 operations to 2 operations.\n' 'Created new squashed migration %s\n' ' You should commit this migration but leave the old ones in place;\n' ' the new migration will be used for new installs. Once you are sure\n' ' all instances of the codebase have applied the migrations you squashed,\n' ' you can delete them.\n' % squashed_migration_file ) def test_squashmigrations_initial_attribute(self): with self.temporary_migration_module(module="migrations.test_migrations") as migration_dir: call_command("squashmigrations", "migrations", "0002", interactive=False, verbosity=0) squashed_migration_file = os.path.join(migration_dir, "0001_squashed_0002_second.py") with open(squashed_migration_file, encoding='utf-8') as fp: content = fp.read() self.assertIn("initial = True", content) def test_squashmigrations_optimizes(self): """ squashmigrations optimizes operations. 
""" out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations"): call_command("squashmigrations", "migrations", "0002", interactive=False, verbosity=1, stdout=out) self.assertIn("Optimized from 8 operations to 2 operations.", out.getvalue()) def test_ticket_23799_squashmigrations_no_optimize(self): """ squashmigrations --no-optimize doesn't optimize operations. """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations"): call_command("squashmigrations", "migrations", "0002", interactive=False, verbosity=1, no_optimize=True, stdout=out) self.assertIn("Skipping optimization", out.getvalue()) def test_squashmigrations_valid_start(self): """ squashmigrations accepts a starting migration. """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_no_changes") as migration_dir: call_command("squashmigrations", "migrations", "0002", "0003", interactive=False, verbosity=1, stdout=out) squashed_migration_file = os.path.join(migration_dir, "0002_second_squashed_0003_third.py") with open(squashed_migration_file, encoding='utf-8') as fp: content = fp.read() self.assertIn(" ('migrations', '0001_initial')", content) self.assertNotIn("initial = True", content) out = out.getvalue() self.assertNotIn(" - 0001_initial", out) self.assertIn(" - 0002_second", out) self.assertIn(" - 0003_third", out) def test_squashmigrations_invalid_start(self): """ squashmigrations doesn't accept a starting migration after the ending migration. """ with self.temporary_migration_module(module="migrations.test_migrations_no_changes"): msg = ( "The migration 'migrations.0003_third' cannot be found. Maybe " "it comes after the migration 'migrations.0002_second'" ) with self.assertRaisesMessage(CommandError, msg): call_command("squashmigrations", "migrations", "0003", "0002", interactive=False, verbosity=0) def test_squashed_name_with_start_migration_name(self): """--squashed-name specifies the new migration's name.""" squashed_name = 'squashed_name' with self.temporary_migration_module(module='migrations.test_migrations') as migration_dir: call_command( 'squashmigrations', 'migrations', '0001', '0002', squashed_name=squashed_name, interactive=False, verbosity=0, ) squashed_migration_file = os.path.join(migration_dir, '0001_%s.py' % squashed_name) self.assertTrue(os.path.exists(squashed_migration_file)) def test_squashed_name_without_start_migration_name(self): """--squashed-name also works if a start migration is omitted.""" squashed_name = 'squashed_name' with self.temporary_migration_module(module="migrations.test_migrations") as migration_dir: call_command( 'squashmigrations', 'migrations', '0001', squashed_name=squashed_name, interactive=False, verbosity=0, ) squashed_migration_file = os.path.join(migration_dir, '0001_%s.py' % squashed_name) self.assertTrue(os.path.exists(squashed_migration_file)) class AppLabelErrorTests(TestCase): """ This class inherits TestCase because MigrationTestBase uses `available_apps = ['migrations']` which means that it's the only installed app. 'django.contrib.auth' must be in INSTALLED_APPS for some of these tests. """ nonexistent_app_error = "No installed app with label 'nonexistent_app'." did_you_mean_auth_error = ( "No installed app with label 'django.contrib.auth'. Did you mean " "'auth'?" 
) def test_makemigrations_nonexistent_app_label(self): err = io.StringIO() with self.assertRaises(SystemExit): call_command('makemigrations', 'nonexistent_app', stderr=err) self.assertIn(self.nonexistent_app_error, err.getvalue()) def test_makemigrations_app_name_specified_as_label(self): err = io.StringIO() with self.assertRaises(SystemExit): call_command('makemigrations', 'django.contrib.auth', stderr=err) self.assertIn(self.did_you_mean_auth_error, err.getvalue()) def test_migrate_nonexistent_app_label(self): with self.assertRaisesMessage(CommandError, self.nonexistent_app_error): call_command('migrate', 'nonexistent_app') def test_migrate_app_name_specified_as_label(self): with self.assertRaisesMessage(CommandError, self.did_you_mean_auth_error): call_command('migrate', 'django.contrib.auth') def test_showmigrations_nonexistent_app_label(self): err = io.StringIO() with self.assertRaises(SystemExit): call_command('showmigrations', 'nonexistent_app', stderr=err) self.assertIn(self.nonexistent_app_error, err.getvalue()) def test_showmigrations_app_name_specified_as_label(self): err = io.StringIO() with self.assertRaises(SystemExit): call_command('showmigrations', 'django.contrib.auth', stderr=err) self.assertIn(self.did_you_mean_auth_error, err.getvalue()) def test_sqlmigrate_nonexistent_app_label(self): with self.assertRaisesMessage(CommandError, self.nonexistent_app_error): call_command('sqlmigrate', 'nonexistent_app', '0002') def test_sqlmigrate_app_name_specified_as_label(self): with self.assertRaisesMessage(CommandError, self.did_you_mean_auth_error): call_command('sqlmigrate', 'django.contrib.auth', '0002') def test_squashmigrations_nonexistent_app_label(self): with self.assertRaisesMessage(CommandError, self.nonexistent_app_error): call_command('squashmigrations', 'nonexistent_app', '0002') def test_squashmigrations_app_name_specified_as_label(self): with self.assertRaisesMessage(CommandError, self.did_you_mean_auth_error): call_command('squashmigrations', 'django.contrib.auth', '0002')
import datetime
import itertools
import re
from importlib import import_module
from unittest import mock
from urllib.parse import quote, urljoin

from django.apps import apps
from django.conf import settings
from django.contrib.admin.models import LogEntry
from django.contrib.auth import (
    BACKEND_SESSION_KEY, HASH_SESSION_KEY, REDIRECT_FIELD_NAME, SESSION_KEY,
)
from django.contrib.auth.forms import (
    AuthenticationForm, PasswordChangeForm, SetPasswordForm,
)
from django.contrib.auth.models import Permission, User
from django.contrib.auth.views import (
    INTERNAL_RESET_SESSION_TOKEN, LoginView, logout_then_login,
    redirect_to_login,
)
from django.contrib.contenttypes.models import ContentType
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sites.requests import RequestSite
from django.core import mail
from django.db import connection
from django.http import HttpRequest, HttpResponse
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.test import Client, TestCase, override_settings
from django.test.client import RedirectCycleError
from django.urls import NoReverseMatch, reverse, reverse_lazy
from django.utils.http import urlsafe_base64_encode

from .client import PasswordResetConfirmClient
from .models import CustomUser, UUIDUser
from .settings import AUTH_TEMPLATES


@override_settings(
    LANGUAGES=[('en', 'English')],
    LANGUAGE_CODE='en',
    TEMPLATES=AUTH_TEMPLATES,
    ROOT_URLCONF='auth_tests.urls',
)
class AuthViewsTestCase(TestCase):
    """
    Helper base class for all the following test cases.
    """
    @classmethod
    def setUpTestData(cls):
        cls.u1 = User.objects.create_user(username='testclient', password='password', email='[email protected]')
        cls.u3 = User.objects.create_user(username='staff', password='password', email='[email protected]')

    def login(self, username='testclient', password='password'):
        response = self.client.post('/login/', {
            'username': username,
            'password': password,
        })
        self.assertIn(SESSION_KEY, self.client.session)
        return response

    def logout(self):
        response = self.client.get('/admin/logout/')
        self.assertEqual(response.status_code, 200)
        self.assertNotIn(SESSION_KEY, self.client.session)

    def assertFormError(self, response, error):
        """Assert that error is found in response.context['form'] errors"""
        form_errors = list(itertools.chain(*response.context['form'].errors.values()))
        self.assertIn(str(error), form_errors)


@override_settings(ROOT_URLCONF='django.contrib.auth.urls')
class AuthViewNamedURLTests(AuthViewsTestCase):

    def test_named_urls(self):
        "Named URLs should be reversible"
        expected_named_urls = [
            ('login', [], {}),
            ('logout', [], {}),
            ('password_change', [], {}),
            ('password_change_done', [], {}),
            ('password_reset', [], {}),
            ('password_reset_done', [], {}),
            ('password_reset_confirm', [], {
                'uidb64': 'aaaaaaa',
                'token': '1111-aaaaa',
            }),
            ('password_reset_complete', [], {}),
        ]
        for name, args, kwargs in expected_named_urls:
            with self.subTest(name=name):
                try:
                    reverse(name, args=args, kwargs=kwargs)
                except NoReverseMatch:
                    self.fail("Reversal of url named '%s' failed with NoReverseMatch" % name)


class PasswordResetTest(AuthViewsTestCase):

    def setUp(self):
        self.client = PasswordResetConfirmClient()

    def test_email_not_found(self):
        """If the provided email is not registered, don't raise any error but
        also don't send any email."""
        response = self.client.get('/password_reset/')
        self.assertEqual(response.status_code, 200)
        response = self.client.post('/password_reset/', {'email': '[email protected]'})
        self.assertEqual(response.status_code,
302) self.assertEqual(len(mail.outbox), 0) def test_email_found(self): "Email is sent if a valid email address is provided for password reset" response = self.client.post('/password_reset/', {'email': '[email protected]'}) self.assertEqual(response.status_code, 302) self.assertEqual(len(mail.outbox), 1) self.assertIn("http://", mail.outbox[0].body) self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email) # optional multipart text/html email has been added. Make sure original, # default functionality is 100% the same self.assertFalse(mail.outbox[0].message().is_multipart()) def test_extra_email_context(self): """ extra_email_context should be available in the email template context. """ response = self.client.post( '/password_reset_extra_email_context/', {'email': '[email protected]'}, ) self.assertEqual(response.status_code, 302) self.assertEqual(len(mail.outbox), 1) self.assertIn('Email email context: "Hello!"', mail.outbox[0].body) self.assertIn('http://custom.example.com/reset/', mail.outbox[0].body) def test_html_mail_template(self): """ A multipart email with text/plain and text/html is sent if the html_email_template parameter is passed to the view """ response = self.client.post('/password_reset/html_email_template/', {'email': '[email protected]'}) self.assertEqual(response.status_code, 302) self.assertEqual(len(mail.outbox), 1) message = mail.outbox[0].message() self.assertEqual(len(message.get_payload()), 2) self.assertTrue(message.is_multipart()) self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain') self.assertEqual(message.get_payload(1).get_content_type(), 'text/html') self.assertNotIn('<html>', message.get_payload(0).get_payload()) self.assertIn('<html>', message.get_payload(1).get_payload()) def test_email_found_custom_from(self): "Email is sent if a valid email address is provided for password reset when a custom from_email is provided." response = self.client.post('/password_reset_from_email/', {'email': '[email protected]'}) self.assertEqual(response.status_code, 302) self.assertEqual(len(mail.outbox), 1) self.assertEqual("[email protected]", mail.outbox[0].from_email) # Skip any 500 handler action (like sending more mail...) @override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True) def test_poisoned_http_host(self): "Poisoned HTTP_HOST headers can't be used for reset emails" # This attack is based on the way browsers handle URLs. The colon # should be used to separate the port, but if the URL contains an @, # the colon is interpreted as part of a username for login purposes, # making 'evil.com' the request domain. Since HTTP_HOST is used to # produce a meaningful reset URL, we need to be certain that the # HTTP_HOST header isn't poisoned. This is done as a check when get_host() # is invoked, but we check here as a practical consequence. with self.assertLogs('django.security.DisallowedHost', 'ERROR'): response = self.client.post( '/password_reset/', {'email': '[email protected]'}, HTTP_HOST='www.example:[email protected]' ) self.assertEqual(response.status_code, 400) self.assertEqual(len(mail.outbox), 0) # Skip any 500 handler action (like sending more mail...) 
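
    # A hedged illustration of the parsing ambiguity the two poisoning tests
    # exploit: everything before the '@' in the netloc is treated as
    # userinfo, so the attacker's domain becomes the host.
    #
    #     >>> from urllib.parse import urlsplit
    #     >>> urlsplit('http://www.example:[email protected]/').hostname
    #     'evil.com'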
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True) def test_poisoned_http_host_admin_site(self): "Poisoned HTTP_HOST headers can't be used for reset emails on admin views" with self.assertLogs('django.security.DisallowedHost', 'ERROR'): response = self.client.post( '/admin_password_reset/', {'email': '[email protected]'}, HTTP_HOST='www.example:[email protected]' ) self.assertEqual(response.status_code, 400) self.assertEqual(len(mail.outbox), 0) def _test_confirm_start(self): # Start by creating the email self.client.post('/password_reset/', {'email': '[email protected]'}) self.assertEqual(len(mail.outbox), 1) return self._read_signup_email(mail.outbox[0]) def _read_signup_email(self, email): urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body) self.assertIsNotNone(urlmatch, "No URL found in sent email") return urlmatch[0], urlmatch[1] def test_confirm_valid(self): url, path = self._test_confirm_start() response = self.client.get(path) # redirect to a 'complete' page: self.assertContains(response, "Please enter your new password") def test_confirm_invalid(self): url, path = self._test_confirm_start() # Let's munge the token in the path, but keep the same length, # in case the URLconf will reject a different length. path = path[:-5] + ("0" * 4) + path[-1] response = self.client.get(path) self.assertContains(response, "The password reset link was invalid") def test_confirm_invalid_user(self): # A nonexistent user returns a 200 response, not a 404. response = self.client.get('/reset/123456/1-1/') self.assertContains(response, "The password reset link was invalid") def test_confirm_overflow_user(self): # A base36 user id that overflows int returns a 200 response. response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/') self.assertContains(response, "The password reset link was invalid") def test_confirm_invalid_post(self): # Same as test_confirm_invalid, but trying to do a POST instead. url, path = self._test_confirm_start() path = path[:-5] + ("0" * 4) + path[-1] self.client.post(path, { 'new_password1': 'anewpassword', 'new_password2': ' anewpassword', }) # Check the password has not been changed u = User.objects.get(email='[email protected]') self.assertTrue(not u.check_password("anewpassword")) def test_confirm_invalid_hash(self): """A POST with an invalid token is rejected.""" u = User.objects.get(email='[email protected]') original_password = u.password url, path = self._test_confirm_start() path_parts = path.split('-') path_parts[-1] = ("0") * 20 + '/' path = '-'.join(path_parts) response = self.client.post(path, { 'new_password1': 'anewpassword', 'new_password2': 'anewpassword', }) self.assertIs(response.context['validlink'], False) u.refresh_from_db() self.assertEqual(original_password, u.password) # password hasn't changed def test_confirm_complete(self): url, path = self._test_confirm_start() response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'}) # Check the password has been changed u = User.objects.get(email='[email protected]') self.assertTrue(u.check_password("anewpassword")) # The reset token is deleted from the session. 
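        # (A hedged note: the confirm view stashes the token in the session
        # under INTERNAL_RESET_SESSION_TOKEN while the user is on the
        # set-password URL, and removes it once the new password is saved.)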
self.assertNotIn(INTERNAL_RESET_SESSION_TOKEN, self.client.session) # Check we can't use the link again response = self.client.get(path) self.assertContains(response, "The password reset link was invalid") def test_confirm_different_passwords(self): url, path = self._test_confirm_start() response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'x'}) self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch']) def test_reset_redirect_default(self): response = self.client.post('/password_reset/', {'email': '[email protected]'}) self.assertRedirects(response, '/password_reset/done/', fetch_redirect_response=False) def test_reset_custom_redirect(self): response = self.client.post('/password_reset/custom_redirect/', {'email': '[email protected]'}) self.assertRedirects(response, '/custom/', fetch_redirect_response=False) def test_reset_custom_redirect_named(self): response = self.client.post('/password_reset/custom_redirect/named/', {'email': '[email protected]'}) self.assertRedirects(response, '/password_reset/', fetch_redirect_response=False) def test_confirm_redirect_default(self): url, path = self._test_confirm_start() response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'}) self.assertRedirects(response, '/reset/done/', fetch_redirect_response=False) def test_confirm_redirect_custom(self): url, path = self._test_confirm_start() path = path.replace('/reset/', '/reset/custom/') response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'}) self.assertRedirects(response, '/custom/', fetch_redirect_response=False) def test_confirm_redirect_custom_named(self): url, path = self._test_confirm_start() path = path.replace('/reset/', '/reset/custom/named/') response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'}) self.assertRedirects(response, '/password_reset/', fetch_redirect_response=False) def test_confirm_custom_reset_url_token(self): url, path = self._test_confirm_start() path = path.replace('/reset/', '/reset/custom/token/') self.client.reset_url_token = 'set-passwordcustom' response = self.client.post( path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'}, ) self.assertRedirects(response, '/reset/done/', fetch_redirect_response=False) def test_confirm_login_post_reset(self): url, path = self._test_confirm_start() path = path.replace('/reset/', '/reset/post_reset_login/') response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'}) self.assertRedirects(response, '/reset/done/', fetch_redirect_response=False) self.assertIn(SESSION_KEY, self.client.session) @override_settings( AUTHENTICATION_BACKENDS=[ 'django.contrib.auth.backends.ModelBackend', 'django.contrib.auth.backends.AllowAllUsersModelBackend', ] ) def test_confirm_login_post_reset_custom_backend(self): # This backend is specified in the URL pattern. 
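        # Roughly like this sketch (the actual auth_tests URLconf may
        # differ):
        #
        #     path(
        #         'reset/post_reset_login_custom_backend/<uidb64>/<token>/',
        #         views.PasswordResetConfirmView.as_view(
        #             post_reset_login=True,
        #             post_reset_login_backend=(
        #                 'django.contrib.auth.backends.AllowAllUsersModelBackend'
        #             ),
        #         ),
        #     )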
        backend = 'django.contrib.auth.backends.AllowAllUsersModelBackend'
        url, path = self._test_confirm_start()
        path = path.replace('/reset/', '/reset/post_reset_login_custom_backend/')
        response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'})
        self.assertRedirects(response, '/reset/done/', fetch_redirect_response=False)
        self.assertIn(SESSION_KEY, self.client.session)
        self.assertEqual(self.client.session[BACKEND_SESSION_KEY], backend)

    def test_confirm_login_post_reset_already_logged_in(self):
        url, path = self._test_confirm_start()
        path = path.replace('/reset/', '/reset/post_reset_login/')
        self.login()
        response = self.client.post(path, {'new_password1': 'anewpassword', 'new_password2': 'anewpassword'})
        self.assertRedirects(response, '/reset/done/', fetch_redirect_response=False)
        self.assertIn(SESSION_KEY, self.client.session)

    def test_confirm_display_user_from_form(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        # The password_reset_confirm() view passes the user object to the
        # ``SetPasswordForm``, even on GET requests (#16919). For this test,
        # ``{{ form.user }}`` is rendered in the template
        # registration/password_reset_confirm.html.
        username = User.objects.get(email='[email protected]').username
        self.assertContains(response, "Hello, %s." % username)
        # However, the view should NOT pass any user object on a form if the
        # password reset link was invalid.
        response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
        self.assertContains(response, "Hello, .")

    def test_confirm_link_redirects_to_set_password_page(self):
        url, path = self._test_confirm_start()
        # Don't use PasswordResetConfirmClient (self.client) here which
        # automatically fetches the redirect page.
        client = Client()
        response = client.get(path)
        token = response.resolver_match.kwargs['token']
        uuidb64 = response.resolver_match.kwargs['uidb64']
        self.assertRedirects(response, '/reset/%s/set-password/' % uuidb64)
        self.assertEqual(client.session['_password_reset_token'], token)

    def test_confirm_custom_reset_url_token_link_redirects_to_set_password_page(self):
        url, path = self._test_confirm_start()
        path = path.replace('/reset/', '/reset/custom/token/')
        client = Client()
        response = client.get(path)
        token = response.resolver_match.kwargs['token']
        uuidb64 = response.resolver_match.kwargs['uidb64']
        self.assertRedirects(response, '/reset/custom/token/%s/set-passwordcustom/' % uuidb64)
        self.assertEqual(client.session['_password_reset_token'], token)

    def test_invalid_link_if_going_directly_to_the_final_reset_password_url(self):
        url, path = self._test_confirm_start()
        _, uuidb64, _ = path.strip('/').split('/')
        response = Client().get('/reset/%s/set-password/' % uuidb64)
        self.assertContains(response, 'The password reset link was invalid')


@override_settings(AUTH_USER_MODEL='auth_tests.CustomUser')
class CustomUserPasswordResetTest(AuthViewsTestCase):
    user_email = '[email protected]'

    @classmethod
    def setUpTestData(cls):
        cls.u1 = CustomUser.custom_objects.create(
            email='[email protected]',
            date_of_birth=datetime.date(1976, 11, 8),
        )
        cls.u1.set_password('password')
        cls.u1.save()

    def setUp(self):
        self.client = PasswordResetConfirmClient()

    def _test_confirm_start(self):
        # Start by creating the email
        response = self.client.post('/password_reset/', {'email': self.user_email})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        return self._read_signup_email(mail.outbox[0])

    def _read_signup_email(self, email):
        urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)",
email.body)
        self.assertIsNotNone(urlmatch, "No URL found in sent email")
        return urlmatch[0], urlmatch[1]

    def test_confirm_valid_custom_user(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        # The GET renders the set-password form:
        self.assertContains(response, "Please enter your new password")
        # then submit a new password
        response = self.client.post(path, {
            'new_password1': 'anewpassword',
            'new_password2': 'anewpassword',
        })
        self.assertRedirects(response, '/reset/done/')


@override_settings(AUTH_USER_MODEL='auth_tests.UUIDUser')
class UUIDUserPasswordResetTest(CustomUserPasswordResetTest):

    def _test_confirm_start(self):
        # Create the user directly instead of loading it from a fixture.
        UUIDUser.objects.create_user(
            email=self.user_email,
            username='foo',
            password='foo',
        )
        return super()._test_confirm_start()

    def test_confirm_invalid_uuid(self):
        """A uidb64 that decodes to a non-UUID doesn't crash."""
        _, path = self._test_confirm_start()
        invalid_uidb64 = urlsafe_base64_encode(b'INVALID_UUID')
        first, _uuidb64_, second = path.strip('/').split('/')
        response = self.client.get('/' + '/'.join((first, invalid_uidb64, second)) + '/')
        self.assertContains(response, 'The password reset link was invalid')


class ChangePasswordTest(AuthViewsTestCase):

    def fail_login(self):
        response = self.client.post('/login/', {
            'username': 'testclient',
            'password': 'password',
        })
        self.assertFormError(response, AuthenticationForm.error_messages['invalid_login'] % {
            'username': User._meta.get_field('username').verbose_name
        })

    def logout(self):
        self.client.get('/logout/')

    def test_password_change_fails_with_invalid_old_password(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'donuts',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertFormError(response, PasswordChangeForm.error_messages['password_incorrect'])

    def test_password_change_fails_with_mismatched_passwords(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'donuts',
        })
        self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])

    def test_password_change_succeeds(self):
        self.login()
        self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.fail_login()
        self.login(password='password1')

    def test_password_change_done_succeeds(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertRedirects(response, '/password_change/done/', fetch_redirect_response=False)

    @override_settings(LOGIN_URL='/login/')
    def test_password_change_done_fails(self):
        response = self.client.get('/password_change/done/')
        self.assertRedirects(response, '/login/?next=/password_change/done/', fetch_redirect_response=False)

    def test_password_change_redirect_default(self):
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertRedirects(response, '/password_change/done/', fetch_redirect_response=False)

    def test_password_change_redirect_custom(self):
        self.login()
        response = self.client.post('/password_change/custom/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertRedirects(response, '/custom/', fetch_redirect_response=False)

    def test_password_change_redirect_custom_named(self):
        self.login()
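        # '/password_change/custom/named/' presumably points PasswordChangeView
        # at a named success_url (e.g. reverse_lazy('password_reset')) in the
        # test URLconf; the redirect asserted below is the resolved URL.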
response = self.client.post('/password_change/custom/named/', { 'old_password': 'password', 'new_password1': 'password1', 'new_password2': 'password1', }) self.assertRedirects(response, '/password_reset/', fetch_redirect_response=False) class SessionAuthenticationTests(AuthViewsTestCase): def test_user_password_change_updates_session(self): """ #21649 - Ensure contrib.auth.views.password_change updates the user's session auth hash after a password change so the session isn't logged out. """ self.login() original_session_key = self.client.session.session_key response = self.client.post('/password_change/', { 'old_password': 'password', 'new_password1': 'password1', 'new_password2': 'password1', }) # if the hash isn't updated, retrieving the redirection page will fail. self.assertRedirects(response, '/password_change/done/') # The session key is rotated. self.assertNotEqual(original_session_key, self.client.session.session_key) class LoginTest(AuthViewsTestCase): def test_current_site_in_context_after_login(self): response = self.client.get(reverse('login')) self.assertEqual(response.status_code, 200) if apps.is_installed('django.contrib.sites'): Site = apps.get_model('sites.Site') site = Site.objects.get_current() self.assertEqual(response.context['site'], site) self.assertEqual(response.context['site_name'], site.name) else: self.assertIsInstance(response.context['site'], RequestSite) self.assertIsInstance(response.context['form'], AuthenticationForm) def test_security_check(self): login_url = reverse('login') # These URLs should not pass the security check. bad_urls = ( 'http://example.com', 'http:///example.com', 'https://example.com', 'ftp://example.com', '///example.com', '//example.com', 'javascript:alert("XSS")', ) for bad_url in bad_urls: with self.subTest(bad_url=bad_url): nasty_url = '%(url)s?%(next)s=%(bad_url)s' % { 'url': login_url, 'next': REDIRECT_FIELD_NAME, 'bad_url': quote(bad_url), } response = self.client.post(nasty_url, { 'username': 'testclient', 'password': 'password', }) self.assertEqual(response.status_code, 302) self.assertNotIn(bad_url, response.url, '%s should be blocked' % bad_url) # These URLs should pass the security check. good_urls = ( '/view/?param=http://example.com', '/view/?param=https://example.com', '/view?param=ftp://example.com', 'view/?param=//example.com', 'https://testserver/', 'HTTPS://testserver/', '//testserver/', '/url%20with%20spaces/', ) for good_url in good_urls: with self.subTest(good_url=good_url): safe_url = '%(url)s?%(next)s=%(good_url)s' % { 'url': login_url, 'next': REDIRECT_FIELD_NAME, 'good_url': quote(good_url), } response = self.client.post(safe_url, { 'username': 'testclient', 'password': 'password', }) self.assertEqual(response.status_code, 302) self.assertIn(good_url, response.url, '%s should be allowed' % good_url) def test_security_check_https(self): login_url = reverse('login') non_https_next_url = 'http://testserver/path' not_secured_url = '%(url)s?%(next)s=%(next_url)s' % { 'url': login_url, 'next': REDIRECT_FIELD_NAME, 'next_url': quote(non_https_next_url), } post_data = { 'username': 'testclient', 'password': 'password', } response = self.client.post(not_secured_url, post_data, secure=True) self.assertEqual(response.status_code, 302) self.assertNotEqual(response.url, non_https_next_url) self.assertEqual(response.url, settings.LOGIN_REDIRECT_URL) def test_login_form_contains_request(self): # The custom authentication form for this login requires a request to # initialize it. 
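        # A minimal sketch of such a form (illustrative only; the actual form
        # is defined alongside the test URLconf):
        #
        #     class CustomRequestAuthenticationForm(AuthenticationForm):
        #         def __init__(self, request, *args, **kwargs):
        #             assert request is not None
        #             super().__init__(request, *args, **kwargs)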
response = self.client.post('/custom_request_auth_login/', {
            'username': 'testclient',
            'password': 'password',
        })
        # The login was successful.
        self.assertRedirects(response, settings.LOGIN_REDIRECT_URL, fetch_redirect_response=False)

    def test_login_csrf_rotate(self):
        """
        Makes sure that a login rotates the currently-used CSRF token.
        """
        def get_response(request):
            return HttpResponse()

        # Do a GET to establish a CSRF token
        # The test client isn't used here as it's a test for middleware.
        req = HttpRequest()
        CsrfViewMiddleware(get_response).process_view(req, LoginView.as_view(), (), {})
        # get_token() triggers CSRF token inclusion in the response
        get_token(req)
        resp = CsrfViewMiddleware(LoginView.as_view())(req)
        csrf_cookie = resp.cookies.get(settings.CSRF_COOKIE_NAME, None)
        token1 = csrf_cookie.coded_value

        # Prepare the POST request
        req = HttpRequest()
        req.COOKIES[settings.CSRF_COOKIE_NAME] = token1
        req.method = "POST"
        req.POST = {'username': 'testclient', 'password': 'password', 'csrfmiddlewaretoken': token1}

        # Use POST request to log in
        SessionMiddleware(get_response).process_request(req)
        CsrfViewMiddleware(get_response).process_view(req, LoginView.as_view(), (), {})
        req.META["SERVER_NAME"] = "testserver"  # Required to have redirect work in login view
        req.META["SERVER_PORT"] = 80
        resp = CsrfViewMiddleware(LoginView.as_view())(req)
        csrf_cookie = resp.cookies.get(settings.CSRF_COOKIE_NAME, None)
        token2 = csrf_cookie.coded_value

        # Check the CSRF token switched
        self.assertNotEqual(token1, token2)

    def test_session_key_flushed_on_login(self):
        """
        To avoid reusing another user's session, ensure a new, empty session is
        created if the existing session corresponds to a different authenticated
        user.
        """
        self.login()
        original_session_key = self.client.session.session_key

        self.login(username='staff')
        self.assertNotEqual(original_session_key, self.client.session.session_key)

    def test_session_key_flushed_on_login_after_password_change(self):
        """
        As above, but same user logging in after a password change.
        """
        self.login()
        original_session_key = self.client.session.session_key

        # If no password change, session key should not be flushed.
        self.login()
        self.assertEqual(original_session_key, self.client.session.session_key)

        user = User.objects.get(username='testclient')
        user.set_password('foobar')
        user.save()

        self.login(password='foobar')
        self.assertNotEqual(original_session_key, self.client.session.session_key)

    def test_legacy_session_key_flushed_on_login(self):
        # RemovedInDjango40Warning.
        user = User.objects.get(username='testclient')
        engine = import_module(settings.SESSION_ENGINE)
        session = engine.SessionStore()
        session[SESSION_KEY] = user.id
        session[HASH_SESSION_KEY] = user._legacy_get_session_auth_hash()
        session.save()
        original_session_key = session.session_key
        self.client.cookies[settings.SESSION_COOKIE_NAME] = original_session_key
        # Legacy session key is flushed on login.
        self.login()
        self.assertNotEqual(original_session_key, self.client.session.session_key)
        # Legacy session key is flushed after a password change.
        user.set_password('password_2')
        user.save()
        original_session_key = session.session_key
        self.client.cookies[settings.SESSION_COOKIE_NAME] = original_session_key
        self.login(password='password_2')
        self.assertNotEqual(original_session_key, self.client.session.session_key)

    def test_login_session_without_hash_session_key(self):
        """
        Session without django.contrib.auth.HASH_SESSION_KEY should log in
        without an exception.
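        Such sessions can exist if they were created before the session auth
        hash was introduced or, as below, were populated by hand.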
""" user = User.objects.get(username='testclient') engine = import_module(settings.SESSION_ENGINE) session = engine.SessionStore() session[SESSION_KEY] = user.id session.save() original_session_key = session.session_key self.client.cookies[settings.SESSION_COOKIE_NAME] = original_session_key self.login() self.assertNotEqual(original_session_key, self.client.session.session_key) class LoginURLSettings(AuthViewsTestCase): """Tests for settings.LOGIN_URL.""" def assertLoginURLEquals(self, url): response = self.client.get('/login_required/') self.assertRedirects(response, url, fetch_redirect_response=False) @override_settings(LOGIN_URL='/login/') def test_standard_login_url(self): self.assertLoginURLEquals('/login/?next=/login_required/') @override_settings(LOGIN_URL='login') def test_named_login_url(self): self.assertLoginURLEquals('/login/?next=/login_required/') @override_settings(LOGIN_URL='http://remote.example.com/login') def test_remote_login_url(self): quoted_next = quote('http://testserver/login_required/') expected = 'http://remote.example.com/login?next=%s' % quoted_next self.assertLoginURLEquals(expected) @override_settings(LOGIN_URL='https:///login/') def test_https_login_url(self): quoted_next = quote('http://testserver/login_required/') expected = 'https:///login/?next=%s' % quoted_next self.assertLoginURLEquals(expected) @override_settings(LOGIN_URL='/login/?pretty=1') def test_login_url_with_querystring(self): self.assertLoginURLEquals('/login/?pretty=1&next=/login_required/') @override_settings(LOGIN_URL='http://remote.example.com/login/?next=/default/') def test_remote_login_url_with_next_querystring(self): quoted_next = quote('http://testserver/login_required/') expected = 'http://remote.example.com/login/?next=%s' % quoted_next self.assertLoginURLEquals(expected) @override_settings(LOGIN_URL=reverse_lazy('login')) def test_lazy_login_url(self): self.assertLoginURLEquals('/login/?next=/login_required/') class LoginRedirectUrlTest(AuthViewsTestCase): """Tests for settings.LOGIN_REDIRECT_URL.""" def assertLoginRedirectURLEqual(self, url): response = self.login() self.assertRedirects(response, url, fetch_redirect_response=False) def test_default(self): self.assertLoginRedirectURLEqual('/accounts/profile/') @override_settings(LOGIN_REDIRECT_URL='/custom/') def test_custom(self): self.assertLoginRedirectURLEqual('/custom/') @override_settings(LOGIN_REDIRECT_URL='password_reset') def test_named(self): self.assertLoginRedirectURLEqual('/password_reset/') @override_settings(LOGIN_REDIRECT_URL='http://remote.example.com/welcome/') def test_remote(self): self.assertLoginRedirectURLEqual('http://remote.example.com/welcome/') class RedirectToLoginTests(AuthViewsTestCase): """Tests for the redirect_to_login view""" @override_settings(LOGIN_URL=reverse_lazy('login')) def test_redirect_to_login_with_lazy(self): login_redirect_response = redirect_to_login(next='/else/where/') expected = '/login/?next=/else/where/' self.assertEqual(expected, login_redirect_response.url) @override_settings(LOGIN_URL=reverse_lazy('login')) def test_redirect_to_login_with_lazy_and_unicode(self): login_redirect_response = redirect_to_login(next='/else/where/झ/') expected = '/login/?next=/else/where/%E0%A4%9D/' self.assertEqual(expected, login_redirect_response.url) class LogoutThenLoginTests(AuthViewsTestCase): """Tests for the logout_then_login view""" def confirm_logged_out(self): self.assertNotIn(SESSION_KEY, self.client.session) @override_settings(LOGIN_URL='/login/') def 
test_default_logout_then_login(self): self.login() req = HttpRequest() req.method = 'GET' req.session = self.client.session response = logout_then_login(req) self.confirm_logged_out() self.assertRedirects(response, '/login/', fetch_redirect_response=False) def test_logout_then_login_with_custom_login(self): self.login() req = HttpRequest() req.method = 'GET' req.session = self.client.session response = logout_then_login(req, login_url='/custom/') self.confirm_logged_out() self.assertRedirects(response, '/custom/', fetch_redirect_response=False) class LoginRedirectAuthenticatedUser(AuthViewsTestCase): dont_redirect_url = '/login/redirect_authenticated_user_default/' do_redirect_url = '/login/redirect_authenticated_user/' def test_default(self): """Stay on the login page by default.""" self.login() response = self.client.get(self.dont_redirect_url) self.assertEqual(response.status_code, 200) self.assertEqual(response.context['next'], '') def test_guest(self): """If not logged in, stay on the same page.""" response = self.client.get(self.do_redirect_url) self.assertEqual(response.status_code, 200) def test_redirect(self): """If logged in, go to default redirected URL.""" self.login() response = self.client.get(self.do_redirect_url) self.assertRedirects(response, '/accounts/profile/', fetch_redirect_response=False) @override_settings(LOGIN_REDIRECT_URL='/custom/') def test_redirect_url(self): """If logged in, go to custom redirected URL.""" self.login() response = self.client.get(self.do_redirect_url) self.assertRedirects(response, '/custom/', fetch_redirect_response=False) def test_redirect_param(self): """If next is specified as a GET parameter, go there.""" self.login() url = self.do_redirect_url + '?next=/custom_next/' response = self.client.get(url) self.assertRedirects(response, '/custom_next/', fetch_redirect_response=False) def test_redirect_loop(self): """ Detect a redirect loop if LOGIN_REDIRECT_URL is not correctly set, with and without custom parameters. """ self.login() msg = ( "Redirection loop for authenticated user detected. Check that " "your LOGIN_REDIRECT_URL doesn't point to a login page." ) with self.settings(LOGIN_REDIRECT_URL=self.do_redirect_url): with self.assertRaisesMessage(ValueError, msg): self.client.get(self.do_redirect_url) url = self.do_redirect_url + '?bla=2' with self.assertRaisesMessage(ValueError, msg): self.client.get(url) def test_permission_required_not_logged_in(self): # Not logged in ... with self.settings(LOGIN_URL=self.do_redirect_url): # redirected to login. response = self.client.get('/permission_required_redirect/', follow=True) self.assertEqual(response.status_code, 200) # exception raised. response = self.client.get('/permission_required_exception/', follow=True) self.assertEqual(response.status_code, 403) # redirected to login. response = self.client.get('/login_and_permission_required_exception/', follow=True) self.assertEqual(response.status_code, 200) def test_permission_required_logged_in(self): self.login() # Already logged in... with self.settings(LOGIN_URL=self.do_redirect_url): # redirect loop encountered. with self.assertRaisesMessage(RedirectCycleError, 'Redirect loop detected.'): self.client.get('/permission_required_redirect/', follow=True) # exception raised. response = self.client.get('/permission_required_exception/', follow=True) self.assertEqual(response.status_code, 403) # exception raised. 
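            # (The user is authenticated, so login_required passes and the
            # permission check, presumably configured with raise_exception=True
            # in the test URLconf, raises PermissionDenied.)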
response = self.client.get('/login_and_permission_required_exception/', follow=True) self.assertEqual(response.status_code, 403) class LoginSuccessURLAllowedHostsTest(AuthViewsTestCase): def test_success_url_allowed_hosts_same_host(self): response = self.client.post('/login/allowed_hosts/', { 'username': 'testclient', 'password': 'password', 'next': 'https://testserver/home', }) self.assertIn(SESSION_KEY, self.client.session) self.assertRedirects(response, 'https://testserver/home', fetch_redirect_response=False) def test_success_url_allowed_hosts_safe_host(self): response = self.client.post('/login/allowed_hosts/', { 'username': 'testclient', 'password': 'password', 'next': 'https://otherserver/home', }) self.assertIn(SESSION_KEY, self.client.session) self.assertRedirects(response, 'https://otherserver/home', fetch_redirect_response=False) def test_success_url_allowed_hosts_unsafe_host(self): response = self.client.post('/login/allowed_hosts/', { 'username': 'testclient', 'password': 'password', 'next': 'https://evil/home', }) self.assertIn(SESSION_KEY, self.client.session) self.assertRedirects(response, '/accounts/profile/', fetch_redirect_response=False) class LogoutTest(AuthViewsTestCase): def confirm_logged_out(self): self.assertNotIn(SESSION_KEY, self.client.session) def test_logout_default(self): "Logout without next_page option renders the default template" self.login() response = self.client.get('/logout/') self.assertContains(response, 'Logged out') self.confirm_logged_out() def test_logout_with_post(self): self.login() response = self.client.post('/logout/') self.assertContains(response, 'Logged out') self.confirm_logged_out() def test_14377(self): # Bug 14377 self.login() response = self.client.get('/logout/') self.assertIn('site', response.context) def test_logout_doesnt_cache(self): """ The logout() view should send "no-cache" headers for reasons described in #25490. """ response = self.client.get('/logout/') self.assertIn('no-store', response['Cache-Control']) def test_logout_with_overridden_redirect_url(self): # Bug 11223 self.login() response = self.client.get('/logout/next_page/') self.assertRedirects(response, '/somewhere/', fetch_redirect_response=False) response = self.client.get('/logout/next_page/?next=/login/') self.assertRedirects(response, '/login/', fetch_redirect_response=False) self.confirm_logged_out() def test_logout_with_next_page_specified(self): "Logout with next_page option given redirects to specified resource" self.login() response = self.client.get('/logout/next_page/') self.assertRedirects(response, '/somewhere/', fetch_redirect_response=False) self.confirm_logged_out() def test_logout_with_redirect_argument(self): "Logout with query string redirects to specified resource" self.login() response = self.client.get('/logout/?next=/login/') self.assertRedirects(response, '/login/', fetch_redirect_response=False) self.confirm_logged_out() def test_logout_with_custom_redirect_argument(self): "Logout with custom query string redirects to specified resource" self.login() response = self.client.get('/logout/custom_query/?follow=/somewhere/') self.assertRedirects(response, '/somewhere/', fetch_redirect_response=False) self.confirm_logged_out() def test_logout_with_named_redirect(self): "Logout resolves names or URLs passed as next_page." 
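        # next_page is presumably given as the URL name 'password_reset' in the
        # test URLconf, which the view resolves to /password_reset/.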
self.login() response = self.client.get('/logout/next_page/named/') self.assertRedirects(response, '/password_reset/', fetch_redirect_response=False) self.confirm_logged_out() def test_success_url_allowed_hosts_same_host(self): self.login() response = self.client.get('/logout/allowed_hosts/?next=https://testserver/') self.assertRedirects(response, 'https://testserver/', fetch_redirect_response=False) self.confirm_logged_out() def test_success_url_allowed_hosts_safe_host(self): self.login() response = self.client.get('/logout/allowed_hosts/?next=https://otherserver/') self.assertRedirects(response, 'https://otherserver/', fetch_redirect_response=False) self.confirm_logged_out() def test_success_url_allowed_hosts_unsafe_host(self): self.login() response = self.client.get('/logout/allowed_hosts/?next=https://evil/') self.assertRedirects(response, '/logout/allowed_hosts/', fetch_redirect_response=False) self.confirm_logged_out() def test_security_check(self): logout_url = reverse('logout') # These URLs should not pass the security check. bad_urls = ( 'http://example.com', 'http:///example.com', 'https://example.com', 'ftp://example.com', '///example.com', '//example.com', 'javascript:alert("XSS")', ) for bad_url in bad_urls: with self.subTest(bad_url=bad_url): nasty_url = '%(url)s?%(next)s=%(bad_url)s' % { 'url': logout_url, 'next': REDIRECT_FIELD_NAME, 'bad_url': quote(bad_url), } self.login() response = self.client.get(nasty_url) self.assertEqual(response.status_code, 302) self.assertNotIn(bad_url, response.url, '%s should be blocked' % bad_url) self.confirm_logged_out() # These URLs should pass the security check. good_urls = ( '/view/?param=http://example.com', '/view/?param=https://example.com', '/view?param=ftp://example.com', 'view/?param=//example.com', 'https://testserver/', 'HTTPS://testserver/', '//testserver/', '/url%20with%20spaces/', ) for good_url in good_urls: with self.subTest(good_url=good_url): safe_url = '%(url)s?%(next)s=%(good_url)s' % { 'url': logout_url, 'next': REDIRECT_FIELD_NAME, 'good_url': quote(good_url), } self.login() response = self.client.get(safe_url) self.assertEqual(response.status_code, 302) self.assertIn(good_url, response.url, '%s should be allowed' % good_url) self.confirm_logged_out() def test_security_check_https(self): logout_url = reverse('logout') non_https_next_url = 'http://testserver/' url = '%(url)s?%(next)s=%(next_url)s' % { 'url': logout_url, 'next': REDIRECT_FIELD_NAME, 'next_url': quote(non_https_next_url), } self.login() response = self.client.get(url, secure=True) self.assertRedirects(response, logout_url, fetch_redirect_response=False) self.confirm_logged_out() def test_logout_preserve_language(self): """Language is preserved after logout.""" self.login() self.client.post('/setlang/', {'language': 'pl'}) self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, 'pl') self.client.get('/logout/') self.assertEqual(self.client.cookies[settings.LANGUAGE_COOKIE_NAME].value, 'pl') @override_settings(LOGOUT_REDIRECT_URL='/custom/') def test_logout_redirect_url_setting(self): self.login() response = self.client.get('/logout/') self.assertRedirects(response, '/custom/', fetch_redirect_response=False) @override_settings(LOGOUT_REDIRECT_URL='logout') def test_logout_redirect_url_named_setting(self): self.login() response = self.client.get('/logout/') self.assertRedirects(response, '/logout/', fetch_redirect_response=False) def get_perm(Model, perm): ct = ContentType.objects.get_for_model(Model) return 
Permission.objects.get(content_type=ct, codename=perm) # Redirect in test_user_change_password will fail if session auth hash # isn't updated after password change (#21649) @override_settings(ROOT_URLCONF='auth_tests.urls_admin') class ChangelistTests(AuthViewsTestCase): @classmethod def setUpTestData(cls): super().setUpTestData() # Make me a superuser before logging in. User.objects.filter(username='testclient').update(is_staff=True, is_superuser=True) def setUp(self): self.login() # Get the latest last_login value. self.admin = User.objects.get(pk=self.u1.pk) def get_user_data(self, user): return { 'username': user.username, 'password': user.password, 'email': user.email, 'is_active': user.is_active, 'is_staff': user.is_staff, 'is_superuser': user.is_superuser, 'last_login_0': user.last_login.strftime('%Y-%m-%d'), 'last_login_1': user.last_login.strftime('%H:%M:%S'), 'initial-last_login_0': user.last_login.strftime('%Y-%m-%d'), 'initial-last_login_1': user.last_login.strftime('%H:%M:%S'), 'date_joined_0': user.date_joined.strftime('%Y-%m-%d'), 'date_joined_1': user.date_joined.strftime('%H:%M:%S'), 'initial-date_joined_0': user.date_joined.strftime('%Y-%m-%d'), 'initial-date_joined_1': user.date_joined.strftime('%H:%M:%S'), 'first_name': user.first_name, 'last_name': user.last_name, } # #20078 - users shouldn't be allowed to guess password hashes via # repeated password__startswith queries. def test_changelist_disallows_password_lookups(self): # A lookup that tries to filter on password isn't OK with self.assertLogs('django.security.DisallowedModelAdminLookup', 'ERROR'): response = self.client.get(reverse('auth_test_admin:auth_user_changelist') + '?password__startswith=sha1$') self.assertEqual(response.status_code, 400) def test_user_change_email(self): data = self.get_user_data(self.admin) data['email'] = 'new_' + data['email'] response = self.client.post( reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,)), data ) self.assertRedirects(response, reverse('auth_test_admin:auth_user_changelist')) row = LogEntry.objects.latest('id') self.assertEqual(row.get_change_message(), 'Changed Email address.') def test_user_not_change(self): response = self.client.post( reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,)), self.get_user_data(self.admin) ) self.assertRedirects(response, reverse('auth_test_admin:auth_user_changelist')) row = LogEntry.objects.latest('id') self.assertEqual(row.get_change_message(), 'No fields changed.') def test_user_change_password(self): user_change_url = reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,)) password_change_url = reverse('auth_test_admin:auth_user_password_change', args=(self.admin.pk,)) response = self.client.get(user_change_url) # Test the link inside password field help_text. 
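        # The link is relative to the user change page, so joining it onto
        # user_change_url must yield password_change_url.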
rel_link = re.search( r'you can change the password using <a href="([^"]*)">this form</a>', response.content.decode() )[1] self.assertEqual(urljoin(user_change_url, rel_link), password_change_url) response = self.client.post( password_change_url, { 'password1': 'password1', 'password2': 'password1', } ) self.assertRedirects(response, user_change_url) row = LogEntry.objects.latest('id') self.assertEqual(row.get_change_message(), 'Changed password.') self.logout() self.login(password='password1') def test_user_change_different_user_password(self): u = User.objects.get(email='[email protected]') response = self.client.post( reverse('auth_test_admin:auth_user_password_change', args=(u.pk,)), { 'password1': 'password1', 'password2': 'password1', } ) self.assertRedirects(response, reverse('auth_test_admin:auth_user_change', args=(u.pk,))) row = LogEntry.objects.latest('id') self.assertEqual(row.user_id, self.admin.pk) self.assertEqual(row.object_id, str(u.pk)) self.assertEqual(row.get_change_message(), 'Changed password.') def test_password_change_bad_url(self): response = self.client.get(reverse('auth_test_admin:auth_user_password_change', args=('foobar',))) self.assertEqual(response.status_code, 404) @mock.patch('django.contrib.auth.admin.UserAdmin.has_change_permission') def test_user_change_password_passes_user_to_has_change_permission(self, has_change_permission): url = reverse('auth_test_admin:auth_user_password_change', args=(self.admin.pk,)) self.client.post(url, {'password1': 'password1', 'password2': 'password1'}) (_request, user), _kwargs = has_change_permission.call_args self.assertEqual(user.pk, self.admin.pk) def test_view_user_password_is_readonly(self): u = User.objects.get(username='testclient') u.is_superuser = False u.save() original_password = u.password u.user_permissions.add(get_perm(User, 'view_user')) response = self.client.get(reverse('auth_test_admin:auth_user_change', args=(u.pk,)),) algo, salt, hash_string = (u.password.split('$')) self.assertContains(response, '<div class="readonly">testclient</div>') # ReadOnlyPasswordHashWidget is used to render the field. self.assertContains( response, '<strong>algorithm</strong>: %s\n\n' '<strong>salt</strong>: %s**********\n\n' '<strong>hash</strong>: %s**************************\n\n' % ( algo, salt[:2], hash_string[:6], ), html=True, ) # Value in POST data is ignored. data = self.get_user_data(u) data['password'] = 'shouldnotchange' change_url = reverse('auth_test_admin:auth_user_change', args=(u.pk,)) response = self.client.post(change_url, data) self.assertEqual(response.status_code, 403) u.refresh_from_db() self.assertEqual(u.password, original_password) @override_settings( AUTH_USER_MODEL='auth_tests.UUIDUser', ROOT_URLCONF='auth_tests.urls_custom_user_admin', ) class UUIDUserTests(TestCase): def test_admin_password_change(self): u = UUIDUser.objects.create_superuser(username='uuid', email='[email protected]', password='test') self.assertTrue(self.client.login(username='uuid', password='test')) user_change_url = reverse('custom_user_admin:auth_tests_uuiduser_change', args=(u.pk,)) response = self.client.get(user_change_url) self.assertEqual(response.status_code, 200) password_change_url = reverse('custom_user_admin:auth_user_password_change', args=(u.pk,)) response = self.client.get(password_change_url) # The action attribute is omitted. 
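        # (A form without an action attribute posts back to the current URL.)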
self.assertContains(response, '<form method="post" id="uuiduser_form">') # A LogEntry is created with pk=1 which breaks a FK constraint on MySQL with connection.constraint_checks_disabled(): response = self.client.post(password_change_url, { 'password1': 'password1', 'password2': 'password1', }) self.assertRedirects(response, user_change_url) row = LogEntry.objects.latest('id') self.assertEqual(row.user_id, 1) # hardcoded in CustomUserAdmin.log_change() self.assertEqual(row.object_id, str(u.pk)) self.assertEqual(row.get_change_message(), 'Changed password.') # The LogEntry.user column isn't altered to a UUID type so it's set to # an integer manually in CustomUserAdmin to avoid an error. To avoid a # constraint error, delete the entry before constraints are checked # after the test. row.delete()
import datetime import re from decimal import Decimal from unittest import skipIf from django.core.exceptions import FieldError from django.db import connection from django.db.models import ( Avg, Case, Count, DecimalField, DurationField, Exists, F, FloatField, Func, IntegerField, Max, Min, OuterRef, Subquery, Sum, Value, When, ) from django.db.models.functions import Coalesce from django.test import TestCase from django.test.testcases import skipUnlessDBFeature from django.test.utils import Approximate, CaptureQueriesContext from django.utils import timezone from .models import Author, Book, Publisher, Store class AggregateTestCase(TestCase): @classmethod def setUpTestData(cls): cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34) cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35) cls.a3 = Author.objects.create(name='Brad Dayley', age=45) cls.a4 = Author.objects.create(name='James Bennett', age=29) cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37) cls.a6 = Author.objects.create(name='Paul Bissex', age=29) cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25) cls.a8 = Author.objects.create(name='Peter Norvig', age=57) cls.a9 = Author.objects.create(name='Stuart Russell', age=46) cls.a1.friends.add(cls.a2, cls.a4) cls.a2.friends.add(cls.a1, cls.a7) cls.a4.friends.add(cls.a1) cls.a5.friends.add(cls.a6, cls.a7) cls.a6.friends.add(cls.a5, cls.a7) cls.a7.friends.add(cls.a2, cls.a5, cls.a6) cls.a8.friends.add(cls.a9) cls.a9.friends.add(cls.a8) cls.p1 = Publisher.objects.create(name='Apress', num_awards=3, duration=datetime.timedelta(days=1)) cls.p2 = Publisher.objects.create(name='Sams', num_awards=1, duration=datetime.timedelta(days=2)) cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7) cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9) cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0) cls.b1 = Book.objects.create( isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right', pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1, pubdate=datetime.date(2007, 12, 6) ) cls.b2 = Book.objects.create( isbn='067232959', name='Sams Teach Yourself Django in 24 Hours', pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2, pubdate=datetime.date(2008, 3, 3) ) cls.b3 = Book.objects.create( isbn='159059996', name='Practical Django Projects', pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1, pubdate=datetime.date(2008, 6, 23) ) cls.b4 = Book.objects.create( isbn='013235613', name='Python Web Development with Django', pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3, pubdate=datetime.date(2008, 11, 3) ) cls.b5 = Book.objects.create( isbn='013790395', name='Artificial Intelligence: A Modern Approach', pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3, pubdate=datetime.date(1995, 1, 15) ) cls.b6 = Book.objects.create( isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4, pubdate=datetime.date(1991, 10, 15) ) cls.b1.authors.add(cls.a1, cls.a2) cls.b2.authors.add(cls.a3) cls.b3.authors.add(cls.a4) cls.b4.authors.add(cls.a5, cls.a6, cls.a7) cls.b5.authors.add(cls.a8, cls.a9) cls.b6.authors.add(cls.a8) s1 = Store.objects.create( name='Amazon.com', original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42), 
friday_night_closing=datetime.time(23, 59, 59) ) s2 = Store.objects.create( name='Books.com', original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37), friday_night_closing=datetime.time(23, 59, 59) ) s3 = Store.objects.create( name="Mamma and Pappa's Books", original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14), friday_night_closing=datetime.time(21, 30) ) s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6) s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6) s3.books.add(cls.b3, cls.b4, cls.b6) def test_empty_aggregate(self): self.assertEqual(Author.objects.all().aggregate(), {}) def test_aggregate_in_order_by(self): msg = ( 'Using an aggregate in order_by() without also including it in ' 'annotate() is not allowed: Avg(F(book__rating)' ) with self.assertRaisesMessage(FieldError, msg): Author.objects.values('age').order_by(Avg('book__rating')) def test_single_aggregate(self): vals = Author.objects.aggregate(Avg("age")) self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)}) def test_multiple_aggregates(self): vals = Author.objects.aggregate(Sum("age"), Avg("age")) self.assertEqual(vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)}) def test_filter_aggregate(self): vals = Author.objects.filter(age__gt=29).aggregate(Sum("age")) self.assertEqual(vals, {'age__sum': 254}) def test_related_aggregate(self): vals = Author.objects.aggregate(Avg("friends__age")) self.assertEqual(vals, {'friends__age__avg': Approximate(34.07, places=2)}) vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age")) self.assertEqual(vals, {'authors__age__avg': Approximate(38.2857, places=2)}) vals = Author.objects.all().filter(name__contains="a").aggregate(Avg("book__rating")) self.assertEqual(vals, {'book__rating__avg': 4.0}) vals = Book.objects.aggregate(Sum("publisher__num_awards")) self.assertEqual(vals, {'publisher__num_awards__sum': 30}) vals = Publisher.objects.aggregate(Sum("book__price")) self.assertEqual(vals, {'book__price__sum': Decimal('270.27')}) def test_aggregate_multi_join(self): vals = Store.objects.aggregate(Max("books__authors__age")) self.assertEqual(vals, {'books__authors__age__max': 57}) vals = Author.objects.aggregate(Min("book__publisher__num_awards")) self.assertEqual(vals, {'book__publisher__num_awards__min': 1}) def test_aggregate_alias(self): vals = Store.objects.filter(name="Amazon.com").aggregate(amazon_mean=Avg("books__rating")) self.assertEqual(vals, {'amazon_mean': Approximate(4.08, places=2)}) def test_annotate_basic(self): self.assertQuerysetEqual( Book.objects.annotate().order_by('pk'), [ "The Definitive Guide to Django: Web Development Done Right", "Sams Teach Yourself Django in 24 Hours", "Practical Django Projects", "Python Web Development with Django", "Artificial Intelligence: A Modern Approach", "Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp" ], lambda b: b.name ) books = Book.objects.annotate(mean_age=Avg("authors__age")) b = books.get(pk=self.b1.pk) self.assertEqual( b.name, 'The Definitive Guide to Django: Web Development Done Right' ) self.assertEqual(b.mean_age, 34.5) def test_annotate_defer(self): qs = Book.objects.annotate( page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk) rows = [ (self.b1.id, "159059725", 447, "The Definitive Guide to Django: Web Development Done Right") ] self.assertQuerysetEqual( qs.order_by('pk'), rows, lambda r: (r.id, r.isbn, r.page_sum, r.name) ) def test_annotate_defer_select_related(self): qs = Book.objects.select_related('contact').annotate( 
page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk) rows = [ (self.b1.id, "159059725", 447, "Adrian Holovaty", "The Definitive Guide to Django: Web Development Done Right") ] self.assertQuerysetEqual( qs.order_by('pk'), rows, lambda r: (r.id, r.isbn, r.page_sum, r.contact.name, r.name) ) def test_annotate_m2m(self): books = Book.objects.filter(rating__lt=4.5).annotate(Avg("authors__age")).order_by("name") self.assertQuerysetEqual( books, [ ('Artificial Intelligence: A Modern Approach', 51.5), ('Practical Django Projects', 29.0), ('Python Web Development with Django', Approximate(30.3, places=1)), ('Sams Teach Yourself Django in 24 Hours', 45.0) ], lambda b: (b.name, b.authors__age__avg), ) books = Book.objects.annotate(num_authors=Count("authors")).order_by("name") self.assertQuerysetEqual( books, [ ('Artificial Intelligence: A Modern Approach', 2), ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1), ('Practical Django Projects', 1), ('Python Web Development with Django', 3), ('Sams Teach Yourself Django in 24 Hours', 1), ('The Definitive Guide to Django: Web Development Done Right', 2) ], lambda b: (b.name, b.num_authors) ) def test_backwards_m2m_annotate(self): authors = Author.objects.filter(name__contains="a").annotate(Avg("book__rating")).order_by("name") self.assertQuerysetEqual( authors, [ ('Adrian Holovaty', 4.5), ('Brad Dayley', 3.0), ('Jacob Kaplan-Moss', 4.5), ('James Bennett', 4.0), ('Paul Bissex', 4.0), ('Stuart Russell', 4.0) ], lambda a: (a.name, a.book__rating__avg) ) authors = Author.objects.annotate(num_books=Count("book")).order_by("name") self.assertQuerysetEqual( authors, [ ('Adrian Holovaty', 1), ('Brad Dayley', 1), ('Jacob Kaplan-Moss', 1), ('James Bennett', 1), ('Jeffrey Forcier', 1), ('Paul Bissex', 1), ('Peter Norvig', 2), ('Stuart Russell', 1), ('Wesley J. 
Chun', 1) ], lambda a: (a.name, a.num_books) ) def test_reverse_fkey_annotate(self): books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name") self.assertQuerysetEqual( books, [ ('Artificial Intelligence: A Modern Approach', 7), ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9), ('Practical Django Projects', 3), ('Python Web Development with Django', 7), ('Sams Teach Yourself Django in 24 Hours', 1), ('The Definitive Guide to Django: Web Development Done Right', 3) ], lambda b: (b.name, b.publisher__num_awards__sum) ) publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name") self.assertQuerysetEqual( publishers, [ ('Apress', Decimal("59.69")), ("Jonno's House of Books", None), ('Morgan Kaufmann', Decimal("75.00")), ('Prentice Hall', Decimal("112.49")), ('Sams', Decimal("23.09")) ], lambda p: (p.name, p.book__price__sum) ) def test_annotate_values(self): books = list(Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values()) self.assertEqual( books, [ { "contact_id": self.a1.id, "id": self.b1.id, "isbn": "159059725", "mean_age": 34.5, "name": "The Definitive Guide to Django: Web Development Done Right", "pages": 447, "price": Approximate(Decimal("30")), "pubdate": datetime.date(2007, 12, 6), "publisher_id": self.p1.id, "rating": 4.5, } ] ) books = ( Book.objects .filter(pk=self.b1.pk) .annotate(mean_age=Avg('authors__age')) .values('pk', 'isbn', 'mean_age') ) self.assertEqual( list(books), [ { "pk": self.b1.pk, "isbn": "159059725", "mean_age": 34.5, } ] ) books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values("name") self.assertEqual( list(books), [{'name': 'The Definitive Guide to Django: Web Development Done Right'}], ) books = Book.objects.filter(pk=self.b1.pk).values().annotate(mean_age=Avg('authors__age')) self.assertEqual( list(books), [ { "contact_id": self.a1.id, "id": self.b1.id, "isbn": "159059725", "mean_age": 34.5, "name": "The Definitive Guide to Django: Web Development Done Right", "pages": 447, "price": Approximate(Decimal("30")), "pubdate": datetime.date(2007, 12, 6), "publisher_id": self.p1.id, "rating": 4.5, } ] ) books = ( Book.objects .values("rating") .annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age")) .order_by("rating") ) self.assertEqual( list(books), [ { "rating": 3.0, "n_authors": 1, "mean_age": 45.0, }, { "rating": 4.0, "n_authors": 6, "mean_age": Approximate(37.16, places=1) }, { "rating": 4.5, "n_authors": 2, "mean_age": 34.5, }, { "rating": 5.0, "n_authors": 1, "mean_age": 57.0, } ] ) authors = Author.objects.annotate(Avg("friends__age")).order_by("name") self.assertQuerysetEqual( authors, [ ('Adrian Holovaty', 32.0), ('Brad Dayley', None), ('Jacob Kaplan-Moss', 29.5), ('James Bennett', 34.0), ('Jeffrey Forcier', 27.0), ('Paul Bissex', 31.0), ('Peter Norvig', 46.0), ('Stuart Russell', 57.0), ('Wesley J. 
Chun', Approximate(33.66, places=1)) ], lambda a: (a.name, a.friends__age__avg) ) def test_count(self): vals = Book.objects.aggregate(Count("rating")) self.assertEqual(vals, {"rating__count": 6}) def test_count_star(self): with self.assertNumQueries(1) as ctx: Book.objects.aggregate(n=Count("*")) sql = ctx.captured_queries[0]['sql'] self.assertIn('SELECT COUNT(*) ', sql) def test_count_distinct_expression(self): aggs = Book.objects.aggregate( distinct_ratings=Count(Case(When(pages__gt=300, then='rating')), distinct=True), ) self.assertEqual(aggs['distinct_ratings'], 4) def test_distinct_on_aggregate(self): for aggregate, expected_result in ( (Avg, 4.125), (Count, 4), (Sum, 16.5), ): with self.subTest(aggregate=aggregate.__name__): books = Book.objects.aggregate(ratings=aggregate('rating', distinct=True)) self.assertEqual(books['ratings'], expected_result) def test_non_grouped_annotation_not_in_group_by(self): """ An annotation not included in values() before an aggregate should be excluded from the group by clause. """ qs = ( Book.objects.annotate(xprice=F('price')).filter(rating=4.0).values('rating') .annotate(count=Count('publisher_id', distinct=True)).values('count', 'rating').order_by('count') ) self.assertEqual(list(qs), [{'rating': 4.0, 'count': 2}]) def test_grouped_annotation_in_group_by(self): """ An annotation included in values() before an aggregate should be included in the group by clause. """ qs = ( Book.objects.annotate(xprice=F('price')).filter(rating=4.0).values('rating', 'xprice') .annotate(count=Count('publisher_id', distinct=True)).values('count', 'rating').order_by('count') ) self.assertEqual( list(qs), [ {'rating': 4.0, 'count': 1}, {'rating': 4.0, 'count': 2}, ] ) def test_fkey_aggregate(self): explicit = list(Author.objects.annotate(Count('book__id'))) implicit = list(Author.objects.annotate(Count('book'))) self.assertCountEqual(explicit, implicit) def test_annotate_ordering(self): books = Book.objects.values('rating').annotate(oldest=Max('authors__age')).order_by('oldest', 'rating') self.assertEqual( list(books), [ {'rating': 4.5, 'oldest': 35}, {'rating': 3.0, 'oldest': 45}, {'rating': 4.0, 'oldest': 57}, {'rating': 5.0, 'oldest': 57}, ] ) books = Book.objects.values("rating").annotate(oldest=Max("authors__age")).order_by("-oldest", "-rating") self.assertEqual( list(books), [ {'rating': 5.0, 'oldest': 57}, {'rating': 4.0, 'oldest': 57}, {'rating': 3.0, 'oldest': 45}, {'rating': 4.5, 'oldest': 35}, ] ) def test_aggregate_annotation(self): vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(Avg("num_authors")) self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)}) def test_avg_duration_field(self): # Explicit `output_field`. self.assertEqual( Publisher.objects.aggregate(Avg('duration', output_field=DurationField())), {'duration__avg': datetime.timedelta(days=1, hours=12)} ) # Implicit `output_field`. self.assertEqual( Publisher.objects.aggregate(Avg('duration')), {'duration__avg': datetime.timedelta(days=1, hours=12)} ) def test_sum_duration_field(self): self.assertEqual( Publisher.objects.aggregate(Sum('duration', output_field=DurationField())), {'duration__sum': datetime.timedelta(days=3)} ) def test_sum_distinct_aggregate(self): """ Sum on a distinct() QuerySet should aggregate only the distinct items. 
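        Peter Norvig wrote both selected books, so without distinct() he is
        counted twice.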
""" authors = Author.objects.filter(book__in=[self.b5, self.b6]) self.assertEqual(authors.count(), 3) distinct_authors = authors.distinct() self.assertEqual(distinct_authors.count(), 2) # Selected author ages are 57 and 46 age_sum = distinct_authors.aggregate(Sum('age')) self.assertEqual(age_sum['age__sum'], 103) def test_filtering(self): p = Publisher.objects.create(name='Expensive Publisher', num_awards=0) Book.objects.create( name='ExpensiveBook1', pages=1, isbn='111', rating=3.5, price=Decimal("1000"), publisher=p, contact_id=self.a1.id, pubdate=datetime.date(2008, 12, 1) ) Book.objects.create( name='ExpensiveBook2', pages=1, isbn='222', rating=4.0, price=Decimal("1000"), publisher=p, contact_id=self.a1.id, pubdate=datetime.date(2008, 12, 2) ) Book.objects.create( name='ExpensiveBook3', pages=1, isbn='333', rating=4.5, price=Decimal("35"), publisher=p, contact_id=self.a1.id, pubdate=datetime.date(2008, 12, 3) ) publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk") self.assertQuerysetEqual( publishers, ['Apress', 'Prentice Hall', 'Expensive Publisher'], lambda p: p.name, ) publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk") self.assertQuerysetEqual( publishers, [ "Apress", "Apress", "Sams", "Prentice Hall", "Expensive Publisher", ], lambda p: p.name ) publishers = ( Publisher.objects .annotate(num_books=Count("book__id")) .filter(num_books__gt=1, book__price__lt=Decimal("40.0")) .order_by("pk") ) self.assertQuerysetEqual( publishers, ['Apress', 'Prentice Hall', 'Expensive Publisher'], lambda p: p.name, ) publishers = ( Publisher.objects .filter(book__price__lt=Decimal("40.0")) .annotate(num_books=Count("book__id")) .filter(num_books__gt=1) .order_by("pk") ) self.assertQuerysetEqual(publishers, ['Apress'], lambda p: p.name) publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk") self.assertQuerysetEqual( publishers, [ "Apress", "Sams", "Prentice Hall", "Morgan Kaufmann", "Expensive Publisher", ], lambda p: p.name ) publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk") self.assertQuerysetEqual( publishers, ['Apress', 'Sams', 'Prentice Hall', 'Morgan Kaufmann'], lambda p: p.name ) publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk") self.assertQuerysetEqual( publishers, ['Sams', 'Morgan Kaufmann', 'Expensive Publisher'], lambda p: p.name, ) publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True) self.assertEqual(len(publishers), 0) def test_annotation(self): vals = Author.objects.filter(pk=self.a1.pk).aggregate(Count("friends__id")) self.assertEqual(vals, {"friends__id__count": 2}) books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__exact=2).order_by("pk") self.assertQuerysetEqual( books, [ "The Definitive Guide to Django: Web Development Done Right", "Artificial Intelligence: A Modern Approach", ], lambda b: b.name ) authors = ( Author.objects .annotate(num_friends=Count("friends__id", distinct=True)) .filter(num_friends=0) .order_by("pk") ) self.assertQuerysetEqual(authors, ['Brad Dayley'], lambda a: a.name) publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk") self.assertQuerysetEqual(publishers, ['Apress', 'Prentice Hall'], lambda p: p.name) publishers = ( Publisher.objects 
.filter(book__price__lt=Decimal("40.0")) .annotate(num_books=Count("book__id")) .filter(num_books__gt=1) ) self.assertQuerysetEqual(publishers, ['Apress'], lambda p: p.name) books = ( Book.objects .annotate(num_authors=Count("authors__id")) .filter(authors__name__contains="Norvig", num_authors__gt=1) ) self.assertQuerysetEqual( books, ['Artificial Intelligence: A Modern Approach'], lambda b: b.name ) def test_more_aggregation(self): a = Author.objects.get(name__contains='Norvig') b = Book.objects.get(name__contains='Done Right') b.authors.add(a) b.save() vals = ( Book.objects .annotate(num_authors=Count("authors__id")) .filter(authors__name__contains="Norvig", num_authors__gt=1) .aggregate(Avg("rating")) ) self.assertEqual(vals, {"rating__avg": 4.25}) def test_even_more_aggregate(self): publishers = Publisher.objects.annotate( earliest_book=Min("book__pubdate"), ).exclude(earliest_book=None).order_by("earliest_book").values( 'earliest_book', 'num_awards', 'id', 'name', ) self.assertEqual( list(publishers), [ { 'earliest_book': datetime.date(1991, 10, 15), 'num_awards': 9, 'id': self.p4.id, 'name': 'Morgan Kaufmann' }, { 'earliest_book': datetime.date(1995, 1, 15), 'num_awards': 7, 'id': self.p3.id, 'name': 'Prentice Hall' }, { 'earliest_book': datetime.date(2007, 12, 6), 'num_awards': 3, 'id': self.p1.id, 'name': 'Apress' }, { 'earliest_book': datetime.date(2008, 3, 3), 'num_awards': 1, 'id': self.p2.id, 'name': 'Sams' } ] ) vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening")) self.assertEqual( vals, { "friday_night_closing__max": datetime.time(23, 59, 59), "original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14), } ) def test_annotate_values_list(self): books = ( Book.objects .filter(pk=self.b1.pk) .annotate(mean_age=Avg("authors__age")) .values_list("pk", "isbn", "mean_age") ) self.assertEqual(list(books), [(self.b1.id, '159059725', 34.5)]) books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("isbn") self.assertEqual(list(books), [('159059725',)]) books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("mean_age") self.assertEqual(list(books), [(34.5,)]) books = ( Book.objects .filter(pk=self.b1.pk) .annotate(mean_age=Avg("authors__age")) .values_list("mean_age", flat=True) ) self.assertEqual(list(books), [34.5]) books = Book.objects.values_list("price").annotate(count=Count("price")).order_by("-count", "price") self.assertEqual( list(books), [ (Decimal("29.69"), 2), (Decimal('23.09'), 1), (Decimal('30'), 1), (Decimal('75'), 1), (Decimal('82.8'), 1), ] ) def test_dates_with_aggregation(self): """ .dates() returns a distinct set of dates when applied to a QuerySet with aggregation. Refs #18056. Previously, .dates() would return distinct (date_kind, aggregation) sets, in this case (year, num_authors), so 2008 would be returned twice because there are books from 2008 with a different number of authors. 
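        (Of the 2008 books here, two have a single author and one has three.)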
""" dates = Book.objects.annotate(num_authors=Count("authors")).dates('pubdate', 'year') self.assertQuerysetEqual( dates, [ "datetime.date(1991, 1, 1)", "datetime.date(1995, 1, 1)", "datetime.date(2007, 1, 1)", "datetime.date(2008, 1, 1)" ] ) def test_values_aggregation(self): # Refs #20782 max_rating = Book.objects.values('rating').aggregate(max_rating=Max('rating')) self.assertEqual(max_rating['max_rating'], 5) max_books_per_rating = Book.objects.values('rating').annotate( books_per_rating=Count('id') ).aggregate(Max('books_per_rating')) self.assertEqual( max_books_per_rating, {'books_per_rating__max': 3}) def test_ticket17424(self): """ Doing exclude() on a foreign model after annotate() doesn't crash. """ all_books = list(Book.objects.values_list('pk', flat=True).order_by('pk')) annotated_books = Book.objects.order_by('pk').annotate(one=Count("id")) # The value doesn't matter, we just need any negative # constraint on a related model that's a noop. excluded_books = annotated_books.exclude(publisher__name="__UNLIKELY_VALUE__") # Try to generate query tree str(excluded_books.query) self.assertQuerysetEqual(excluded_books, all_books, lambda x: x.pk) # Check internal state self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type) self.assertIsNone(excluded_books.query.alias_map["aggregation_book"].join_type) def test_ticket12886(self): """ Aggregation over sliced queryset works correctly. """ qs = Book.objects.all().order_by('-rating')[0:3] vals = qs.aggregate(average_top3_rating=Avg('rating'))['average_top3_rating'] self.assertAlmostEqual(vals, 4.5, places=2) def test_ticket11881(self): """ Subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE or select_related() stuff. """ qs = Book.objects.all().select_for_update().order_by( 'pk').select_related('publisher').annotate(max_pk=Max('pk')) with CaptureQueriesContext(connection) as captured_queries: qs.aggregate(avg_pk=Avg('max_pk')) self.assertEqual(len(captured_queries), 1) qstr = captured_queries[0]['sql'].lower() self.assertNotIn('for update', qstr) forced_ordering = connection.ops.force_no_ordering() if forced_ordering: # If the backend needs to force an ordering we make sure it's # the only "ORDER BY" clause present in the query. 
self.assertEqual( re.findall(r'order by (\w+)', qstr), [', '.join(f[1][0] for f in forced_ordering).lower()] ) else: self.assertNotIn('order by', qstr) self.assertEqual(qstr.count(' join '), 0) def test_decimal_max_digits_has_no_effect(self): Book.objects.all().delete() a1 = Author.objects.first() p1 = Publisher.objects.first() thedate = timezone.now() for i in range(10): Book.objects.create( isbn="abcde{}".format(i), name="none", pages=10, rating=4.0, price=9999.98, contact=a1, publisher=p1, pubdate=thedate) book = Book.objects.aggregate(price_sum=Sum('price')) self.assertEqual(book['price_sum'], Decimal("99999.80")) def test_nonaggregate_aggregation_throws(self): with self.assertRaisesMessage(TypeError, 'fail is not an aggregate expression'): Book.objects.aggregate(fail=F('price')) def test_nonfield_annotation(self): book = Book.objects.annotate(val=Max(Value(2))).first() self.assertEqual(book.val, 2) book = Book.objects.annotate(val=Max(Value(2), output_field=IntegerField())).first() self.assertEqual(book.val, 2) book = Book.objects.annotate(val=Max(2, output_field=IntegerField())).first() self.assertEqual(book.val, 2) def test_annotation_expressions(self): authors = Author.objects.annotate(combined_ages=Sum(F('age') + F('friends__age'))).order_by('name') authors2 = Author.objects.annotate(combined_ages=Sum('age') + Sum('friends__age')).order_by('name') for qs in (authors, authors2): self.assertQuerysetEqual( qs, [ ('Adrian Holovaty', 132), ('Brad Dayley', None), ('Jacob Kaplan-Moss', 129), ('James Bennett', 63), ('Jeffrey Forcier', 128), ('Paul Bissex', 120), ('Peter Norvig', 103), ('Stuart Russell', 103), ('Wesley J. Chun', 176) ], lambda a: (a.name, a.combined_ages) ) def test_aggregation_expressions(self): a1 = Author.objects.aggregate(av_age=Sum('age') / Count('*')) a2 = Author.objects.aggregate(av_age=Sum('age') / Count('age')) a3 = Author.objects.aggregate(av_age=Avg('age')) self.assertEqual(a1, {'av_age': 37}) self.assertEqual(a2, {'av_age': 37}) self.assertEqual(a3, {'av_age': Approximate(37.4, places=1)}) def test_avg_decimal_field(self): v = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price')))['avg_price'] self.assertIsInstance(v, Decimal) self.assertEqual(v, Approximate(Decimal('47.39'), places=2)) def test_order_of_precedence(self): p1 = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price') + 2) * 3) self.assertEqual(p1, {'avg_price': Approximate(Decimal('148.18'), places=2)}) p2 = Book.objects.filter(rating=4).aggregate(avg_price=Avg('price') + 2 * 3) self.assertEqual(p2, {'avg_price': Approximate(Decimal('53.39'), places=2)}) def test_combine_different_types(self): msg = ( 'Expression contains mixed types: FloatField, DecimalField. ' 'You must set output_field.' 
) qs = Book.objects.annotate(sums=Sum('rating') + Sum('pages') + Sum('price')) with self.assertRaisesMessage(FieldError, msg): qs.first() with self.assertRaisesMessage(FieldError, msg): qs.first() b1 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'), output_field=IntegerField())).get(pk=self.b4.pk) self.assertEqual(b1.sums, 383) b2 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'), output_field=FloatField())).get(pk=self.b4.pk) self.assertEqual(b2.sums, 383.69) b3 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'), output_field=DecimalField())).get(pk=self.b4.pk) self.assertEqual(b3.sums, Approximate(Decimal("383.69"), places=2)) def test_complex_aggregations_require_kwarg(self): with self.assertRaisesMessage(TypeError, 'Complex annotations require an alias'): Author.objects.annotate(Sum(F('age') + F('friends__age'))) with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'): Author.objects.aggregate(Sum('age') / Count('age')) with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'): Author.objects.aggregate(Sum(1)) def test_aggregate_over_complex_annotation(self): qs = Author.objects.annotate( combined_ages=Sum(F('age') + F('friends__age'))) age = qs.aggregate(max_combined_age=Max('combined_ages')) self.assertEqual(age['max_combined_age'], 176) age = qs.aggregate(max_combined_age_doubled=Max('combined_ages') * 2) self.assertEqual(age['max_combined_age_doubled'], 176 * 2) age = qs.aggregate( max_combined_age_doubled=Max('combined_ages') + Max('combined_ages')) self.assertEqual(age['max_combined_age_doubled'], 176 * 2) age = qs.aggregate( max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'), sum_combined_age=Sum('combined_ages')) self.assertEqual(age['max_combined_age_doubled'], 176 * 2) self.assertEqual(age['sum_combined_age'], 954) age = qs.aggregate( max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'), sum_combined_age_doubled=Sum('combined_ages') + Sum('combined_ages')) self.assertEqual(age['max_combined_age_doubled'], 176 * 2) self.assertEqual(age['sum_combined_age_doubled'], 954 * 2) def test_values_annotation_with_expression(self): # ensure the F() is promoted to the group by clause qs = Author.objects.values('name').annotate(another_age=Sum('age') + F('age')) a = qs.get(name="Adrian Holovaty") self.assertEqual(a['another_age'], 68) qs = qs.annotate(friend_count=Count('friends')) a = qs.get(name="Adrian Holovaty") self.assertEqual(a['friend_count'], 2) qs = qs.annotate(combined_age=Sum('age') + F('friends__age')).filter( name="Adrian Holovaty").order_by('-combined_age') self.assertEqual( list(qs), [ { "name": 'Adrian Holovaty', "another_age": 68, "friend_count": 1, "combined_age": 69 }, { "name": 'Adrian Holovaty', "another_age": 68, "friend_count": 1, "combined_age": 63 } ] ) vals = qs.values('name', 'combined_age') self.assertEqual( list(vals), [ {'name': 'Adrian Holovaty', 'combined_age': 69}, {'name': 'Adrian Holovaty', 'combined_age': 63}, ] ) def test_annotate_values_aggregate(self): alias_age = Author.objects.annotate( age_alias=F('age') ).values( 'age_alias', ).aggregate(sum_age=Sum('age_alias')) age = Author.objects.values('age').aggregate(sum_age=Sum('age')) self.assertEqual(alias_age['sum_age'], age['sum_age']) def test_annotate_over_annotate(self): author = Author.objects.annotate( age_alias=F('age') ).annotate( sum_age=Sum('age_alias') ).get(name="Adrian Holovaty") other_author = Author.objects.annotate( sum_age=Sum('age') 
).get(name="Adrian Holovaty") self.assertEqual(author.sum_age, other_author.sum_age) def test_annotated_aggregate_over_annotated_aggregate(self): with self.assertRaisesMessage(FieldError, "Cannot compute Sum('id__max'): 'id__max' is an aggregate"): Book.objects.annotate(Max('id')).annotate(Sum('id__max')) class MyMax(Max): def as_sql(self, compiler, connection): self.set_source_expressions(self.get_source_expressions()[0:1]) return super().as_sql(compiler, connection) with self.assertRaisesMessage(FieldError, "Cannot compute Max('id__max'): 'id__max' is an aggregate"): Book.objects.annotate(Max('id')).annotate(my_max=MyMax('id__max', 'price')) def test_multi_arg_aggregate(self): class MyMax(Max): output_field = DecimalField() def as_sql(self, compiler, connection): copy = self.copy() copy.set_source_expressions(copy.get_source_expressions()[0:1]) return super(MyMax, copy).as_sql(compiler, connection) with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'): Book.objects.aggregate(MyMax('pages', 'price')) with self.assertRaisesMessage(TypeError, 'Complex annotations require an alias'): Book.objects.annotate(MyMax('pages', 'price')) Book.objects.aggregate(max_field=MyMax('pages', 'price')) def test_add_implementation(self): class MySum(Sum): pass # test completely changing how the output is rendered def lower_case_function_override(self, compiler, connection): sql, params = compiler.compile(self.source_expressions[0]) substitutions = {'function': self.function.lower(), 'expressions': sql, 'distinct': ''} substitutions.update(self.extra) return self.template % substitutions, params setattr(MySum, 'as_' + connection.vendor, lower_case_function_override) qs = Book.objects.annotate( sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField()) ) self.assertEqual(str(qs.query).count('sum('), 1) b1 = qs.get(pk=self.b4.pk) self.assertEqual(b1.sums, 383) # test changing the dict and delegating def lower_case_function_super(self, compiler, connection): self.extra['function'] = self.function.lower() return super(MySum, self).as_sql(compiler, connection) setattr(MySum, 'as_' + connection.vendor, lower_case_function_super) qs = Book.objects.annotate( sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField()) ) self.assertEqual(str(qs.query).count('sum('), 1) b1 = qs.get(pk=self.b4.pk) self.assertEqual(b1.sums, 383) # test overriding all parts of the template def be_evil(self, compiler, connection): substitutions = {'function': 'MAX', 'expressions': '2', 'distinct': ''} substitutions.update(self.extra) return self.template % substitutions, () setattr(MySum, 'as_' + connection.vendor, be_evil) qs = Book.objects.annotate( sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField()) ) self.assertEqual(str(qs.query).count('MAX('), 1) b1 = qs.get(pk=self.b4.pk) self.assertEqual(b1.sums, 2) def test_complex_values_aggregation(self): max_rating = Book.objects.values('rating').aggregate( double_max_rating=Max('rating') + Max('rating')) self.assertEqual(max_rating['double_max_rating'], 5 * 2) max_books_per_rating = Book.objects.values('rating').annotate( books_per_rating=Count('id') + 5 ).aggregate(Max('books_per_rating')) self.assertEqual( max_books_per_rating, {'books_per_rating__max': 3 + 5}) def test_expression_on_aggregation(self): # Create a plain expression class Greatest(Func): function = 'GREATEST' def as_sqlite(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function='MAX', 
**extra_context) qs = Publisher.objects.annotate( price_or_median=Greatest(Avg('book__rating', output_field=DecimalField()), Avg('book__price')) ).filter(price_or_median__gte=F('num_awards')).order_by('num_awards') self.assertQuerysetEqual( qs, [1, 3, 7, 9], lambda v: v.num_awards) qs2 = Publisher.objects.annotate( rating_or_num_awards=Greatest(Avg('book__rating'), F('num_awards'), output_field=FloatField()) ).filter(rating_or_num_awards__gt=F('num_awards')).order_by('num_awards') self.assertQuerysetEqual( qs2, [1, 3], lambda v: v.num_awards) def test_arguments_must_be_expressions(self): msg = 'QuerySet.aggregate() received non-expression(s): %s.' with self.assertRaisesMessage(TypeError, msg % FloatField()): Book.objects.aggregate(FloatField()) with self.assertRaisesMessage(TypeError, msg % True): Book.objects.aggregate(is_book=True) with self.assertRaisesMessage(TypeError, msg % ', '.join([str(FloatField()), 'True'])): Book.objects.aggregate(FloatField(), Avg('price'), is_book=True) def test_aggregation_subquery_annotation(self): """Subquery annotations are excluded from the GROUP BY if they are not explicitly grouped against.""" latest_book_pubdate_qs = Book.objects.filter( publisher=OuterRef('pk') ).order_by('-pubdate').values('pubdate')[:1] publisher_qs = Publisher.objects.annotate( latest_book_pubdate=Subquery(latest_book_pubdate_qs), ).annotate(count=Count('book')) with self.assertNumQueries(1) as ctx: list(publisher_qs) self.assertEqual(ctx[0]['sql'].count('SELECT'), 2) # The GROUP BY should not be by alias either. self.assertEqual(ctx[0]['sql'].lower().count('latest_book_pubdate'), 1) def test_aggregation_subquery_annotation_exists(self): latest_book_pubdate_qs = Book.objects.filter( publisher=OuterRef('pk') ).order_by('-pubdate').values('pubdate')[:1] publisher_qs = Publisher.objects.annotate( latest_book_pubdate=Subquery(latest_book_pubdate_qs), count=Count('book'), ) self.assertTrue(publisher_qs.exists()) def test_aggregation_exists_annotation(self): published_books = Book.objects.filter(publisher=OuterRef('pk')) publisher_qs = Publisher.objects.annotate( published_book=Exists(published_books), count=Count('book'), ).values_list('name', flat=True) self.assertCountEqual(list(publisher_qs), [ 'Apress', 'Morgan Kaufmann', "Jonno's House of Books", 'Prentice Hall', 'Sams', ]) def test_aggregation_subquery_annotation_values(self): """ Subquery annotations and external aliases are excluded from the GROUP BY if they are not selected. 
""" books_qs = Book.objects.annotate( first_author_the_same_age=Subquery( Author.objects.filter( age=OuterRef('contact__friends__age'), ).order_by('age').values('id')[:1], ) ).filter( publisher=self.p1, first_author_the_same_age__isnull=False, ).annotate( min_age=Min('contact__friends__age'), ).values('name', 'min_age').order_by('name') self.assertEqual(list(books_qs), [ {'name': 'Practical Django Projects', 'min_age': 34}, { 'name': 'The Definitive Guide to Django: Web Development Done Right', 'min_age': 29, }, ]) def test_aggregation_subquery_annotation_values_collision(self): books_rating_qs = Book.objects.filter( publisher=OuterRef('pk'), price=Decimal('29.69'), ).values('rating') publisher_qs = Publisher.objects.filter( book__contact__age__gt=20, name=self.p1.name, ).annotate( rating=Subquery(books_rating_qs), contacts_count=Count('book__contact'), ).values('rating').annotate(total_count=Count('rating')) self.assertEqual(list(publisher_qs), [ {'rating': 4.0, 'total_count': 2}, ]) @skipUnlessDBFeature('supports_subqueries_in_group_by') @skipIf( connection.vendor == 'mysql' and 'ONLY_FULL_GROUP_BY' in connection.sql_mode, 'GROUP BY optimization does not work properly when ONLY_FULL_GROUP_BY ' 'mode is enabled on MySQL, see #31331.', ) def test_aggregation_subquery_annotation_multivalued(self): """ Subquery annotations must be included in the GROUP BY if they use potentially multivalued relations (contain the LOOKUP_SEP). """ subquery_qs = Author.objects.filter( pk=OuterRef('pk'), book__name=OuterRef('book__name'), ).values('pk') author_qs = Author.objects.annotate( subquery_id=Subquery(subquery_qs), ).annotate(count=Count('book')) self.assertEqual(author_qs.count(), Author.objects.count()) def test_aggregation_order_by_not_selected_annotation_values(self): result_asc = [ self.b4.pk, self.b3.pk, self.b1.pk, self.b2.pk, self.b5.pk, self.b6.pk, ] result_desc = result_asc[::-1] tests = [ ('min_related_age', result_asc), ('-min_related_age', result_desc), (F('min_related_age'), result_asc), (F('min_related_age').asc(), result_asc), (F('min_related_age').desc(), result_desc), ] for ordering, expected_result in tests: with self.subTest(ordering=ordering): books_qs = Book.objects.annotate( min_age=Min('authors__age'), ).annotate( min_related_age=Coalesce('min_age', 'contact__age'), ).order_by(ordering).values_list('pk', flat=True) self.assertEqual(list(books_qs), expected_result) @skipUnlessDBFeature('supports_subqueries_in_group_by') def test_group_by_subquery_annotation(self): """ Subquery annotations are included in the GROUP BY if they are grouped against. """ long_books_count_qs = Book.objects.filter( publisher=OuterRef('pk'), pages__gt=400, ).values( 'publisher' ).annotate(count=Count('pk')).values('count') long_books_count_breakdown = Publisher.objects.values_list( Subquery(long_books_count_qs, IntegerField()), ).annotate(total=Count('*')) self.assertEqual(dict(long_books_count_breakdown), {None: 1, 1: 4}) @skipUnlessDBFeature('supports_subqueries_in_group_by') def test_group_by_exists_annotation(self): """ Exists annotations are included in the GROUP BY if they are grouped against. 
""" long_books_qs = Book.objects.filter( publisher=OuterRef('pk'), pages__gt=800, ) has_long_books_breakdown = Publisher.objects.values_list( Exists(long_books_qs), ).annotate(total=Count('*')) self.assertEqual(dict(has_long_books_breakdown), {True: 2, False: 3}) @skipUnlessDBFeature('supports_subqueries_in_group_by') def test_aggregation_subquery_annotation_related_field(self): publisher = Publisher.objects.create(name=self.a9.name, num_awards=2) book = Book.objects.create( isbn='159059999', name='Test book.', pages=819, rating=2.5, price=Decimal('14.44'), contact=self.a9, publisher=publisher, pubdate=datetime.date(2019, 12, 6), ) book.authors.add(self.a5, self.a6, self.a7) books_qs = Book.objects.annotate( contact_publisher=Subquery( Publisher.objects.filter( pk=OuterRef('publisher'), name=OuterRef('contact__name'), ).values('name')[:1], ) ).filter( contact_publisher__isnull=False, ).annotate(count=Count('authors')) self.assertSequenceEqual(books_qs, [book]) # FIXME: GROUP BY doesn't need to include a subquery with # non-multivalued JOINs, see Col.possibly_multivalued (refs #31150): # with self.assertNumQueries(1) as ctx: # self.assertSequenceEqual(books_qs, [book]) # self.assertEqual(ctx[0]['sql'].count('SELECT'), 2)
import os import unittest import warnings from io import StringIO from unittest import mock from django.conf import settings from django.contrib.staticfiles.finders import get_finder, get_finders from django.contrib.staticfiles.storage import staticfiles_storage from django.core.exceptions import ImproperlyConfigured from django.core.files.storage import default_storage from django.db import ( IntegrityError, connection, connections, models, router, transaction, ) from django.forms import EmailField, IntegerField from django.http import HttpResponse from django.template.loader import render_to_string from django.test import ( SimpleTestCase, TestCase, TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature, ) from django.test.html import HTMLParseError, parse_html from django.test.utils import ( CaptureQueriesContext, TestContextDecorator, isolate_apps, override_settings, setup_test_environment, ) from django.urls import NoReverseMatch, path, reverse, reverse_lazy from .models import Car, Person, PossessedCar from .views import empty_response class SkippingTestCase(SimpleTestCase): def _assert_skipping(self, func, expected_exc, msg=None): try: if msg is not None: with self.assertRaisesMessage(expected_exc, msg): func() else: with self.assertRaises(expected_exc): func() except unittest.SkipTest: self.fail('%s should not result in a skipped test.' % func.__name__) def test_skip_unless_db_feature(self): """ Testing the django.test.skipUnlessDBFeature decorator. """ # Total hack, but it works, just want an attribute that's always true. @skipUnlessDBFeature("__class__") def test_func(): raise ValueError @skipUnlessDBFeature("notprovided") def test_func2(): raise ValueError @skipUnlessDBFeature("__class__", "__class__") def test_func3(): raise ValueError @skipUnlessDBFeature("__class__", "notprovided") def test_func4(): raise ValueError self._assert_skipping(test_func, ValueError) self._assert_skipping(test_func2, unittest.SkipTest) self._assert_skipping(test_func3, ValueError) self._assert_skipping(test_func4, unittest.SkipTest) class SkipTestCase(SimpleTestCase): @skipUnlessDBFeature('missing') def test_foo(self): pass self._assert_skipping( SkipTestCase('test_foo').test_foo, ValueError, "skipUnlessDBFeature cannot be used on test_foo (test_utils.tests." "SkippingTestCase.test_skip_unless_db_feature.<locals>.SkipTestCase) " "as SkippingTestCase.test_skip_unless_db_feature.<locals>.SkipTestCase " "doesn't allow queries against the 'default' database." ) def test_skip_if_db_feature(self): """ Testing the django.test.skipIfDBFeature decorator. """ @skipIfDBFeature("__class__") def test_func(): raise ValueError @skipIfDBFeature("notprovided") def test_func2(): raise ValueError @skipIfDBFeature("__class__", "__class__") def test_func3(): raise ValueError @skipIfDBFeature("__class__", "notprovided") def test_func4(): raise ValueError @skipIfDBFeature("notprovided", "notprovided") def test_func5(): raise ValueError self._assert_skipping(test_func, unittest.SkipTest) self._assert_skipping(test_func2, ValueError) self._assert_skipping(test_func3, unittest.SkipTest) self._assert_skipping(test_func4, unittest.SkipTest) self._assert_skipping(test_func5, ValueError) class SkipTestCase(SimpleTestCase): @skipIfDBFeature('missing') def test_foo(self): pass self._assert_skipping( SkipTestCase('test_foo').test_foo, ValueError, "skipIfDBFeature cannot be used on test_foo (test_utils.tests." 
"SkippingTestCase.test_skip_if_db_feature.<locals>.SkipTestCase) " "as SkippingTestCase.test_skip_if_db_feature.<locals>.SkipTestCase " "doesn't allow queries against the 'default' database." ) class SkippingClassTestCase(TestCase): def test_skip_class_unless_db_feature(self): @skipUnlessDBFeature("__class__") class NotSkippedTests(TestCase): def test_dummy(self): return @skipUnlessDBFeature("missing") @skipIfDBFeature("__class__") class SkippedTests(TestCase): def test_will_be_skipped(self): self.fail("We should never arrive here.") @skipIfDBFeature("__dict__") class SkippedTestsSubclass(SkippedTests): pass test_suite = unittest.TestSuite() test_suite.addTest(NotSkippedTests('test_dummy')) try: test_suite.addTest(SkippedTests('test_will_be_skipped')) test_suite.addTest(SkippedTestsSubclass('test_will_be_skipped')) except unittest.SkipTest: self.fail('SkipTest should not be raised here.') result = unittest.TextTestRunner(stream=StringIO()).run(test_suite) self.assertEqual(result.testsRun, 3) self.assertEqual(len(result.skipped), 2) self.assertEqual(result.skipped[0][1], 'Database has feature(s) __class__') self.assertEqual(result.skipped[1][1], 'Database has feature(s) __class__') def test_missing_default_databases(self): @skipIfDBFeature('missing') class MissingDatabases(SimpleTestCase): def test_assertion_error(self): pass suite = unittest.TestSuite() try: suite.addTest(MissingDatabases('test_assertion_error')) except unittest.SkipTest: self.fail("SkipTest should not be raised at this stage") runner = unittest.TextTestRunner(stream=StringIO()) msg = ( "skipIfDBFeature cannot be used on <class 'test_utils.tests." "SkippingClassTestCase.test_missing_default_databases.<locals>." "MissingDatabases'> as it doesn't allow queries against the " "'default' database." ) with self.assertRaisesMessage(ValueError, msg): runner.run(suite) @override_settings(ROOT_URLCONF='test_utils.urls') class AssertNumQueriesTests(TestCase): def test_assert_num_queries(self): def test_func(): raise ValueError with self.assertRaises(ValueError): self.assertNumQueries(2, test_func) def test_assert_num_queries_with_client(self): person = Person.objects.create(name='test') self.assertNumQueries( 1, self.client.get, "/test_utils/get_person/%s/" % person.pk ) self.assertNumQueries( 1, self.client.get, "/test_utils/get_person/%s/" % person.pk ) def test_func(): self.client.get("/test_utils/get_person/%s/" % person.pk) self.client.get("/test_utils/get_person/%s/" % person.pk) self.assertNumQueries(2, test_func) @unittest.skipUnless( connection.vendor != 'sqlite' or not connection.is_in_memory_db(), 'For SQLite in-memory tests, closing the connection destroys the database.' ) class AssertNumQueriesUponConnectionTests(TransactionTestCase): available_apps = [] def test_ignores_connection_configuration_queries(self): real_ensure_connection = connection.ensure_connection connection.close() def make_configuration_query(): is_opening_connection = connection.connection is None real_ensure_connection() if is_opening_connection: # Avoid infinite recursion. Creating a cursor calls # ensure_connection() which is currently mocked by this method. 
with connection.cursor() as cursor: cursor.execute('SELECT 1' + connection.features.bare_select_suffix) ensure_connection = 'django.db.backends.base.base.BaseDatabaseWrapper.ensure_connection' with mock.patch(ensure_connection, side_effect=make_configuration_query): with self.assertNumQueries(1): list(Car.objects.all()) class AssertQuerysetEqualTests(TestCase): @classmethod def setUpTestData(cls): cls.p1 = Person.objects.create(name='p1') cls.p2 = Person.objects.create(name='p2') def test_ordered(self): self.assertQuerysetEqual( Person.objects.all().order_by('name'), [repr(self.p1), repr(self.p2)] ) def test_unordered(self): self.assertQuerysetEqual( Person.objects.all().order_by('name'), [repr(self.p2), repr(self.p1)], ordered=False ) def test_transform(self): self.assertQuerysetEqual( Person.objects.all().order_by('name'), [self.p1.pk, self.p2.pk], transform=lambda x: x.pk ) def test_undefined_order(self): # Using an unordered queryset with more than one ordered value # is an error. msg = 'Trying to compare non-ordered queryset against more than one ordered values' with self.assertRaisesMessage(ValueError, msg): self.assertQuerysetEqual( Person.objects.all(), [repr(self.p1), repr(self.p2)] ) # No error for one value. self.assertQuerysetEqual( Person.objects.filter(name='p1'), [repr(self.p1)] ) def test_repeated_values(self): """ assertQuerysetEqual checks the number of appearance of each item when used with option ordered=False. """ batmobile = Car.objects.create(name='Batmobile') k2000 = Car.objects.create(name='K 2000') PossessedCar.objects.bulk_create([ PossessedCar(car=batmobile, belongs_to=self.p1), PossessedCar(car=batmobile, belongs_to=self.p1), PossessedCar(car=k2000, belongs_to=self.p1), PossessedCar(car=k2000, belongs_to=self.p1), PossessedCar(car=k2000, belongs_to=self.p1), PossessedCar(car=k2000, belongs_to=self.p1), ]) with self.assertRaises(AssertionError): self.assertQuerysetEqual( self.p1.cars.all(), [repr(batmobile), repr(k2000)], ordered=False ) self.assertQuerysetEqual( self.p1.cars.all(), [repr(batmobile)] * 2 + [repr(k2000)] * 4, ordered=False ) @override_settings(ROOT_URLCONF='test_utils.urls') class CaptureQueriesContextManagerTests(TestCase): @classmethod def setUpTestData(cls): cls.person_pk = str(Person.objects.create(name='test').pk) def test_simple(self): with CaptureQueriesContext(connection) as captured_queries: Person.objects.get(pk=self.person_pk) self.assertEqual(len(captured_queries), 1) self.assertIn(self.person_pk, captured_queries[0]['sql']) with CaptureQueriesContext(connection) as captured_queries: pass self.assertEqual(0, len(captured_queries)) def test_within(self): with CaptureQueriesContext(connection) as captured_queries: Person.objects.get(pk=self.person_pk) self.assertEqual(len(captured_queries), 1) self.assertIn(self.person_pk, captured_queries[0]['sql']) def test_nested(self): with CaptureQueriesContext(connection) as captured_queries: Person.objects.count() with CaptureQueriesContext(connection) as nested_captured_queries: Person.objects.count() self.assertEqual(1, len(nested_captured_queries)) self.assertEqual(2, len(captured_queries)) def test_failure(self): with self.assertRaises(TypeError): with CaptureQueriesContext(connection): raise TypeError def test_with_client(self): with CaptureQueriesContext(connection) as captured_queries: self.client.get("/test_utils/get_person/%s/" % self.person_pk) self.assertEqual(len(captured_queries), 1) self.assertIn(self.person_pk, captured_queries[0]['sql']) with CaptureQueriesContext(connection) as 
captured_queries: self.client.get("/test_utils/get_person/%s/" % self.person_pk) self.assertEqual(len(captured_queries), 1) self.assertIn(self.person_pk, captured_queries[0]['sql']) with CaptureQueriesContext(connection) as captured_queries: self.client.get("/test_utils/get_person/%s/" % self.person_pk) self.client.get("/test_utils/get_person/%s/" % self.person_pk) self.assertEqual(len(captured_queries), 2) self.assertIn(self.person_pk, captured_queries[0]['sql']) self.assertIn(self.person_pk, captured_queries[1]['sql']) @override_settings(ROOT_URLCONF='test_utils.urls') class AssertNumQueriesContextManagerTests(TestCase): def test_simple(self): with self.assertNumQueries(0): pass with self.assertNumQueries(1): Person.objects.count() with self.assertNumQueries(2): Person.objects.count() Person.objects.count() def test_failure(self): msg = ( '1 != 2 : 1 queries executed, 2 expected\nCaptured queries were:\n' '1.' ) with self.assertRaisesMessage(AssertionError, msg): with self.assertNumQueries(2): Person.objects.count() with self.assertRaises(TypeError): with self.assertNumQueries(4000): raise TypeError def test_with_client(self): person = Person.objects.create(name="test") with self.assertNumQueries(1): self.client.get("/test_utils/get_person/%s/" % person.pk) with self.assertNumQueries(1): self.client.get("/test_utils/get_person/%s/" % person.pk) with self.assertNumQueries(2): self.client.get("/test_utils/get_person/%s/" % person.pk) self.client.get("/test_utils/get_person/%s/" % person.pk) @override_settings(ROOT_URLCONF='test_utils.urls') class AssertTemplateUsedContextManagerTests(SimpleTestCase): def test_usage(self): with self.assertTemplateUsed('template_used/base.html'): render_to_string('template_used/base.html') with self.assertTemplateUsed(template_name='template_used/base.html'): render_to_string('template_used/base.html') with self.assertTemplateUsed('template_used/base.html'): render_to_string('template_used/include.html') with self.assertTemplateUsed('template_used/base.html'): render_to_string('template_used/extends.html') with self.assertTemplateUsed('template_used/base.html'): render_to_string('template_used/base.html') render_to_string('template_used/base.html') def test_nested_usage(self): with self.assertTemplateUsed('template_used/base.html'): with self.assertTemplateUsed('template_used/include.html'): render_to_string('template_used/include.html') with self.assertTemplateUsed('template_used/extends.html'): with self.assertTemplateUsed('template_used/base.html'): render_to_string('template_used/extends.html') with self.assertTemplateUsed('template_used/base.html'): with self.assertTemplateUsed('template_used/alternative.html'): render_to_string('template_used/alternative.html') render_to_string('template_used/base.html') with self.assertTemplateUsed('template_used/base.html'): render_to_string('template_used/extends.html') with self.assertTemplateNotUsed('template_used/base.html'): render_to_string('template_used/alternative.html') render_to_string('template_used/base.html') def test_not_used(self): with self.assertTemplateNotUsed('template_used/base.html'): pass with self.assertTemplateNotUsed('template_used/alternative.html'): pass def test_error_message(self): msg = 'template_used/base.html was not rendered. No template was rendered.' 
with self.assertRaisesMessage(AssertionError, msg): with self.assertTemplateUsed('template_used/base.html'): pass with self.assertRaisesMessage(AssertionError, msg): with self.assertTemplateUsed(template_name='template_used/base.html'): pass msg2 = ( 'template_used/base.html was not rendered. Following templates ' 'were rendered: template_used/alternative.html' ) with self.assertRaisesMessage(AssertionError, msg2): with self.assertTemplateUsed('template_used/base.html'): render_to_string('template_used/alternative.html') with self.assertRaisesMessage(AssertionError, 'No templates used to render the response'): response = self.client.get('/test_utils/no_template_used/') self.assertTemplateUsed(response, 'template_used/base.html') def test_failure(self): msg = 'response and/or template_name argument must be provided' with self.assertRaisesMessage(TypeError, msg): with self.assertTemplateUsed(): pass msg = 'No templates used to render the response' with self.assertRaisesMessage(AssertionError, msg): with self.assertTemplateUsed(''): pass with self.assertRaisesMessage(AssertionError, msg): with self.assertTemplateUsed(''): render_to_string('template_used/base.html') with self.assertRaisesMessage(AssertionError, msg): with self.assertTemplateUsed(template_name=''): pass msg = ( 'template_used/base.html was not rendered. Following ' 'templates were rendered: template_used/alternative.html' ) with self.assertRaisesMessage(AssertionError, msg): with self.assertTemplateUsed('template_used/base.html'): render_to_string('template_used/alternative.html') def test_assert_used_on_http_response(self): response = HttpResponse() error_msg = ( 'assertTemplateUsed() and assertTemplateNotUsed() are only ' 'usable on responses fetched using the Django test Client.' ) with self.assertRaisesMessage(ValueError, error_msg): self.assertTemplateUsed(response, 'template.html') with self.assertRaisesMessage(ValueError, error_msg): self.assertTemplateNotUsed(response, 'template.html') class HTMLEqualTests(SimpleTestCase): def test_html_parser(self): element = parse_html('<div><p>Hello</p></div>') self.assertEqual(len(element.children), 1) self.assertEqual(element.children[0].name, 'p') self.assertEqual(element.children[0].children[0], 'Hello') parse_html('<p>') parse_html('<p attr>') dom = parse_html('<p>foo') self.assertEqual(len(dom.children), 1) self.assertEqual(dom.name, 'p') self.assertEqual(dom[0], 'foo') def test_parse_html_in_script(self): parse_html('<script>var a = "<p" + ">";</script>') parse_html(''' <script> var js_sha_link='<p>***</p>'; </script> ''') # script content will be parsed to text dom = parse_html(''' <script><p>foo</p> '</scr'+'ipt>' <span>bar</span></script> ''') self.assertEqual(len(dom.children), 1) self.assertEqual(dom.children[0], "<p>foo</p> '</scr'+'ipt>' <span>bar</span>") def test_self_closing_tags(self): self_closing_tags = [ 'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'link', 'meta', 'param', 'source', 'track', 'wbr', # Deprecated tags 'frame', 'spacer', ] for tag in self_closing_tags: with self.subTest(tag): dom = parse_html('<p>Hello <%s> world</p>' % tag) self.assertEqual(len(dom.children), 3) self.assertEqual(dom[0], 'Hello') self.assertEqual(dom[1].name, tag) self.assertEqual(dom[2], 'world') dom = parse_html('<p>Hello <%s /> world</p>' % tag) self.assertEqual(len(dom.children), 3) self.assertEqual(dom[0], 'Hello') self.assertEqual(dom[1].name, tag) self.assertEqual(dom[2], 'world') def test_simple_equal_html(self): self.assertHTMLEqual('', '') 
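        # assertHTMLEqual compares parsed DOM trees, so the pairs below are
        # treated as equal despite differing whitespace, attribute order, and
        # self-closing tag syntax.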
        self.assertHTMLEqual('<p></p>', '<p></p>')
        self.assertHTMLEqual('<p></p>', ' <p> </p> ')
        self.assertHTMLEqual(
            '<div><p>Hello</p></div>',
            '<div><p>Hello</p></div>')
        self.assertHTMLEqual(
            '<div><p>Hello</p></div>',
            '<div> <p>Hello</p> </div>')
        self.assertHTMLEqual(
            '<div>\n<p>Hello</p></div>',
            '<div><p>Hello</p></div>\n')
        self.assertHTMLEqual(
            '<div><p>Hello\nWorld !</p></div>',
            '<div><p>Hello World\n!</p></div>')
        self.assertHTMLEqual(
            '<div><p>Hello\nWorld\n!</p></div>',
            '<div><p>Hello World\n!</p></div>')
        self.assertHTMLEqual(
            '<p>Hello  World   !</p>',
            '<p>Hello World\n\n!</p>')
        self.assertHTMLEqual('<p> </p>', '<p></p>')
        self.assertHTMLEqual('<p/>', '<p></p>')
        self.assertHTMLEqual('<p />', '<p></p>')
        self.assertHTMLEqual('<input checked>', '<input checked="checked">')
        self.assertHTMLEqual('<p>Hello', '<p> Hello')
        self.assertHTMLEqual('<p>Hello</p>World', '<p>Hello</p> World')

    def test_ignore_comments(self):
        self.assertHTMLEqual(
            '<div>Hello<!-- this is a comment --> World!</div>',
            '<div>Hello World!</div>')

    def test_unequal_html(self):
        self.assertHTMLNotEqual('<p>Hello</p>', '<p>Hello!</p>')
        self.assertHTMLNotEqual('<p>foo&#20;bar</p>', '<p>foo&nbsp;bar</p>')
        self.assertHTMLNotEqual('<p>foo bar</p>', '<p>foo &nbsp;bar</p>')
        self.assertHTMLNotEqual('<p>foo nbsp</p>', '<p>foo &nbsp;</p>')
        self.assertHTMLNotEqual('<p>foo #20</p>', '<p>foo &#20;</p>')
        self.assertHTMLNotEqual(
            '<p><span>Hello</span><span>World</span></p>',
            '<p><span>Hello</span>World</p>')
        self.assertHTMLNotEqual(
            '<p><span>Hello</span>World</p>',
            '<p><span>Hello</span><span>World</span></p>')

    def test_attributes(self):
        self.assertHTMLEqual(
            '<input type="text" id="id_name" />',
            '<input id="id_name" type="text" />')
        self.assertHTMLEqual(
            '''<input type='text' id="id_name" />''',
            '<input id="id_name" type="text" />')
        self.assertHTMLNotEqual(
            '<input type="text" id="id_name" />',
            '<input type="password" id="id_name" />')

    def test_class_attribute(self):
        pairs = [
            ('<p class="foo bar"></p>', '<p class="bar foo"></p>'),
            ('<p class=" foo bar "></p>', '<p class="bar foo"></p>'),
            ('<p class="   foo   bar   "></p>', '<p class="bar foo"></p>'),
            ('<p class="foo\tbar"></p>', '<p class="bar foo"></p>'),
            ('<p class="\tfoo\tbar\t"></p>', '<p class="bar foo"></p>'),
            ('<p class="\t\t\tfoo\t\t\tbar\t\t\t"></p>', '<p class="bar foo"></p>'),
            ('<p class="\t \nfoo \t\nbar\n\t "></p>', '<p class="bar foo"></p>'),
        ]
        for html1, html2 in pairs:
            with self.subTest(html1):
                self.assertHTMLEqual(html1, html2)

    def test_normalize_refs(self):
        pairs = [
            ('&#39;', '&#x27;'),
            ('&#39;', "'"),
            ('&#x27;', '&#39;'),
            ('&#x27;', "'"),
            ("'", '&#39;'),
            ("'", '&#x27;'),
            ('&amp;', '&#38;'),
            ('&amp;', '&#x26;'),
            ('&amp;', '&'),
            ('&#38;', '&amp;'),
            ('&#38;', '&#x26;'),
            ('&#38;', '&'),
            ('&#x26;', '&amp;'),
            ('&#x26;', '&#38;'),
            ('&#x26;', '&'),
            ('&', '&amp;'),
            ('&', '&#38;'),
            ('&', '&#x26;'),
        ]
        for pair in pairs:
            with self.subTest(repr(pair)):
                self.assertHTMLEqual(*pair)

    def test_complex_examples(self):
        self.assertHTMLEqual(
            """<tr><th><label for="id_first_name">First name:</label></th>
<td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th>
<td><input type="text" id="id_last_name" name="last_name" value="Lennon" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th>
<td><input type="text" value="1940-10-9" name="birthday" id="id_birthday" /></td></tr>""",
            """
        <tr><th>
            <label for="id_first_name">First name:</label></th><td>
            <input type="text" name="first_name" value="John"
id="id_first_name" /> </td></tr> <tr><th> <label for="id_last_name">Last name:</label></th><td> <input type="text" name="last_name" value="Lennon" id="id_last_name" /> </td></tr> <tr><th> <label for="id_birthday">Birthday:</label></th><td> <input type="text" name="birthday" value="1940-10-9" id="id_birthday" /> </td></tr> """) self.assertHTMLEqual( """<!DOCTYPE html> <html> <head> <link rel="stylesheet"> <title>Document</title> <meta attribute="value"> </head> <body> <p> This is a valid paragraph <div> this is a div AFTER the p</div> </body> </html>""", """ <html> <head> <link rel="stylesheet"> <title>Document</title> <meta attribute="value"> </head> <body> <p> This is a valid paragraph <!-- browsers would close the p tag here --> <div> this is a div AFTER the p</div> </p> <!-- this is invalid HTML parsing, but it should make no difference in most cases --> </body> </html>""") def test_html_contain(self): # equal html contains each other dom1 = parse_html('<p>foo') dom2 = parse_html('<p>foo</p>') self.assertIn(dom1, dom2) self.assertIn(dom2, dom1) dom2 = parse_html('<div><p>foo</p></div>') self.assertIn(dom1, dom2) self.assertNotIn(dom2, dom1) self.assertNotIn('<p>foo</p>', dom2) self.assertIn('foo', dom2) # when a root element is used ... dom1 = parse_html('<p>foo</p><p>bar</p>') dom2 = parse_html('<p>foo</p><p>bar</p>') self.assertIn(dom1, dom2) dom1 = parse_html('<p>foo</p>') self.assertIn(dom1, dom2) dom1 = parse_html('<p>bar</p>') self.assertIn(dom1, dom2) dom1 = parse_html('<div><p>foo</p><p>bar</p></div>') self.assertIn(dom2, dom1) def test_count(self): # equal html contains each other one time dom1 = parse_html('<p>foo') dom2 = parse_html('<p>foo</p>') self.assertEqual(dom1.count(dom2), 1) self.assertEqual(dom2.count(dom1), 1) dom2 = parse_html('<p>foo</p><p>bar</p>') self.assertEqual(dom2.count(dom1), 1) dom2 = parse_html('<p>foo foo</p><p>foo</p>') self.assertEqual(dom2.count('foo'), 3) dom2 = parse_html('<p class="bar">foo</p>') self.assertEqual(dom2.count('bar'), 0) self.assertEqual(dom2.count('class'), 0) self.assertEqual(dom2.count('p'), 0) self.assertEqual(dom2.count('o'), 2) dom2 = parse_html('<p>foo</p><p>foo</p>') self.assertEqual(dom2.count(dom1), 2) dom2 = parse_html('<div><p>foo<input type=""></p><p>foo</p></div>') self.assertEqual(dom2.count(dom1), 1) dom2 = parse_html('<div><div><p>foo</p></div></div>') self.assertEqual(dom2.count(dom1), 1) dom2 = parse_html('<p>foo<p>foo</p></p>') self.assertEqual(dom2.count(dom1), 1) dom2 = parse_html('<p>foo<p>bar</p></p>') self.assertEqual(dom2.count(dom1), 0) # html with a root element contains the same html with no root element dom1 = parse_html('<p>foo</p><p>bar</p>') dom2 = parse_html('<div><p>foo</p><p>bar</p></div>') self.assertEqual(dom2.count(dom1), 1) def test_parsing_errors(self): with self.assertRaises(AssertionError): self.assertHTMLEqual('<p>', '') with self.assertRaises(AssertionError): self.assertHTMLEqual('', '<p>') error_msg = ( "First argument is not valid HTML:\n" "('Unexpected end tag `div` (Line 1, Column 6)', (1, 6))" ) with self.assertRaisesMessage(AssertionError, error_msg): self.assertHTMLEqual('< div></ div>', '<div></div>') with self.assertRaises(HTMLParseError): parse_html('</p>') def test_contains_html(self): response = HttpResponse('''<body> This is a form: <form method="get"> <input type="text" name="Hello" /> </form></body>''') self.assertNotContains(response, "<input name='Hello' type='text'>") self.assertContains(response, '<form method="get">') self.assertContains(response, "<input name='Hello' 
type='text'>", html=True) self.assertNotContains(response, '<form method="get">', html=True) invalid_response = HttpResponse('''<body <bad>>''') with self.assertRaises(AssertionError): self.assertContains(invalid_response, '<p></p>') with self.assertRaises(AssertionError): self.assertContains(response, '<p "whats" that>') def test_unicode_handling(self): response = HttpResponse('<p class="help">Some help text for the title (with Unicode ŠĐĆŽćžšđ)</p>') self.assertContains( response, '<p class="help">Some help text for the title (with Unicode ŠĐĆŽćžšđ)</p>', html=True ) class JSONEqualTests(SimpleTestCase): def test_simple_equal(self): json1 = '{"attr1": "foo", "attr2":"baz"}' json2 = '{"attr1": "foo", "attr2":"baz"}' self.assertJSONEqual(json1, json2) def test_simple_equal_unordered(self): json1 = '{"attr1": "foo", "attr2":"baz"}' json2 = '{"attr2":"baz", "attr1": "foo"}' self.assertJSONEqual(json1, json2) def test_simple_equal_raise(self): json1 = '{"attr1": "foo", "attr2":"baz"}' json2 = '{"attr2":"baz"}' with self.assertRaises(AssertionError): self.assertJSONEqual(json1, json2) def test_equal_parsing_errors(self): invalid_json = '{"attr1": "foo, "attr2":"baz"}' valid_json = '{"attr1": "foo", "attr2":"baz"}' with self.assertRaises(AssertionError): self.assertJSONEqual(invalid_json, valid_json) with self.assertRaises(AssertionError): self.assertJSONEqual(valid_json, invalid_json) def test_simple_not_equal(self): json1 = '{"attr1": "foo", "attr2":"baz"}' json2 = '{"attr2":"baz"}' self.assertJSONNotEqual(json1, json2) def test_simple_not_equal_raise(self): json1 = '{"attr1": "foo", "attr2":"baz"}' json2 = '{"attr1": "foo", "attr2":"baz"}' with self.assertRaises(AssertionError): self.assertJSONNotEqual(json1, json2) def test_not_equal_parsing_errors(self): invalid_json = '{"attr1": "foo, "attr2":"baz"}' valid_json = '{"attr1": "foo", "attr2":"baz"}' with self.assertRaises(AssertionError): self.assertJSONNotEqual(invalid_json, valid_json) with self.assertRaises(AssertionError): self.assertJSONNotEqual(valid_json, invalid_json) class XMLEqualTests(SimpleTestCase): def test_simple_equal(self): xml1 = "<elem attr1='a' attr2='b' />" xml2 = "<elem attr1='a' attr2='b' />" self.assertXMLEqual(xml1, xml2) def test_simple_equal_unordered(self): xml1 = "<elem attr1='a' attr2='b' />" xml2 = "<elem attr2='b' attr1='a' />" self.assertXMLEqual(xml1, xml2) def test_simple_equal_raise(self): xml1 = "<elem attr1='a' />" xml2 = "<elem attr2='b' attr1='a' />" with self.assertRaises(AssertionError): self.assertXMLEqual(xml1, xml2) def test_simple_equal_raises_message(self): xml1 = "<elem attr1='a' />" xml2 = "<elem attr2='b' attr1='a' />" msg = '''{xml1} != {xml2} - <elem attr1='a' /> + <elem attr2='b' attr1='a' /> ? 
++++++++++ '''.format(xml1=repr(xml1), xml2=repr(xml2)) with self.assertRaisesMessage(AssertionError, msg): self.assertXMLEqual(xml1, xml2) def test_simple_not_equal(self): xml1 = "<elem attr1='a' attr2='c' />" xml2 = "<elem attr1='a' attr2='b' />" self.assertXMLNotEqual(xml1, xml2) def test_simple_not_equal_raise(self): xml1 = "<elem attr1='a' attr2='b' />" xml2 = "<elem attr2='b' attr1='a' />" with self.assertRaises(AssertionError): self.assertXMLNotEqual(xml1, xml2) def test_parsing_errors(self): xml_unvalid = "<elem attr1='a attr2='b' />" xml2 = "<elem attr2='b' attr1='a' />" with self.assertRaises(AssertionError): self.assertXMLNotEqual(xml_unvalid, xml2) def test_comment_root(self): xml1 = "<?xml version='1.0'?><!-- comment1 --><elem attr1='a' attr2='b' />" xml2 = "<?xml version='1.0'?><!-- comment2 --><elem attr2='b' attr1='a' />" self.assertXMLEqual(xml1, xml2) def test_simple_equal_with_leading_or_trailing_whitespace(self): xml1 = "<elem>foo</elem> \t\n" xml2 = " \t\n<elem>foo</elem>" self.assertXMLEqual(xml1, xml2) def test_simple_not_equal_with_whitespace_in_the_middle(self): xml1 = "<elem>foo</elem><elem>bar</elem>" xml2 = "<elem>foo</elem> <elem>bar</elem>" self.assertXMLNotEqual(xml1, xml2) def test_doctype_root(self): xml1 = '<?xml version="1.0"?><!DOCTYPE root SYSTEM "example1.dtd"><root />' xml2 = '<?xml version="1.0"?><!DOCTYPE root SYSTEM "example2.dtd"><root />' self.assertXMLEqual(xml1, xml2) def test_processing_instruction(self): xml1 = ( '<?xml version="1.0"?>' '<?xml-model href="http://www.example1.com"?><root />' ) xml2 = ( '<?xml version="1.0"?>' '<?xml-model href="http://www.example2.com"?><root />' ) self.assertXMLEqual(xml1, xml2) self.assertXMLEqual( '<?xml-stylesheet href="style1.xslt" type="text/xsl"?><root />', '<?xml-stylesheet href="style2.xslt" type="text/xsl"?><root />', ) class SkippingExtraTests(TestCase): fixtures = ['should_not_be_loaded.json'] # HACK: This depends on internals of our TestCase subclasses def __call__(self, result=None): # Detect fixture loading by counting SQL queries, should be zero with self.assertNumQueries(0): super().__call__(result) @unittest.skip("Fixture loading should not be performed for skipped tests.") def test_fixtures_are_skipped(self): pass class AssertRaisesMsgTest(SimpleTestCase): def test_assert_raises_message(self): msg = "'Expected message' not found in 'Unexpected message'" # context manager form of assertRaisesMessage() with self.assertRaisesMessage(AssertionError, msg): with self.assertRaisesMessage(ValueError, "Expected message"): raise ValueError("Unexpected message") # callable form def func(): raise ValueError("Unexpected message") with self.assertRaisesMessage(AssertionError, msg): self.assertRaisesMessage(ValueError, "Expected message", func) def test_special_re_chars(self): """assertRaisesMessage shouldn't interpret RE special chars.""" def func1(): raise ValueError("[.*x+]y?") with self.assertRaisesMessage(ValueError, "[.*x+]y?"): func1() class AssertWarnsMessageTests(SimpleTestCase): def test_context_manager(self): with self.assertWarnsMessage(UserWarning, 'Expected message'): warnings.warn('Expected message', UserWarning) def test_context_manager_failure(self): msg = "Expected message' not found in 'Unexpected message'" with self.assertRaisesMessage(AssertionError, msg): with self.assertWarnsMessage(UserWarning, 'Expected message'): warnings.warn('Unexpected message', UserWarning) def test_callable(self): def func(): warnings.warn('Expected message', UserWarning) self.assertWarnsMessage(UserWarning, 
            'Expected message', func)

    def test_special_re_chars(self):
        def func1():
            warnings.warn('[.*x+]y?', UserWarning)
        with self.assertWarnsMessage(UserWarning, '[.*x+]y?'):
            func1()


class AssertFieldOutputTests(SimpleTestCase):

    def test_assert_field_output(self):
        error_invalid = ['Enter a valid email address.']
        self.assertFieldOutput(EmailField, {'a@a.com': 'a@a.com'}, {'aaa': error_invalid})
        with self.assertRaises(AssertionError):
            self.assertFieldOutput(EmailField, {'a@a.com': 'a@a.com'}, {'aaa': error_invalid + ['Another error']})
        with self.assertRaises(AssertionError):
            self.assertFieldOutput(EmailField, {'a@a.com': 'Wrong output'}, {'aaa': error_invalid})
        with self.assertRaises(AssertionError):
            self.assertFieldOutput(
                EmailField, {'a@a.com': 'a@a.com'}, {'aaa': ['Come on, gimme some well formatted data, dude.']}
            )

    def test_custom_required_message(self):
        class MyCustomField(IntegerField):
            default_error_messages = {
                'required': 'This is really required.',
            }
        self.assertFieldOutput(MyCustomField, {}, {}, empty_value=None)


@override_settings(ROOT_URLCONF='test_utils.urls')
class AssertURLEqualTests(SimpleTestCase):
    def test_equal(self):
        valid_tests = (
            ('http://example.com/?', 'http://example.com/'),
            ('http://example.com/?x=1&', 'http://example.com/?x=1'),
            ('http://example.com/?x=1&y=2', 'http://example.com/?y=2&x=1'),
            ('http://example.com/?x=2&y=1', 'http://example.com/?y=1&x=2'),
            ('http://example.com/?x=1&y=2&a=1&a=2', 'http://example.com/?a=1&a=2&y=2&x=1'),
            ('/path/to/?x=1&y=2&z=3', '/path/to/?z=3&y=2&x=1'),
            ('?x=1&y=2&z=3', '?z=3&y=2&x=1'),
            ('/test_utils/no_template_used/', reverse_lazy('no_template_used')),
        )
        for url1, url2 in valid_tests:
            with self.subTest(url=url1):
                self.assertURLEqual(url1, url2)

    def test_not_equal(self):
        invalid_tests = (
            # Protocol must be the same.
            ('http://example.com/', 'https://example.com/'),
            ('http://example.com/?x=1&x=2', 'https://example.com/?x=2&x=1'),
            ('http://example.com/?x=1&y=bar&x=2', 'https://example.com/?y=bar&x=2&x=1'),
            # Parameters of the same name must be in the same order.
('/path/to?a=1&a=2', '/path/to/?a=2&a=1') ) for url1, url2 in invalid_tests: with self.subTest(url=url1), self.assertRaises(AssertionError): self.assertURLEqual(url1, url2) def test_message(self): msg = ( "Expected 'http://example.com/?x=1&x=2' to equal " "'https://example.com/?x=2&x=1'" ) with self.assertRaisesMessage(AssertionError, msg): self.assertURLEqual('http://example.com/?x=1&x=2', 'https://example.com/?x=2&x=1') def test_msg_prefix(self): msg = ( "Prefix: Expected 'http://example.com/?x=1&x=2' to equal " "'https://example.com/?x=2&x=1'" ) with self.assertRaisesMessage(AssertionError, msg): self.assertURLEqual( 'http://example.com/?x=1&x=2', 'https://example.com/?x=2&x=1', msg_prefix='Prefix: ', ) class FirstUrls: urlpatterns = [path('first/', empty_response, name='first')] class SecondUrls: urlpatterns = [path('second/', empty_response, name='second')] class SetupTestEnvironmentTests(SimpleTestCase): def test_setup_test_environment_calling_more_than_once(self): with self.assertRaisesMessage(RuntimeError, "setup_test_environment() was already called"): setup_test_environment() def test_allowed_hosts(self): for type_ in (list, tuple): with self.subTest(type_=type_): allowed_hosts = type_('*') with mock.patch('django.test.utils._TestState') as x: del x.saved_data with self.settings(ALLOWED_HOSTS=allowed_hosts): setup_test_environment() self.assertEqual(settings.ALLOWED_HOSTS, ['*', 'testserver']) class OverrideSettingsTests(SimpleTestCase): # #21518 -- If neither override_settings nor a setting_changed receiver # clears the URL cache between tests, then one of test_first or # test_second will fail. @override_settings(ROOT_URLCONF=FirstUrls) def test_urlconf_first(self): reverse('first') @override_settings(ROOT_URLCONF=SecondUrls) def test_urlconf_second(self): reverse('second') def test_urlconf_cache(self): with self.assertRaises(NoReverseMatch): reverse('first') with self.assertRaises(NoReverseMatch): reverse('second') with override_settings(ROOT_URLCONF=FirstUrls): self.client.get(reverse('first')) with self.assertRaises(NoReverseMatch): reverse('second') with override_settings(ROOT_URLCONF=SecondUrls): with self.assertRaises(NoReverseMatch): reverse('first') self.client.get(reverse('second')) self.client.get(reverse('first')) with self.assertRaises(NoReverseMatch): reverse('second') with self.assertRaises(NoReverseMatch): reverse('first') with self.assertRaises(NoReverseMatch): reverse('second') def test_override_media_root(self): """ Overriding the MEDIA_ROOT setting should be reflected in the base_location attribute of django.core.files.storage.default_storage. """ self.assertEqual(default_storage.base_location, '') with self.settings(MEDIA_ROOT='test_value'): self.assertEqual(default_storage.base_location, 'test_value') def test_override_media_url(self): """ Overriding the MEDIA_URL setting should be reflected in the base_url attribute of django.core.files.storage.default_storage. """ self.assertEqual(default_storage.base_location, '') with self.settings(MEDIA_URL='/test_value/'): self.assertEqual(default_storage.base_url, '/test_value/') def test_override_file_upload_permissions(self): """ Overriding the FILE_UPLOAD_PERMISSIONS setting should be reflected in the file_permissions_mode attribute of django.core.files.storage.default_storage. 
""" self.assertEqual(default_storage.file_permissions_mode, 0o644) with self.settings(FILE_UPLOAD_PERMISSIONS=0o777): self.assertEqual(default_storage.file_permissions_mode, 0o777) def test_override_file_upload_directory_permissions(self): """ Overriding the FILE_UPLOAD_DIRECTORY_PERMISSIONS setting should be reflected in the directory_permissions_mode attribute of django.core.files.storage.default_storage. """ self.assertIsNone(default_storage.directory_permissions_mode) with self.settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o777): self.assertEqual(default_storage.directory_permissions_mode, 0o777) def test_override_database_routers(self): """ Overriding DATABASE_ROUTERS should update the master router. """ test_routers = [object()] with self.settings(DATABASE_ROUTERS=test_routers): self.assertEqual(router.routers, test_routers) def test_override_static_url(self): """ Overriding the STATIC_URL setting should be reflected in the base_url attribute of django.contrib.staticfiles.storage.staticfiles_storage. """ with self.settings(STATIC_URL='/test/'): self.assertEqual(staticfiles_storage.base_url, '/test/') def test_override_static_root(self): """ Overriding the STATIC_ROOT setting should be reflected in the location attribute of django.contrib.staticfiles.storage.staticfiles_storage. """ with self.settings(STATIC_ROOT='/tmp/test'): self.assertEqual(staticfiles_storage.location, os.path.abspath('/tmp/test')) def test_override_staticfiles_storage(self): """ Overriding the STATICFILES_STORAGE setting should be reflected in the value of django.contrib.staticfiles.storage.staticfiles_storage. """ new_class = 'ManifestStaticFilesStorage' new_storage = 'django.contrib.staticfiles.storage.' + new_class with self.settings(STATICFILES_STORAGE=new_storage): self.assertEqual(staticfiles_storage.__class__.__name__, new_class) def test_override_staticfiles_finders(self): """ Overriding the STATICFILES_FINDERS setting should be reflected in the return value of django.contrib.staticfiles.finders.get_finders. """ current = get_finders() self.assertGreater(len(list(current)), 1) finders = ['django.contrib.staticfiles.finders.FileSystemFinder'] with self.settings(STATICFILES_FINDERS=finders): self.assertEqual(len(list(get_finders())), len(finders)) def test_override_staticfiles_dirs(self): """ Overriding the STATICFILES_DIRS setting should be reflected in the locations attribute of the django.contrib.staticfiles.finders.FileSystemFinder instance. """ finder = get_finder('django.contrib.staticfiles.finders.FileSystemFinder') test_path = '/tmp/test' expected_location = ('', test_path) self.assertNotIn(expected_location, finder.locations) with self.settings(STATICFILES_DIRS=[test_path]): finder = get_finder('django.contrib.staticfiles.finders.FileSystemFinder') self.assertIn(expected_location, finder.locations) class TestBadSetUpTestData(TestCase): """ An exception in setUpTestData() shouldn't leak a transaction which would cascade across the rest of the test suite. """ class MyException(Exception): pass @classmethod def setUpClass(cls): try: super().setUpClass() except cls.MyException: cls._in_atomic_block = connection.in_atomic_block @classmethod def tearDownClass(Cls): # override to avoid a second cls._rollback_atomics() which would fail. # Normal setUpClass() methods won't have exception handling so this # method wouldn't typically be run. pass @classmethod def setUpTestData(cls): # Simulate a broken setUpTestData() method. 
raise cls.MyException() def test_failure_in_setUpTestData_should_rollback_transaction(self): # setUpTestData() should call _rollback_atomics() so that the # transaction doesn't leak. self.assertFalse(self._in_atomic_block) class CaptureOnCommitCallbacksTests(TestCase): databases = {'default', 'other'} callback_called = False def enqueue_callback(self, using='default'): def hook(): self.callback_called = True transaction.on_commit(hook, using=using) def test_no_arguments(self): with self.captureOnCommitCallbacks() as callbacks: self.enqueue_callback() self.assertEqual(len(callbacks), 1) self.assertIs(self.callback_called, False) callbacks[0]() self.assertIs(self.callback_called, True) def test_using(self): with self.captureOnCommitCallbacks(using='other') as callbacks: self.enqueue_callback(using='other') self.assertEqual(len(callbacks), 1) self.assertIs(self.callback_called, False) callbacks[0]() self.assertIs(self.callback_called, True) def test_different_using(self): with self.captureOnCommitCallbacks(using='default') as callbacks: self.enqueue_callback(using='other') self.assertEqual(callbacks, []) def test_execute(self): with self.captureOnCommitCallbacks(execute=True) as callbacks: self.enqueue_callback() self.assertEqual(len(callbacks), 1) self.assertIs(self.callback_called, True) def test_pre_callback(self): def pre_hook(): pass transaction.on_commit(pre_hook, using='default') with self.captureOnCommitCallbacks() as callbacks: self.enqueue_callback() self.assertEqual(len(callbacks), 1) self.assertNotEqual(callbacks[0], pre_hook) def test_with_rolled_back_savepoint(self): with self.captureOnCommitCallbacks() as callbacks: try: with transaction.atomic(): self.enqueue_callback() raise IntegrityError except IntegrityError: # Inner transaction.atomic() has been rolled back. pass self.assertEqual(callbacks, []) class DisallowedDatabaseQueriesTests(SimpleTestCase): def test_disallowed_database_connections(self): expected_message = ( "Database connections to 'default' are not allowed in SimpleTestCase " "subclasses. Either subclass TestCase or TransactionTestCase to " "ensure proper test isolation or add 'default' to " "test_utils.tests.DisallowedDatabaseQueriesTests.databases to " "silence this failure." ) with self.assertRaisesMessage(AssertionError, expected_message): connection.connect() with self.assertRaisesMessage(AssertionError, expected_message): connection.temporary_connection() def test_disallowed_database_queries(self): expected_message = ( "Database queries to 'default' are not allowed in SimpleTestCase " "subclasses. Either subclass TestCase or TransactionTestCase to " "ensure proper test isolation or add 'default' to " "test_utils.tests.DisallowedDatabaseQueriesTests.databases to " "silence this failure." ) with self.assertRaisesMessage(AssertionError, expected_message): Car.objects.first() def test_disallowed_database_chunked_cursor_queries(self): expected_message = ( "Database queries to 'default' are not allowed in SimpleTestCase " "subclasses. Either subclass TestCase or TransactionTestCase to " "ensure proper test isolation or add 'default' to " "test_utils.tests.DisallowedDatabaseQueriesTests.databases to " "silence this failure." 
        )
        with self.assertRaisesMessage(AssertionError, expected_message):
            next(Car.objects.iterator())


class AllowedDatabaseQueriesTests(SimpleTestCase):
    databases = {'default'}

    def test_allowed_database_queries(self):
        Car.objects.first()

    def test_allowed_database_chunked_cursor_queries(self):
        next(Car.objects.iterator(), None)


class DatabaseAliasTests(SimpleTestCase):
    def setUp(self):
        self.addCleanup(setattr, self.__class__, 'databases', self.databases)

    def test_no_close_match(self):
        self.__class__.databases = {'void'}
        message = (
            "test_utils.tests.DatabaseAliasTests.databases refers to 'void' which is not defined "
            "in settings.DATABASES."
        )
        with self.assertRaisesMessage(ImproperlyConfigured, message):
            self._validate_databases()

    def test_close_match(self):
        self.__class__.databases = {'defualt'}
        message = (
            "test_utils.tests.DatabaseAliasTests.databases refers to 'defualt' which is not defined "
            "in settings.DATABASES. Did you mean 'default'?"
        )
        with self.assertRaisesMessage(ImproperlyConfigured, message):
            self._validate_databases()

    def test_match(self):
        self.__class__.databases = {'default', 'other'}
        self.assertEqual(self._validate_databases(), frozenset({'default', 'other'}))

    def test_all(self):
        self.__class__.databases = '__all__'
        self.assertEqual(self._validate_databases(), frozenset(connections))


@isolate_apps('test_utils', attr_name='class_apps')
class IsolatedAppsTests(SimpleTestCase):
    def test_installed_apps(self):
        self.assertEqual([app_config.label for app_config in self.class_apps.get_app_configs()], ['test_utils'])

    def test_class_decoration(self):
        class ClassDecoration(models.Model):
            pass
        self.assertEqual(ClassDecoration._meta.apps, self.class_apps)

    @isolate_apps('test_utils', kwarg_name='method_apps')
    def test_method_decoration(self, method_apps):
        class MethodDecoration(models.Model):
            pass
        self.assertEqual(MethodDecoration._meta.apps, method_apps)

    def test_context_manager(self):
        with isolate_apps('test_utils') as context_apps:
            class ContextManager(models.Model):
                pass
        self.assertEqual(ContextManager._meta.apps, context_apps)

    @isolate_apps('test_utils', kwarg_name='method_apps')
    def test_nested(self, method_apps):
        class MethodDecoration(models.Model):
            pass
        with isolate_apps('test_utils') as context_apps:
            class ContextManager(models.Model):
                pass
            with isolate_apps('test_utils') as nested_context_apps:
                class NestedContextManager(models.Model):
                    pass
        self.assertEqual(MethodDecoration._meta.apps, method_apps)
        self.assertEqual(ContextManager._meta.apps, context_apps)
        self.assertEqual(NestedContextManager._meta.apps, nested_context_apps)


class DoNothingDecorator(TestContextDecorator):
    def enable(self):
        pass

    def disable(self):
        pass


class TestContextDecoratorTests(SimpleTestCase):
    @mock.patch.object(DoNothingDecorator, 'disable')
    def test_exception_in_setup(self, mock_disable):
        """An exception in setUp() is reraised after disable() is called."""
        class ExceptionInSetUp(unittest.TestCase):
            def setUp(self):
                raise NotImplementedError('reraised')

        decorator = DoNothingDecorator()
        decorated_test_class = decorator.__call__(ExceptionInSetUp)()
        self.assertFalse(mock_disable.called)
        with self.assertRaisesMessage(NotImplementedError, 'reraised'):
            decorated_test_class.setUp()
        self.assertTrue(mock_disable.called)
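# The TestContextDecorator contract that DoNothingDecorator relies on, shown
# with a hypothetical subclass (illustrative only): implement enable() and
# disable(), and the base class supplies class-decorator, method-decorator,
# and context-manager behavior.
#
#   class toggle_feature(TestContextDecorator):
#       def enable(self):
#           ...  # apply the change; the return value is bound by "as"
#       def disable(self):
#           ...  # undo the change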
import datetime import pickle from decimal import Decimal from operator import attrgetter from unittest import mock from django.contrib.contenttypes.models import ContentType from django.core.exceptions import FieldError from django.db import connection from django.db.models import ( Aggregate, Avg, Case, Count, DecimalField, F, IntegerField, Max, Q, StdDev, Sum, Value, Variance, When, ) from django.test import TestCase, skipUnlessAnyDBFeature, skipUnlessDBFeature from django.test.utils import Approximate from .models import ( Alfa, Author, Book, Bravo, Charlie, Clues, Entries, HardbackBook, ItemTag, Publisher, SelfRefFK, Store, WithManualPK, ) class AggregationTests(TestCase): @classmethod def setUpTestData(cls): cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34) cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35) cls.a3 = Author.objects.create(name='Brad Dayley', age=45) cls.a4 = Author.objects.create(name='James Bennett', age=29) cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37) cls.a6 = Author.objects.create(name='Paul Bissex', age=29) cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25) cls.a8 = Author.objects.create(name='Peter Norvig', age=57) cls.a9 = Author.objects.create(name='Stuart Russell', age=46) cls.a1.friends.add(cls.a2, cls.a4) cls.a2.friends.add(cls.a1, cls.a7) cls.a4.friends.add(cls.a1) cls.a5.friends.add(cls.a6, cls.a7) cls.a6.friends.add(cls.a5, cls.a7) cls.a7.friends.add(cls.a2, cls.a5, cls.a6) cls.a8.friends.add(cls.a9) cls.a9.friends.add(cls.a8) cls.p1 = Publisher.objects.create(name='Apress', num_awards=3) cls.p2 = Publisher.objects.create(name='Sams', num_awards=1) cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7) cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9) cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0) cls.b1 = Book.objects.create( isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right', pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1, pubdate=datetime.date(2007, 12, 6) ) cls.b2 = Book.objects.create( isbn='067232959', name='Sams Teach Yourself Django in 24 Hours', pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2, pubdate=datetime.date(2008, 3, 3) ) cls.b3 = Book.objects.create( isbn='159059996', name='Practical Django Projects', pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1, pubdate=datetime.date(2008, 6, 23) ) cls.b4 = Book.objects.create( isbn='013235613', name='Python Web Development with Django', pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3, pubdate=datetime.date(2008, 11, 3) ) cls.b5 = HardbackBook.objects.create( isbn='013790395', name='Artificial Intelligence: A Modern Approach', pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3, pubdate=datetime.date(1995, 1, 15), weight=4.5) cls.b6 = HardbackBook.objects.create( isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4, pubdate=datetime.date(1991, 10, 15), weight=3.7) cls.b1.authors.add(cls.a1, cls.a2) cls.b2.authors.add(cls.a3) cls.b3.authors.add(cls.a4) cls.b4.authors.add(cls.a5, cls.a6, cls.a7) cls.b5.authors.add(cls.a8, cls.a9) cls.b6.authors.add(cls.a8) s1 = Store.objects.create( name='Amazon.com', original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42), 
friday_night_closing=datetime.time(23, 59, 59) ) s2 = Store.objects.create( name='Books.com', original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37), friday_night_closing=datetime.time(23, 59, 59) ) s3 = Store.objects.create( name="Mamma and Pappa's Books", original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14), friday_night_closing=datetime.time(21, 30) ) s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6) s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6) s3.books.add(cls.b3, cls.b4, cls.b6) def assertObjectAttrs(self, obj, **kwargs): for attr, value in kwargs.items(): self.assertEqual(getattr(obj, attr), value) def test_annotation_with_value(self): values = Book.objects.filter( name='Practical Django Projects', ).annotate( discount_price=F('price') * 2, ).values( 'discount_price', ).annotate(sum_discount=Sum('discount_price')) self.assertSequenceEqual( values, [{'discount_price': Decimal('59.38'), 'sum_discount': Decimal('59.38')}] ) def test_aggregates_in_where_clause(self): """ Regression test for #12822: DatabaseError: aggregates not allowed in WHERE clause The subselect works and returns results equivalent to a query with the IDs listed. Before the corresponding fix for this bug, this test passed in 1.1 and failed in 1.2-beta (trunk). """ qs = Book.objects.values('contact').annotate(Max('id')) qs = qs.order_by('contact').values_list('id__max', flat=True) # don't do anything with the queryset (qs) before including it as a # subquery books = Book.objects.order_by('id') qs1 = books.filter(id__in=qs) qs2 = books.filter(id__in=list(qs)) self.assertEqual(list(qs1), list(qs2)) def test_aggregates_in_where_clause_pre_eval(self): """ Regression test for #12822: DatabaseError: aggregates not allowed in WHERE clause Same as the above test, but evaluates the queryset for the subquery before it's used as a subquery. Before the corresponding fix for this bug, this test failed in both 1.1 and 1.2-beta (trunk). """ qs = Book.objects.values('contact').annotate(Max('id')) qs = qs.order_by('contact').values_list('id__max', flat=True) # force the queryset (qs) for the subquery to be evaluated in its # current state list(qs) books = Book.objects.order_by('id') qs1 = books.filter(id__in=qs) qs2 = books.filter(id__in=list(qs)) self.assertEqual(list(qs1), list(qs2)) @skipUnlessDBFeature('supports_subqueries_in_group_by') def test_annotate_with_extra(self): """ Regression test for #11916: Extra params + aggregation creates incorrect SQL. 
""" # Oracle doesn't support subqueries in group by clause shortest_book_sql = """ SELECT name FROM aggregation_regress_book b WHERE b.publisher_id = aggregation_regress_publisher.id ORDER BY b.pages LIMIT 1 """ # tests that this query does not raise a DatabaseError due to the full # subselect being (erroneously) added to the GROUP BY parameters qs = Publisher.objects.extra(select={ 'name_of_shortest_book': shortest_book_sql, }).annotate(total_books=Count('book')) # force execution of the query list(qs) def test_aggregate(self): # Ordering requests are ignored self.assertEqual( Author.objects.order_by("name").aggregate(Avg("age")), {"age__avg": Approximate(37.444, places=1)} ) # Implicit ordering is also ignored self.assertEqual( Book.objects.aggregate(Sum("pages")), {"pages__sum": 3703}, ) # Baseline results self.assertEqual( Book.objects.aggregate(Sum('pages'), Avg('pages')), {'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)} ) # Empty values query doesn't affect grouping or results self.assertEqual( Book.objects.values().aggregate(Sum('pages'), Avg('pages')), {'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)} ) # Aggregate overrides extra selected column self.assertEqual( Book.objects.extra(select={'price_per_page': 'price / pages'}).aggregate(Sum('pages')), {'pages__sum': 3703} ) def test_annotation(self): # Annotations get combined with extra select clauses obj = Book.objects.annotate(mean_auth_age=Avg("authors__age")).extra( select={"manufacture_cost": "price * .5"}).get(pk=self.b2.pk) self.assertObjectAttrs( obj, contact_id=self.a3.id, isbn='067232959', mean_auth_age=45.0, name='Sams Teach Yourself Django in 24 Hours', pages=528, price=Decimal("23.09"), pubdate=datetime.date(2008, 3, 3), publisher_id=self.p2.id, rating=3.0 ) # Different DB backends return different types for the extra select computation self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545'))) # Order of the annotate/extra in the query doesn't matter obj = Book.objects.extra(select={'manufacture_cost': 'price * .5'}).annotate( mean_auth_age=Avg('authors__age')).get(pk=self.b2.pk) self.assertObjectAttrs( obj, contact_id=self.a3.id, isbn='067232959', mean_auth_age=45.0, name='Sams Teach Yourself Django in 24 Hours', pages=528, price=Decimal("23.09"), pubdate=datetime.date(2008, 3, 3), publisher_id=self.p2.id, rating=3.0 ) # Different DB backends return different types for the extra select computation self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545'))) # Values queries can be combined with annotate and extra obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra( select={'manufacture_cost': 'price * .5'}).values().get(pk=self.b2.pk) manufacture_cost = obj['manufacture_cost'] self.assertIn(manufacture_cost, (11.545, Decimal('11.545'))) del obj['manufacture_cost'] self.assertEqual(obj, { 'id': self.b2.id, 'contact_id': self.a3.id, 'isbn': '067232959', 'mean_auth_age': 45.0, 'name': 'Sams Teach Yourself Django in 24 Hours', 'pages': 528, 'price': Decimal('23.09'), 'pubdate': datetime.date(2008, 3, 3), 'publisher_id': self.p2.id, 'rating': 3.0, }) # The order of the (empty) values, annotate and extra clauses doesn't # matter obj = Book.objects.values().annotate(mean_auth_age=Avg('authors__age')).extra( select={'manufacture_cost': 'price * .5'}).get(pk=self.b2.pk) manufacture_cost = obj['manufacture_cost'] self.assertIn(manufacture_cost, (11.545, Decimal('11.545'))) del obj['manufacture_cost'] self.assertEqual(obj, { 'id': self.b2.id, 'contact_id': 
self.a3.id, 'isbn': '067232959', 'mean_auth_age': 45.0, 'name': 'Sams Teach Yourself Django in 24 Hours', 'pages': 528, 'price': Decimal('23.09'), 'pubdate': datetime.date(2008, 3, 3), 'publisher_id': self.p2.id, 'rating': 3.0 }) # If the annotation precedes the values clause, it won't be included # unless it is explicitly named obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra( select={'price_per_page': 'price / pages'}).values('name').get(pk=self.b1.pk) self.assertEqual(obj, { "name": 'The Definitive Guide to Django: Web Development Done Right', }) obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra( select={'price_per_page': 'price / pages'}).values('name', 'mean_auth_age').get(pk=self.b1.pk) self.assertEqual(obj, { 'mean_auth_age': 34.5, 'name': 'The Definitive Guide to Django: Web Development Done Right', }) # If an annotation isn't included in the values, it can still be used # in a filter qs = Book.objects.annotate(n_authors=Count('authors')).values('name').filter(n_authors__gt=2) self.assertSequenceEqual( qs, [ {"name": 'Python Web Development with Django'} ], ) # The annotations are added to values output if values() precedes # annotate() obj = Book.objects.values('name').annotate(mean_auth_age=Avg('authors__age')).extra( select={'price_per_page': 'price / pages'}).get(pk=self.b1.pk) self.assertEqual(obj, { 'mean_auth_age': 34.5, 'name': 'The Definitive Guide to Django: Web Development Done Right', }) # All of the objects are getting counted (allow_nulls) and that values # respects the amount of objects self.assertEqual( len(Author.objects.annotate(Avg('friends__age')).values()), 9 ) # Consecutive calls to annotate accumulate in the query qs = ( Book.objects .values('price') .annotate(oldest=Max('authors__age')) .order_by('oldest', 'price') .annotate(Max('publisher__num_awards')) ) self.assertSequenceEqual( qs, [ {'price': Decimal("30"), 'oldest': 35, 'publisher__num_awards__max': 3}, {'price': Decimal("29.69"), 'oldest': 37, 'publisher__num_awards__max': 7}, {'price': Decimal("23.09"), 'oldest': 45, 'publisher__num_awards__max': 1}, {'price': Decimal("75"), 'oldest': 57, 'publisher__num_awards__max': 9}, {'price': Decimal("82.8"), 'oldest': 57, 'publisher__num_awards__max': 7} ], ) def test_aggregate_annotation(self): # Aggregates can be composed over annotations. # The return type is derived from the composed aggregate vals = ( Book.objects .all() .annotate(num_authors=Count('authors__id')) .aggregate(Max('pages'), Max('price'), Sum('num_authors'), Avg('num_authors')) ) self.assertEqual(vals, { 'num_authors__sum': 10, 'num_authors__avg': Approximate(1.666, places=2), 'pages__max': 1132, 'price__max': Decimal("82.80") }) # Regression for #15624 - Missing SELECT columns when using values, annotate # and aggregate in a single query self.assertEqual( Book.objects.annotate(c=Count('authors')).values('c').aggregate(Max('c')), {'c__max': 3} ) def test_conditional_aggregate(self): # Conditional aggregation of a grouped queryset. 
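        # Rows that match no When() clause fall back to NULL, which Sum()
        # skips, so the expression below counts the books that have more
        # than one author.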
self.assertEqual( Book.objects.annotate(c=Count('authors')).values('pk').aggregate(test=Sum( Case(When(c__gt=1, then=1)) ))['test'], 3 ) def test_sliced_conditional_aggregate(self): self.assertEqual( Author.objects.all()[:5].aggregate(test=Sum(Case( When(age__lte=35, then=1) )))['test'], 3 ) def test_annotated_conditional_aggregate(self): annotated_qs = Book.objects.annotate(discount_price=F('price') * Decimal('0.75')) self.assertAlmostEqual( annotated_qs.aggregate(test=Avg(Case( When(pages__lt=400, then='discount_price'), output_field=DecimalField() )))['test'], Decimal('22.27'), places=2 ) def test_distinct_conditional_aggregate(self): self.assertEqual( Book.objects.distinct().aggregate(test=Avg(Case( When(price=Decimal('29.69'), then='pages'), output_field=IntegerField() )))['test'], 325 ) def test_conditional_aggregate_on_complex_condition(self): self.assertEqual( Book.objects.distinct().aggregate(test=Avg(Case( When(Q(price__gte=Decimal('29')) & Q(price__lt=Decimal('30')), then='pages'), output_field=IntegerField() )))['test'], 325 ) def test_decimal_aggregate_annotation_filter(self): """ Filtering on an aggregate annotation with Decimal values should work. Requires special handling on SQLite (#18247). """ self.assertEqual( len(Author.objects.annotate(sum=Sum('book_contact_set__price')).filter(sum__gt=Decimal(40))), 1 ) self.assertEqual( len(Author.objects.annotate(sum=Sum('book_contact_set__price')).filter(sum__lte=Decimal(40))), 4 ) def test_field_error(self): # Bad field requests in aggregates are caught and reported msg = ( "Cannot resolve keyword 'foo' into field. Choices are: authors, " "contact, contact_id, hardbackbook, id, isbn, name, pages, price, " "pubdate, publisher, publisher_id, rating, store, tags" ) with self.assertRaisesMessage(FieldError, msg): Book.objects.all().aggregate(num_authors=Count('foo')) with self.assertRaisesMessage(FieldError, msg): Book.objects.all().annotate(num_authors=Count('foo')) msg = ( "Cannot resolve keyword 'foo' into field. 
Choices are: authors, " "contact, contact_id, hardbackbook, id, isbn, name, num_authors, " "pages, price, pubdate, publisher, publisher_id, rating, store, tags" ) with self.assertRaisesMessage(FieldError, msg): Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('foo')) def test_more(self): # Old-style count aggregations can be mixed with new-style self.assertEqual( Book.objects.annotate(num_authors=Count('authors')).count(), 6 ) # Non-ordinal, non-computed Aggregates over annotations correctly # inherit the annotation's internal type if the annotation is ordinal # or computed vals = Book.objects.annotate(num_authors=Count('authors')).aggregate(Max('num_authors')) self.assertEqual( vals, {'num_authors__max': 3} ) vals = Publisher.objects.annotate(avg_price=Avg('book__price')).aggregate(Max('avg_price')) self.assertEqual( vals, {'avg_price__max': 75.0} ) # Aliases are quoted to protected aliases that might be reserved names vals = Book.objects.aggregate(number=Max('pages'), select=Max('pages')) self.assertEqual( vals, {'number': 1132, 'select': 1132} ) # Regression for #10064: select_related() plays nice with aggregates obj = Book.objects.select_related('publisher').annotate( num_authors=Count('authors')).values().get(isbn='013790395') self.assertEqual(obj, { 'contact_id': self.a8.id, 'id': self.b5.id, 'isbn': '013790395', 'name': 'Artificial Intelligence: A Modern Approach', 'num_authors': 2, 'pages': 1132, 'price': Decimal("82.8"), 'pubdate': datetime.date(1995, 1, 15), 'publisher_id': self.p3.id, 'rating': 4.0, }) # Regression for #10010: exclude on an aggregate field is correctly # negated self.assertEqual( len(Book.objects.annotate(num_authors=Count('authors'))), 6 ) self.assertEqual( len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__gt=2)), 1 ) self.assertEqual( len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__gt=2)), 5 ) self.assertEqual( len( Book.objects .annotate(num_authors=Count('authors')) .filter(num_authors__lt=3) .exclude(num_authors__lt=2) ), 2 ) self.assertEqual( len( Book.objects .annotate(num_authors=Count('authors')) .exclude(num_authors__lt=2) .filter(num_authors__lt=3) ), 2 ) def test_aggregate_fexpr(self): # Aggregates can be used with F() expressions # ... where the F() is pushed into the HAVING clause qs = ( Publisher.objects .annotate(num_books=Count('book')) .filter(num_books__lt=F('num_awards') / 2) .order_by('name') .values('name', 'num_books', 'num_awards') ) self.assertSequenceEqual( qs, [ {'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9}, {'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7} ], ) qs = ( Publisher.objects .annotate(num_books=Count('book')) .exclude(num_books__lt=F('num_awards') / 2) .order_by('name') .values('name', 'num_books', 'num_awards') ) self.assertSequenceEqual( qs, [ {'num_books': 2, 'name': 'Apress', 'num_awards': 3}, {'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0}, {'num_books': 1, 'name': 'Sams', 'num_awards': 1} ], ) # ... 
and where the F() references an aggregate qs = ( Publisher.objects .annotate(num_books=Count('book')) .filter(num_awards__gt=2 * F('num_books')) .order_by('name') .values('name', 'num_books', 'num_awards') ) self.assertSequenceEqual( qs, [ {'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9}, {'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7} ], ) qs = ( Publisher.objects .annotate(num_books=Count('book')) .exclude(num_books__lt=F('num_awards') / 2) .order_by('name') .values('name', 'num_books', 'num_awards') ) self.assertSequenceEqual( qs, [ {'num_books': 2, 'name': 'Apress', 'num_awards': 3}, {'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0}, {'num_books': 1, 'name': 'Sams', 'num_awards': 1} ], ) def test_db_col_table(self): # Tests on fields with non-default table and column names. qs = ( Clues.objects .values('EntryID__Entry') .annotate(Appearances=Count('EntryID'), Distinct_Clues=Count('Clue', distinct=True)) ) self.assertQuerysetEqual(qs, []) qs = Entries.objects.annotate(clue_count=Count('clues__ID')) self.assertQuerysetEqual(qs, []) def test_boolean_conversion(self): # Aggregates mixed up ordering of columns for backend's convert_values # method. Refs #21126. e = Entries.objects.create(Entry='foo') c = Clues.objects.create(EntryID=e, Clue='bar') qs = Clues.objects.select_related('EntryID').annotate(Count('ID')) self.assertSequenceEqual(qs, [c]) self.assertEqual(qs[0].EntryID, e) self.assertIs(qs[0].EntryID.Exclude, False) def test_empty(self): # Regression for #10089: Check handling of empty result sets with # aggregates self.assertEqual( Book.objects.filter(id__in=[]).count(), 0 ) vals = ( Book.objects .filter(id__in=[]) .aggregate( num_authors=Count('authors'), avg_authors=Avg('authors'), max_authors=Max('authors'), max_price=Max('price'), max_rating=Max('rating'), ) ) self.assertEqual( vals, {'max_authors': None, 'max_rating': None, 'num_authors': 0, 'avg_authors': None, 'max_price': None} ) qs = ( Publisher.objects .filter(name="Jonno's House of Books") .annotate( num_authors=Count('book__authors'), avg_authors=Avg('book__authors'), max_authors=Max('book__authors'), max_price=Max('book__price'), max_rating=Max('book__rating'), ).values() ) self.assertSequenceEqual( qs, [{ 'max_authors': None, 'name': "Jonno's House of Books", 'num_awards': 0, 'max_price': None, 'num_authors': 0, 'max_rating': None, 'id': self.p5.id, 'avg_authors': None, }], ) def test_more_more(self): # Regression for #10113 - Fields mentioned in order_by() must be # included in the GROUP BY. This only becomes a problem when the # order_by introduces a new join. 
self.assertQuerysetEqual( Book.objects.annotate(num_authors=Count('authors')).order_by('publisher__name', 'name'), [ "Practical Django Projects", "The Definitive Guide to Django: Web Development Done Right", "Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp", "Artificial Intelligence: A Modern Approach", "Python Web Development with Django", "Sams Teach Yourself Django in 24 Hours", ], lambda b: b.name ) # Regression for #10127 - Empty select_related() works with annotate qs = Book.objects.filter(rating__lt=4.5).select_related().annotate(Avg('authors__age')).order_by('name') self.assertQuerysetEqual( qs, [ ('Artificial Intelligence: A Modern Approach', 51.5, 'Prentice Hall', 'Peter Norvig'), ('Practical Django Projects', 29.0, 'Apress', 'James Bennett'), ( 'Python Web Development with Django', Approximate(30.333, places=2), 'Prentice Hall', 'Jeffrey Forcier', ), ('Sams Teach Yourself Django in 24 Hours', 45.0, 'Sams', 'Brad Dayley') ], lambda b: (b.name, b.authors__age__avg, b.publisher.name, b.contact.name) ) # Regression for #10132 - If the values() clause only mentioned extra # (select=) columns, those columns are used for grouping qs = Book.objects.extra(select={'pub': 'publisher_id'}).values('pub').annotate(Count('id')).order_by('pub') self.assertSequenceEqual( qs, [ {'pub': self.b1.id, 'id__count': 2}, {'pub': self.b2.id, 'id__count': 1}, {'pub': self.b3.id, 'id__count': 2}, {'pub': self.b4.id, 'id__count': 1} ], ) qs = ( Book.objects .extra(select={'pub': 'publisher_id', 'foo': 'pages'}) .values('pub') .annotate(Count('id')) .order_by('pub') ) self.assertSequenceEqual( qs, [ {'pub': self.p1.id, 'id__count': 2}, {'pub': self.p2.id, 'id__count': 1}, {'pub': self.p3.id, 'id__count': 2}, {'pub': self.p4.id, 'id__count': 1} ], ) # Regression for #10182 - Queries with aggregate calls are correctly # realiased when used in a subquery ids = ( Book.objects .filter(pages__gt=100) .annotate(n_authors=Count('authors')) .filter(n_authors__gt=2) .order_by('n_authors') ) self.assertQuerysetEqual( Book.objects.filter(id__in=ids), [ "Python Web Development with Django", ], lambda b: b.name ) # Regression for #15709 - Ensure each group_by field only exists once # per query qstr = str(Book.objects.values('publisher').annotate(max_pages=Max('pages')).order_by().query) # There is just one GROUP BY clause (zero commas means at most one clause). self.assertEqual(qstr[qstr.index('GROUP BY'):].count(', '), 0) def test_duplicate_alias(self): # Regression for #11256 - duplicating a default alias raises ValueError. msg = ( "The named annotation 'authors__age__avg' conflicts with " "the default name for another annotation." ) with self.assertRaisesMessage(ValueError, msg): Book.objects.all().annotate(Avg('authors__age'), authors__age__avg=Avg('authors__age')) def test_field_name_conflict(self): # Regression for #11256 - providing an aggregate name # that conflicts with a field name on the model raises ValueError msg = "The annotation 'age' conflicts with a field on the model." with self.assertRaisesMessage(ValueError, msg): Author.objects.annotate(age=Avg('friends__age')) def test_m2m_name_conflict(self): # Regression for #11256 - providing an aggregate name # that conflicts with an m2m name on the model raises ValueError msg = "The annotation 'friends' conflicts with a field on the model." 
with self.assertRaisesMessage(ValueError, msg): Author.objects.annotate(friends=Count('friends')) def test_fk_attname_conflict(self): msg = "The annotation 'contact_id' conflicts with a field on the model." with self.assertRaisesMessage(ValueError, msg): Book.objects.annotate(contact_id=F('publisher_id')) def test_values_queryset_non_conflict(self): # Regression for #14707 -- If you're using a values query set, some potential conflicts are avoided. # age is a field on Author, so it shouldn't be allowed as an aggregate. # But age isn't included in values(), so it is. results = Author.objects.values('name').annotate(age=Count('book_contact_set')).order_by('name') self.assertEqual(len(results), 9) self.assertEqual(results[0]['name'], 'Adrian Holovaty') self.assertEqual(results[0]['age'], 1) # Same problem, but aggregating over m2m fields results = Author.objects.values('name').annotate(age=Avg('friends__age')).order_by('name') self.assertEqual(len(results), 9) self.assertEqual(results[0]['name'], 'Adrian Holovaty') self.assertEqual(results[0]['age'], 32.0) # Same problem, but colliding with an m2m field results = Author.objects.values('name').annotate(friends=Count('friends')).order_by('name') self.assertEqual(len(results), 9) self.assertEqual(results[0]['name'], 'Adrian Holovaty') self.assertEqual(results[0]['friends'], 2) def test_reverse_relation_name_conflict(self): # Regression for #11256 - providing an aggregate name # that conflicts with a reverse-related name on the model raises ValueError msg = "The annotation 'book_contact_set' conflicts with a field on the model." with self.assertRaisesMessage(ValueError, msg): Author.objects.annotate(book_contact_set=Avg('friends__age')) def test_pickle(self): # Regression for #10197 -- Queries with aggregates can be pickled. # First check that pickling is possible at all. No crash = success qs = Book.objects.annotate(num_authors=Count('authors')) pickle.dumps(qs) # Then check that the round trip works. query = qs.query.get_compiler(qs.db).as_sql()[0] qs2 = pickle.loads(pickle.dumps(qs)) self.assertEqual( qs2.query.get_compiler(qs2.db).as_sql()[0], query, ) def test_more_more_more(self): # Regression for #10199 - Aggregate calls clone the original query so # the original query can still be used books = Book.objects.all() books.aggregate(Avg("authors__age")) self.assertQuerysetEqual( books.all(), [ 'Artificial Intelligence: A Modern Approach', 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 'Practical Django Projects', 'Python Web Development with Django', 'Sams Teach Yourself Django in 24 Hours', 'The Definitive Guide to Django: Web Development Done Right' ], lambda b: b.name ) # Regression for #10248 - Annotations work with dates() qs = Book.objects.annotate(num_authors=Count('authors')).filter(num_authors=2).dates('pubdate', 'day') self.assertSequenceEqual( qs, [ datetime.date(1995, 1, 15), datetime.date(2007, 12, 6), ], ) # Regression for #10290 - extra selects with parameters can be used for # grouping. 
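        # With select_params=[1, 2], the extra select below computes
        # (pages + 1) / 2; the parameterized expression as a whole takes
        # part in the GROUP BY clause.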
qs = ( Book.objects .annotate(mean_auth_age=Avg('authors__age')) .extra(select={'sheets': '(pages + %s) / %s'}, select_params=[1, 2]) .order_by('sheets') .values('sheets') ) self.assertQuerysetEqual( qs, [ 150, 175, 224, 264, 473, 566 ], lambda b: int(b["sheets"]) ) # Regression for 10425 - annotations don't get in the way of a count() # clause self.assertEqual( Book.objects.values('publisher').annotate(Count('publisher')).count(), 4 ) self.assertEqual( Book.objects.annotate(Count('publisher')).values('publisher').count(), 6 ) # Note: intentionally no order_by(), that case needs tests, too. publishers = Publisher.objects.filter(id__in=[1, 2]) self.assertEqual( sorted(p.name for p in publishers), [ "Apress", "Sams" ] ) publishers = publishers.annotate(n_books=Count("book")) sorted_publishers = sorted(publishers, key=lambda x: x.name) self.assertEqual( sorted_publishers[0].n_books, 2 ) self.assertEqual( sorted_publishers[1].n_books, 1 ) self.assertEqual( sorted(p.name for p in publishers), [ "Apress", "Sams" ] ) books = Book.objects.filter(publisher__in=publishers) self.assertQuerysetEqual( books, [ "Practical Django Projects", "Sams Teach Yourself Django in 24 Hours", "The Definitive Guide to Django: Web Development Done Right", ], lambda b: b.name ) self.assertEqual( sorted(p.name for p in publishers), [ "Apress", "Sams" ] ) # Regression for 10666 - inherited fields work with annotations and # aggregations self.assertEqual( HardbackBook.objects.aggregate(n_pages=Sum('book_ptr__pages')), {'n_pages': 2078} ) self.assertEqual( HardbackBook.objects.aggregate(n_pages=Sum('pages')), {'n_pages': 2078}, ) qs = HardbackBook.objects.annotate( n_authors=Count('book_ptr__authors'), ).values('name', 'n_authors').order_by('name') self.assertSequenceEqual( qs, [ {'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'}, { 'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp' } ], ) qs = HardbackBook.objects.annotate(n_authors=Count('authors')).values('name', 'n_authors').order_by('name') self.assertSequenceEqual( qs, [ {'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'}, { 'n_authors': 1, 'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp' } ], ) # Regression for #10766 - Shouldn't be able to reference an aggregate # fields in an aggregate() call. msg = "Cannot compute Avg('mean_age'): 'mean_age' is an aggregate" with self.assertRaisesMessage(FieldError, msg): Book.objects.annotate(mean_age=Avg('authors__age')).annotate(Avg('mean_age')) def test_empty_filter_count(self): self.assertEqual( Author.objects.filter(id__in=[]).annotate(Count("friends")).count(), 0 ) def test_empty_filter_aggregate(self): self.assertEqual( Author.objects.filter(id__in=[]).annotate(Count("friends")).aggregate(Count("pk")), {"pk__count": None} ) def test_none_call_before_aggregate(self): # Regression for #11789 self.assertEqual( Author.objects.none().aggregate(Avg('age')), {'age__avg': None} ) def test_annotate_and_join(self): self.assertEqual( Author.objects.annotate(c=Count("friends__name")).exclude(friends__name="Joe").count(), Author.objects.count() ) def test_f_expression_annotation(self): # Books with less than 200 pages per author. 
qs = Book.objects.values("name").annotate( n_authors=Count("authors") ).filter( pages__lt=F("n_authors") * 200 ).values_list("pk") self.assertQuerysetEqual( Book.objects.filter(pk__in=qs), [ "Python Web Development with Django" ], attrgetter("name") ) def test_values_annotate_values(self): qs = Book.objects.values("name").annotate( n_authors=Count("authors") ).values_list("pk", flat=True).order_by('name') self.assertEqual(list(qs), list(Book.objects.values_list("pk", flat=True))) def test_having_group_by(self): # When a field occurs on the LHS of a HAVING clause that it # appears correctly in the GROUP BY clause qs = Book.objects.values_list("name").annotate( n_authors=Count("authors") ).filter( pages__gt=F("n_authors") ).values_list("name", flat=True).order_by('name') # Results should be the same, all Books have more pages than authors self.assertEqual( list(qs), list(Book.objects.values_list("name", flat=True)) ) def test_values_list_annotation_args_ordering(self): """ Annotate *args ordering should be preserved in values_list results. **kwargs comes after *args. Regression test for #23659. """ books = Book.objects.values_list("publisher__name").annotate( Count("id"), Avg("price"), Avg("authors__age"), avg_pgs=Avg("pages") ).order_by("-publisher__name") self.assertEqual(books[0], ('Sams', 1, Decimal('23.09'), 45.0, 528.0)) def test_annotation_disjunction(self): qs = Book.objects.annotate(n_authors=Count("authors")).filter( Q(n_authors=2) | Q(name="Python Web Development with Django") ).order_by('name') self.assertQuerysetEqual( qs, [ "Artificial Intelligence: A Modern Approach", "Python Web Development with Django", "The Definitive Guide to Django: Web Development Done Right", ], attrgetter("name") ) qs = ( Book.objects .annotate(n_authors=Count("authors")) .filter( Q(name="The Definitive Guide to Django: Web Development Done Right") | (Q(name="Artificial Intelligence: A Modern Approach") & Q(n_authors=3)) ) ).order_by('name') self.assertQuerysetEqual( qs, [ "The Definitive Guide to Django: Web Development Done Right", ], attrgetter("name") ) qs = Publisher.objects.annotate( rating_sum=Sum("book__rating"), book_count=Count("book") ).filter( Q(rating_sum__gt=5.5) | Q(rating_sum__isnull=True) ).order_by('pk') self.assertQuerysetEqual( qs, [ "Apress", "Prentice Hall", "Jonno's House of Books", ], attrgetter("name") ) qs = Publisher.objects.annotate( rating_sum=Sum("book__rating"), book_count=Count("book") ).filter( Q(rating_sum__gt=F("book_count")) | Q(rating_sum=None) ).order_by("num_awards") self.assertQuerysetEqual( qs, [ "Jonno's House of Books", "Sams", "Apress", "Prentice Hall", "Morgan Kaufmann" ], attrgetter("name") ) def test_quoting_aggregate_order_by(self): qs = Book.objects.filter( name="Python Web Development with Django" ).annotate( authorCount=Count("authors") ).order_by("authorCount") self.assertQuerysetEqual( qs, [ ("Python Web Development with Django", 3), ], lambda b: (b.name, b.authorCount) ) def test_stddev(self): self.assertEqual( Book.objects.aggregate(StdDev('pages')), {'pages__stddev': Approximate(311.46, 1)} ) self.assertEqual( Book.objects.aggregate(StdDev('rating')), {'rating__stddev': Approximate(0.60, 1)} ) self.assertEqual( Book.objects.aggregate(StdDev('price')), {'price__stddev': Approximate(Decimal('24.16'), 2)} ) self.assertEqual( Book.objects.aggregate(StdDev('pages', sample=True)), {'pages__stddev': Approximate(341.19, 2)} ) self.assertEqual( Book.objects.aggregate(StdDev('rating', sample=True)), {'rating__stddev': Approximate(0.66, 2)} ) 
self.assertEqual( Book.objects.aggregate(StdDev('price', sample=True)), {'price__stddev': Approximate(Decimal('26.46'), 1)} ) self.assertEqual( Book.objects.aggregate(Variance('pages')), {'pages__variance': Approximate(97010.80, 1)} ) self.assertEqual( Book.objects.aggregate(Variance('rating')), {'rating__variance': Approximate(0.36, 1)} ) self.assertEqual( Book.objects.aggregate(Variance('price')), {'price__variance': Approximate(Decimal('583.77'), 1)} ) self.assertEqual( Book.objects.aggregate(Variance('pages', sample=True)), {'pages__variance': Approximate(116412.96, 1)} ) self.assertEqual( Book.objects.aggregate(Variance('rating', sample=True)), {'rating__variance': Approximate(0.44, 2)} ) self.assertEqual( Book.objects.aggregate(Variance('price', sample=True)), {'price__variance': Approximate(Decimal('700.53'), 2)} ) def test_filtering_by_annotation_name(self): # Regression test for #14476 # The name of the explicitly provided annotation name in this case # poses no problem qs = Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2).order_by('name') self.assertQuerysetEqual( qs, ['Peter Norvig'], lambda b: b.name ) # Neither in this case qs = Author.objects.annotate(book_count=Count('book')).filter(book_count=2).order_by('name') self.assertQuerysetEqual( qs, ['Peter Norvig'], lambda b: b.name ) # This case used to fail because the ORM couldn't resolve the # automatically generated annotation name `book__count` qs = Author.objects.annotate(Count('book')).filter(book__count=2).order_by('name') self.assertQuerysetEqual( qs, ['Peter Norvig'], lambda b: b.name ) # Referencing the auto-generated name in an aggregate() also works. self.assertEqual( Author.objects.annotate(Count('book')).aggregate(Max('book__count')), {'book__count__max': 2} ) def test_annotate_joins(self): """ The base table's join isn't promoted to LOUTER. This could cause the query generation to fail if there is an exclude() for fk-field in the query, too. Refs #19087. """ qs = Book.objects.annotate(n=Count('pk')) self.assertIs(qs.query.alias_map['aggregation_regress_book'].join_type, None) # The query executes without problems. self.assertEqual(len(qs.exclude(publisher=-1)), 6) @skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks') def test_aggregate_duplicate_columns(self): # Regression test for #17144 results = Author.objects.annotate(num_contacts=Count('book_contact_set')) # There should only be one GROUP BY clause, for the `id` column. # `name` and `age` should not be grouped on. _, _, group_by = results.query.get_compiler(using='default').pre_sql_setup() self.assertEqual(len(group_by), 1) self.assertIn('id', group_by[0][0]) self.assertNotIn('name', group_by[0][0]) self.assertNotIn('age', group_by[0][0]) self.assertEqual( [(a.name, a.num_contacts) for a in results.order_by('name')], [ ('Adrian Holovaty', 1), ('Brad Dayley', 1), ('Jacob Kaplan-Moss', 0), ('James Bennett', 1), ('Jeffrey Forcier', 1), ('Paul Bissex', 0), ('Peter Norvig', 2), ('Stuart Russell', 0), ('Wesley J. Chun', 0), ] ) @skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks') def test_aggregate_duplicate_columns_only(self): # Works with only() too. 
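        # On backends with allows_group_by_pk (MySQL, for example), grouping
        # by the primary key alone suffices because it functionally
        # determines every other column of the row.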
results = Author.objects.only('id', 'name').annotate(num_contacts=Count('book_contact_set')) _, _, grouping = results.query.get_compiler(using='default').pre_sql_setup() self.assertEqual(len(grouping), 1) self.assertIn('id', grouping[0][0]) self.assertNotIn('name', grouping[0][0]) self.assertNotIn('age', grouping[0][0]) self.assertEqual( [(a.name, a.num_contacts) for a in results.order_by('name')], [ ('Adrian Holovaty', 1), ('Brad Dayley', 1), ('Jacob Kaplan-Moss', 0), ('James Bennett', 1), ('Jeffrey Forcier', 1), ('Paul Bissex', 0), ('Peter Norvig', 2), ('Stuart Russell', 0), ('Wesley J. Chun', 0), ] ) @skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks') def test_aggregate_duplicate_columns_select_related(self): # And select_related() results = Book.objects.select_related('contact').annotate( num_authors=Count('authors')) _, _, grouping = results.query.get_compiler(using='default').pre_sql_setup() # In the case of `group_by_selected_pks` we also group by contact.id because of the select_related. self.assertEqual(len(grouping), 1 if connection.features.allows_group_by_pk else 2) self.assertIn('id', grouping[0][0]) self.assertNotIn('name', grouping[0][0]) self.assertNotIn('contact', grouping[0][0]) self.assertEqual( [(b.name, b.num_authors) for b in results.order_by('name')], [ ('Artificial Intelligence: A Modern Approach', 2), ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1), ('Practical Django Projects', 1), ('Python Web Development with Django', 3), ('Sams Teach Yourself Django in 24 Hours', 1), ('The Definitive Guide to Django: Web Development Done Right', 2) ] ) @skipUnlessDBFeature('allows_group_by_selected_pks') def test_aggregate_unmanaged_model_columns(self): """ Unmanaged models are sometimes used to represent database views which may not allow grouping by selected primary key. """ def assertQuerysetResults(queryset): self.assertEqual( [(b.name, b.num_authors) for b in queryset.order_by('name')], [ ('Artificial Intelligence: A Modern Approach', 2), ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1), ('Practical Django Projects', 1), ('Python Web Development with Django', 3), ('Sams Teach Yourself Django in 24 Hours', 1), ('The Definitive Guide to Django: Web Development Done Right', 2), ] ) queryset = Book.objects.select_related('contact').annotate(num_authors=Count('authors')) # Unmanaged origin model. with mock.patch.object(Book._meta, 'managed', False): _, _, grouping = queryset.query.get_compiler(using='default').pre_sql_setup() self.assertEqual(len(grouping), len(Book._meta.fields) + 1) for index, field in enumerate(Book._meta.fields): self.assertIn(field.name, grouping[index][0]) self.assertIn(Author._meta.pk.name, grouping[-1][0]) assertQuerysetResults(queryset) # Unmanaged related model. with mock.patch.object(Author._meta, 'managed', False): _, _, grouping = queryset.query.get_compiler(using='default').pre_sql_setup() self.assertEqual(len(grouping), len(Author._meta.fields) + 1) self.assertIn(Book._meta.pk.name, grouping[0][0]) for index, field in enumerate(Author._meta.fields): self.assertIn(field.name, grouping[index + 1][0]) assertQuerysetResults(queryset) @skipUnlessDBFeature('allows_group_by_selected_pks') def test_aggregate_unmanaged_model_as_tables(self): qs = Book.objects.select_related('contact').annotate(num_authors=Count('authors')) # Force treating unmanaged models as tables. 
with mock.patch( 'django.db.connection.features.allows_group_by_selected_pks_on_model', return_value=True, ): with mock.patch.object(Book._meta, 'managed', False), \ mock.patch.object(Author._meta, 'managed', False): _, _, grouping = qs.query.get_compiler(using='default').pre_sql_setup() self.assertEqual(len(grouping), 2) self.assertIn('id', grouping[0][0]) self.assertIn('id', grouping[1][0]) self.assertQuerysetEqual( qs.order_by('name'), [ ('Artificial Intelligence: A Modern Approach', 2), ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1), ('Practical Django Projects', 1), ('Python Web Development with Django', 3), ('Sams Teach Yourself Django in 24 Hours', 1), ('The Definitive Guide to Django: Web Development Done Right', 2), ], attrgetter('name', 'num_authors'), ) def test_reverse_join_trimming(self): qs = Author.objects.annotate(Count('book_contact_set__contact')) self.assertIn(' JOIN ', str(qs.query)) def test_aggregation_with_generic_reverse_relation(self): """ Regression test for #10870: Aggregates with joins ignore extra filters provided by setup_joins tests aggregations with generic reverse relations """ django_book = Book.objects.get(name='Practical Django Projects') ItemTag.objects.create( object_id=django_book.id, tag='intermediate', content_type=ContentType.objects.get_for_model(django_book), ) ItemTag.objects.create( object_id=django_book.id, tag='django', content_type=ContentType.objects.get_for_model(django_book), ) # Assign a tag to model with same PK as the book above. If the JOIN # used in aggregation doesn't have content type as part of the # condition the annotation will also count the 'hi mom' tag for b. wmpk = WithManualPK.objects.create(id=django_book.pk) ItemTag.objects.create( object_id=wmpk.id, tag='hi mom', content_type=ContentType.objects.get_for_model(wmpk), ) ai_book = Book.objects.get(name__startswith='Paradigms of Artificial Intelligence') ItemTag.objects.create( object_id=ai_book.id, tag='intermediate', content_type=ContentType.objects.get_for_model(ai_book), ) self.assertEqual(Book.objects.aggregate(Count('tags')), {'tags__count': 3}) results = Book.objects.annotate(Count('tags')).order_by('-tags__count', 'name') self.assertEqual( [(b.name, b.tags__count) for b in results], [ ('Practical Django Projects', 2), ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1), ('Artificial Intelligence: A Modern Approach', 0), ('Python Web Development with Django', 0), ('Sams Teach Yourself Django in 24 Hours', 0), ('The Definitive Guide to Django: Web Development Done Right', 0) ] ) def test_negated_aggregation(self): expected_results = Author.objects.exclude( pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2) ).order_by('name') expected_results = [a.name for a in expected_results] qs = Author.objects.annotate(book_cnt=Count('book')).exclude( Q(book_cnt=2), Q(book_cnt=2)).order_by('name') self.assertQuerysetEqual( qs, expected_results, lambda b: b.name ) expected_results = Author.objects.exclude( pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2) ).order_by('name') expected_results = [a.name for a in expected_results] qs = Author.objects.annotate(book_cnt=Count('book')).exclude(Q(book_cnt=2) | Q(book_cnt=2)).order_by('name') self.assertQuerysetEqual( qs, expected_results, lambda b: b.name ) def test_name_filters(self): qs = Author.objects.annotate(Count('book')).filter( Q(book__count__exact=2) | Q(name='Adrian Holovaty') ).order_by('name') 
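        # book__count is the alias Django generates automatically for
        # Count('book'); since the disjunction involves an aggregate, the
        # whole condition has to be evaluated in the HAVING clause.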
self.assertQuerysetEqual( qs, ['Adrian Holovaty', 'Peter Norvig'], lambda b: b.name ) def test_name_expressions(self): # Aggregates are spotted correctly from F objects. # Note that Adrian's age is 34 in the fixtures, and he has one book # so both conditions match one author. qs = Author.objects.annotate(Count('book')).filter( Q(name='Peter Norvig') | Q(age=F('book__count') + 33) ).order_by('name') self.assertQuerysetEqual( qs, ['Adrian Holovaty', 'Peter Norvig'], lambda b: b.name ) def test_ticket_11293(self): q1 = Q(price__gt=50) q2 = Q(authors__count__gt=1) query = Book.objects.annotate(Count('authors')).filter( q1 | q2).order_by('pk') self.assertQuerysetEqual( query, [1, 4, 5, 6], lambda b: b.pk) def test_ticket_11293_q_immutable(self): """ Splitting a q object to parts for where/having doesn't alter the original q-object. """ q1 = Q(isbn='') q2 = Q(authors__count__gt=1) query = Book.objects.annotate(Count('authors')) query.filter(q1 | q2) self.assertEqual(len(q2.children), 1) def test_fobj_group_by(self): """ An F() object referring to related column works correctly in group by. """ qs = Book.objects.annotate( account=Count('authors') ).filter( account=F('publisher__num_awards') ) self.assertQuerysetEqual( qs, ['Sams Teach Yourself Django in 24 Hours'], lambda b: b.name) def test_annotate_reserved_word(self): """ Regression #18333 - Ensure annotated column name is properly quoted. """ vals = Book.objects.annotate(select=Count('authors__id')).aggregate(Sum('select'), Avg('select')) self.assertEqual(vals, { 'select__sum': 10, 'select__avg': Approximate(1.666, places=2), }) def test_annotate_on_relation(self): book = Book.objects.annotate(avg_price=Avg('price'), publisher_name=F('publisher__name')).get(pk=self.b1.pk) self.assertEqual(book.avg_price, 30.00) self.assertEqual(book.publisher_name, "Apress") def test_aggregate_on_relation(self): # A query with an existing annotation aggregation on a relation should # succeed. qs = Book.objects.annotate(avg_price=Avg('price')).aggregate( publisher_awards=Sum('publisher__num_awards') ) self.assertEqual(qs['publisher_awards'], 30) def test_annotate_distinct_aggregate(self): # There are three books with rating of 4.0 and two of the books have # the same price. Hence, the distinct removes one rating of 4.0 # from the results. 
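        # Concretely: the six ratings sum to 24.5, and the two books sharing
        # rating 4.0 and price 29.69 collapse into one distinct row, so both
        # sides below evaluate to 24.5 - 4.0 = 20.5.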
vals1 = Book.objects.values('rating', 'price').distinct().aggregate(result=Sum('rating')) vals2 = Book.objects.aggregate(result=Sum('rating') - Value(4.0)) self.assertEqual(vals1, vals2) def test_annotate_values_list_flat(self): """Find ages that are shared by at least two authors.""" qs = Author.objects.values_list('age', flat=True).annotate(age_count=Count('age')).filter(age_count__gt=1) self.assertSequenceEqual(qs, [29]) def test_allow_distinct(self): class MyAggregate(Aggregate): pass with self.assertRaisesMessage(TypeError, 'MyAggregate does not allow distinct'): MyAggregate('foo', distinct=True) class DistinctAggregate(Aggregate): allow_distinct = True DistinctAggregate('foo', distinct=True) class JoinPromotionTests(TestCase): def test_ticket_21150(self): b = Bravo.objects.create() c = Charlie.objects.create(bravo=b) qs = Charlie.objects.select_related('alfa').annotate(Count('bravo__charlie')) self.assertSequenceEqual(qs, [c]) self.assertIs(qs[0].alfa, None) a = Alfa.objects.create() c.alfa = a c.save() # Force re-evaluation qs = qs.all() self.assertSequenceEqual(qs, [c]) self.assertEqual(qs[0].alfa, a) def test_existing_join_not_promoted(self): # No promotion for existing joins qs = Charlie.objects.filter(alfa__name__isnull=False).annotate(Count('alfa__name')) self.assertIn(' INNER JOIN ', str(qs.query)) # Also, the existing join is unpromoted when doing filtering for already # promoted join. qs = Charlie.objects.annotate(Count('alfa__name')).filter(alfa__name__isnull=False) self.assertIn(' INNER JOIN ', str(qs.query)) # But, as the join is nullable first use by annotate will be LOUTER qs = Charlie.objects.annotate(Count('alfa__name')) self.assertIn(' LEFT OUTER JOIN ', str(qs.query)) def test_non_nullable_fk_not_promoted(self): qs = Book.objects.annotate(Count('contact__name')) self.assertIn(' INNER JOIN ', str(qs.query)) class SelfReferentialFKTests(TestCase): def test_ticket_24748(self): t1 = SelfRefFK.objects.create(name='t1') SelfRefFK.objects.create(name='t2', parent=t1) SelfRefFK.objects.create(name='t3', parent=t1) self.assertQuerysetEqual( SelfRefFK.objects.annotate(num_children=Count('children')).order_by('name'), [('t1', 2), ('t2', 0), ('t3', 0)], lambda x: (x.name, x.num_children) )
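

# A minimal illustrative sketch (not part of the original suite) of the
# annotate-then-aggregate pattern exercised throughout this module: a
# per-row annotation can be reduced by a terminal aggregate() call.
class AnnotateAggregateSketchTests(TestCase):
    def test_annotate_then_aggregate(self):
        b = Bravo.objects.create()
        Charlie.objects.create(bravo=b)
        # Each Bravo row is annotated with its Charlie count; aggregate()
        # then reduces over that annotation.
        result = Bravo.objects.annotate(n=Count('charlie')).aggregate(Max('n'))
        self.assertEqual(result, {'n__max': 1})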
import datetime import json from decimal import Decimal from django import forms from django.core import exceptions, serializers from django.db.models import DateField, DateTimeField, F, Func, Value from django.http import QueryDict from django.test import override_settings from django.test.utils import isolate_apps from django.utils import timezone from . import PostgreSQLSimpleTestCase, PostgreSQLTestCase from .models import ( BigAutoFieldModel, PostgreSQLModel, RangeLookupsModel, RangesModel, SmallAutoFieldModel, ) try: from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange from django.contrib.postgres import fields as pg_fields, forms as pg_forms from django.contrib.postgres.validators import ( RangeMaxValueValidator, RangeMinValueValidator, ) except ImportError: pass @isolate_apps('postgres_tests') class BasicTests(PostgreSQLSimpleTestCase): def test_get_field_display(self): class Model(PostgreSQLModel): field = pg_fields.IntegerRangeField( choices=[ ['1-50', [((1, 25), '1-25'), ([26, 50], '26-50')]], ((51, 100), '51-100'), ], ) tests = ( ((1, 25), '1-25'), ([26, 50], '26-50'), ((51, 100), '51-100'), ((1, 2), '(1, 2)'), ([1, 2], '[1, 2]'), ) for value, display in tests: with self.subTest(value=value, display=display): instance = Model(field=value) self.assertEqual(instance.get_field_display(), display) class TestSaveLoad(PostgreSQLTestCase): def test_all_fields(self): now = timezone.now() instance = RangesModel( ints=NumericRange(0, 10), bigints=NumericRange(10, 20), decimals=NumericRange(20, 30), timestamps=DateTimeTZRange(now - datetime.timedelta(hours=1), now), dates=DateRange(now.date() - datetime.timedelta(days=1), now.date()), ) instance.save() loaded = RangesModel.objects.get() self.assertEqual(instance.ints, loaded.ints) self.assertEqual(instance.bigints, loaded.bigints) self.assertEqual(instance.decimals, loaded.decimals) self.assertEqual(instance.timestamps, loaded.timestamps) self.assertEqual(instance.dates, loaded.dates) def test_range_object(self): r = NumericRange(0, 10) instance = RangesModel(ints=r) instance.save() loaded = RangesModel.objects.get() self.assertEqual(r, loaded.ints) def test_tuple(self): instance = RangesModel(ints=(0, 10)) instance.save() loaded = RangesModel.objects.get() self.assertEqual(NumericRange(0, 10), loaded.ints) def test_range_object_boundaries(self): r = NumericRange(0, 10, '[]') instance = RangesModel(decimals=r) instance.save() loaded = RangesModel.objects.get() self.assertEqual(r, loaded.decimals) self.assertIn(10, loaded.decimals) def test_unbounded(self): r = NumericRange(None, None, '()') instance = RangesModel(decimals=r) instance.save() loaded = RangesModel.objects.get() self.assertEqual(r, loaded.decimals) def test_empty(self): r = NumericRange(empty=True) instance = RangesModel(ints=r) instance.save() loaded = RangesModel.objects.get() self.assertEqual(r, loaded.ints) def test_null(self): instance = RangesModel(ints=None) instance.save() loaded = RangesModel.objects.get() self.assertIsNone(loaded.ints) def test_model_set_on_base_field(self): instance = RangesModel() field = instance._meta.get_field('ints') self.assertEqual(field.model, RangesModel) self.assertEqual(field.base_field.model, RangesModel) class TestRangeContainsLookup(PostgreSQLTestCase): @classmethod def setUpTestData(cls): cls.timestamps = [ datetime.datetime(year=2016, month=1, day=1), datetime.datetime(year=2016, month=1, day=2, hour=1), datetime.datetime(year=2016, month=1, day=2, hour=12), datetime.datetime(year=2016, month=1, day=3), 
datetime.datetime(year=2016, month=1, day=3, hour=1), datetime.datetime(year=2016, month=2, day=2), ] cls.aware_timestamps = [ timezone.make_aware(timestamp) for timestamp in cls.timestamps ] cls.dates = [ datetime.date(year=2016, month=1, day=1), datetime.date(year=2016, month=1, day=2), datetime.date(year=2016, month=1, day=3), datetime.date(year=2016, month=1, day=4), datetime.date(year=2016, month=2, day=2), datetime.date(year=2016, month=2, day=3), ] cls.obj = RangesModel.objects.create( dates=(cls.dates[0], cls.dates[3]), dates_inner=(cls.dates[1], cls.dates[2]), timestamps=(cls.timestamps[0], cls.timestamps[3]), timestamps_inner=(cls.timestamps[1], cls.timestamps[2]), ) cls.aware_obj = RangesModel.objects.create( dates=(cls.dates[0], cls.dates[3]), dates_inner=(cls.dates[1], cls.dates[2]), timestamps=(cls.aware_timestamps[0], cls.aware_timestamps[3]), timestamps_inner=(cls.timestamps[1], cls.timestamps[2]), ) # Objects that don't match any queries. for i in range(3, 4): RangesModel.objects.create( dates=(cls.dates[i], cls.dates[i + 1]), timestamps=(cls.timestamps[i], cls.timestamps[i + 1]), ) RangesModel.objects.create( dates=(cls.dates[i], cls.dates[i + 1]), timestamps=(cls.aware_timestamps[i], cls.aware_timestamps[i + 1]), ) def test_datetime_range_contains(self): filter_args = ( self.timestamps[1], self.aware_timestamps[1], (self.timestamps[1], self.timestamps[2]), (self.aware_timestamps[1], self.aware_timestamps[2]), Value(self.dates[0]), Func(F('dates'), function='lower', output_field=DateTimeField()), F('timestamps_inner'), ) for filter_arg in filter_args: with self.subTest(filter_arg=filter_arg): self.assertCountEqual( RangesModel.objects.filter(**{'timestamps__contains': filter_arg}), [self.obj, self.aware_obj], ) def test_date_range_contains(self): filter_args = ( self.timestamps[1], (self.dates[1], self.dates[2]), Value(self.dates[0], output_field=DateField()), Func(F('timestamps'), function='lower', output_field=DateField()), F('dates_inner'), ) for filter_arg in filter_args: with self.subTest(filter_arg=filter_arg): self.assertCountEqual( RangesModel.objects.filter(**{'dates__contains': filter_arg}), [self.obj, self.aware_obj], ) class TestQuerying(PostgreSQLTestCase): @classmethod def setUpTestData(cls): cls.objs = RangesModel.objects.bulk_create([ RangesModel(ints=NumericRange(0, 10)), RangesModel(ints=NumericRange(5, 15)), RangesModel(ints=NumericRange(None, 0)), RangesModel(ints=NumericRange(empty=True)), RangesModel(ints=None), ]) def test_exact(self): self.assertSequenceEqual( RangesModel.objects.filter(ints__exact=NumericRange(0, 10)), [self.objs[0]], ) def test_isnull(self): self.assertSequenceEqual( RangesModel.objects.filter(ints__isnull=True), [self.objs[4]], ) def test_isempty(self): self.assertSequenceEqual( RangesModel.objects.filter(ints__isempty=True), [self.objs[3]], ) def test_contains(self): self.assertSequenceEqual( RangesModel.objects.filter(ints__contains=8), [self.objs[0], self.objs[1]], ) def test_contains_range(self): self.assertSequenceEqual( RangesModel.objects.filter(ints__contains=NumericRange(3, 8)), [self.objs[0]], ) def test_contained_by(self): self.assertSequenceEqual( RangesModel.objects.filter(ints__contained_by=NumericRange(0, 20)), [self.objs[0], self.objs[1], self.objs[3]], ) def test_overlap(self): self.assertSequenceEqual( RangesModel.objects.filter(ints__overlap=NumericRange(3, 8)), [self.objs[0], self.objs[1]], ) def test_fully_lt(self): self.assertSequenceEqual( RangesModel.objects.filter(ints__fully_lt=NumericRange(5, 10)), 
            [self.objs[2]],
        )

    def test_fully_gt(self):
        self.assertSequenceEqual(
            RangesModel.objects.filter(ints__fully_gt=NumericRange(5, 10)),
            [],
        )

    def test_not_lt(self):
        self.assertSequenceEqual(
            RangesModel.objects.filter(ints__not_lt=NumericRange(5, 10)),
            [self.objs[1]],
        )

    def test_not_gt(self):
        self.assertSequenceEqual(
            RangesModel.objects.filter(ints__not_gt=NumericRange(5, 10)),
            [self.objs[0], self.objs[2]],
        )

    def test_adjacent_to(self):
        self.assertSequenceEqual(
            RangesModel.objects.filter(ints__adjacent_to=NumericRange(0, 5)),
            [self.objs[1], self.objs[2]],
        )

    def test_startswith(self):
        self.assertSequenceEqual(
            RangesModel.objects.filter(ints__startswith=0),
            [self.objs[0]],
        )

    def test_endswith(self):
        self.assertSequenceEqual(
            RangesModel.objects.filter(ints__endswith=0),
            [self.objs[2]],
        )

    def test_startswith_chaining(self):
        self.assertSequenceEqual(
            RangesModel.objects.filter(ints__startswith__gte=0),
            [self.objs[0], self.objs[1]],
        )

    def test_bound_type(self):
        decimals = RangesModel.objects.bulk_create([
            RangesModel(decimals=NumericRange(None, 10)),
            RangesModel(decimals=NumericRange(10, None)),
            RangesModel(decimals=NumericRange(5, 15)),
            RangesModel(decimals=NumericRange(5, 15, '(]')),
        ])
        tests = [
            ('lower_inc', True, [decimals[1], decimals[2]]),
            ('lower_inc', False, [decimals[0], decimals[3]]),
            ('lower_inf', True, [decimals[0]]),
            ('lower_inf', False, [decimals[1], decimals[2], decimals[3]]),
            ('upper_inc', True, [decimals[3]]),
            ('upper_inc', False, [decimals[0], decimals[1], decimals[2]]),
            ('upper_inf', True, [decimals[1]]),
            ('upper_inf', False, [decimals[0], decimals[2], decimals[3]]),
        ]
        for lookup, filter_arg, expected_result in tests:
            with self.subTest(lookup=lookup, filter_arg=filter_arg):
                self.assertSequenceEqual(
                    RangesModel.objects.filter(**{'decimals__%s' % lookup: filter_arg}),
                    expected_result,
                )


class TestQueryingWithRanges(PostgreSQLTestCase):
    def test_date_range(self):
        objs = [
            RangeLookupsModel.objects.create(date='2015-01-01'),
            RangeLookupsModel.objects.create(date='2015-05-05'),
        ]
        self.assertSequenceEqual(
            RangeLookupsModel.objects.filter(date__contained_by=DateRange('2015-01-01', '2015-05-04')),
            [objs[0]],
        )

    def test_date_range_datetime_field(self):
        objs = [
            RangeLookupsModel.objects.create(timestamp='2015-01-01'),
            RangeLookupsModel.objects.create(timestamp='2015-05-05'),
        ]
        self.assertSequenceEqual(
            RangeLookupsModel.objects.filter(timestamp__date__contained_by=DateRange('2015-01-01', '2015-05-04')),
            [objs[0]],
        )

    def test_datetime_range(self):
        objs = [
            RangeLookupsModel.objects.create(timestamp='2015-01-01T09:00:00'),
            RangeLookupsModel.objects.create(timestamp='2015-05-05T17:00:00'),
        ]
        self.assertSequenceEqual(
            RangeLookupsModel.objects.filter(
                timestamp__contained_by=DateTimeTZRange('2015-01-01T09:00', '2015-05-04T23:55')
            ),
            [objs[0]],
        )

    def test_small_integer_field_contained_by(self):
        objs = [
            RangeLookupsModel.objects.create(small_integer=8),
            RangeLookupsModel.objects.create(small_integer=4),
            RangeLookupsModel.objects.create(small_integer=-1),
        ]
        self.assertSequenceEqual(
            RangeLookupsModel.objects.filter(small_integer__contained_by=NumericRange(4, 6)),
            [objs[1]],
        )

    def test_integer_range(self):
        objs = [
            RangeLookupsModel.objects.create(integer=5),
            RangeLookupsModel.objects.create(integer=99),
            RangeLookupsModel.objects.create(integer=-1),
        ]
        self.assertSequenceEqual(
            RangeLookupsModel.objects.filter(integer__contained_by=NumericRange(1, 98)),
            [objs[0]]
        )

    def test_biginteger_range(self):
        objs = [
            RangeLookupsModel.objects.create(big_integer=5),
RangeLookupsModel.objects.create(big_integer=99), RangeLookupsModel.objects.create(big_integer=-1), ] self.assertSequenceEqual( RangeLookupsModel.objects.filter(big_integer__contained_by=NumericRange(1, 98)), [objs[0]] ) def test_decimal_field_contained_by(self): objs = [ RangeLookupsModel.objects.create(decimal_field=Decimal('1.33')), RangeLookupsModel.objects.create(decimal_field=Decimal('2.88')), RangeLookupsModel.objects.create(decimal_field=Decimal('99.17')), ] self.assertSequenceEqual( RangeLookupsModel.objects.filter( decimal_field__contained_by=NumericRange(Decimal('1.89'), Decimal('7.91')), ), [objs[1]], ) def test_float_range(self): objs = [ RangeLookupsModel.objects.create(float=5), RangeLookupsModel.objects.create(float=99), RangeLookupsModel.objects.create(float=-1), ] self.assertSequenceEqual( RangeLookupsModel.objects.filter(float__contained_by=NumericRange(1, 98)), [objs[0]] ) def test_small_auto_field_contained_by(self): objs = SmallAutoFieldModel.objects.bulk_create([ SmallAutoFieldModel() for i in range(1, 5) ]) self.assertSequenceEqual( SmallAutoFieldModel.objects.filter( id__contained_by=NumericRange(objs[1].pk, objs[3].pk), ), objs[1:3], ) def test_auto_field_contained_by(self): objs = RangeLookupsModel.objects.bulk_create([ RangeLookupsModel() for i in range(1, 5) ]) self.assertSequenceEqual( RangeLookupsModel.objects.filter( id__contained_by=NumericRange(objs[1].pk, objs[3].pk), ), objs[1:3], ) def test_big_auto_field_contained_by(self): objs = BigAutoFieldModel.objects.bulk_create([ BigAutoFieldModel() for i in range(1, 5) ]) self.assertSequenceEqual( BigAutoFieldModel.objects.filter( id__contained_by=NumericRange(objs[1].pk, objs[3].pk), ), objs[1:3], ) def test_f_ranges(self): parent = RangesModel.objects.create(decimals=NumericRange(0, 10)) objs = [ RangeLookupsModel.objects.create(float=5, parent=parent), RangeLookupsModel.objects.create(float=99, parent=parent), ] self.assertSequenceEqual( RangeLookupsModel.objects.filter(float__contained_by=F('parent__decimals')), [objs[0]] ) def test_exclude(self): objs = [ RangeLookupsModel.objects.create(float=5), RangeLookupsModel.objects.create(float=99), RangeLookupsModel.objects.create(float=-1), ] self.assertSequenceEqual( RangeLookupsModel.objects.exclude(float__contained_by=NumericRange(0, 100)), [objs[2]] ) class TestSerialization(PostgreSQLSimpleTestCase): test_data = ( '[{"fields": {"ints": "{\\"upper\\": \\"10\\", \\"lower\\": \\"0\\", ' '\\"bounds\\": \\"[)\\"}", "decimals": "{\\"empty\\": true}", ' '"bigints": null, "timestamps": "{\\"upper\\": \\"2014-02-02T12:12:12+00:00\\", ' '\\"lower\\": \\"2014-01-01T00:00:00+00:00\\", \\"bounds\\": \\"[)\\"}", ' '"timestamps_inner": null, ' '"dates": "{\\"upper\\": \\"2014-02-02\\", \\"lower\\": \\"2014-01-01\\", \\"bounds\\": \\"[)\\"}", ' '"dates_inner": null }, ' '"model": "postgres_tests.rangesmodel", "pk": null}]' ) lower_date = datetime.date(2014, 1, 1) upper_date = datetime.date(2014, 2, 2) lower_dt = datetime.datetime(2014, 1, 1, 0, 0, 0, tzinfo=timezone.utc) upper_dt = datetime.datetime(2014, 2, 2, 12, 12, 12, tzinfo=timezone.utc) def test_dumping(self): instance = RangesModel( ints=NumericRange(0, 10), decimals=NumericRange(empty=True), timestamps=DateTimeTZRange(self.lower_dt, self.upper_dt), dates=DateRange(self.lower_date, self.upper_date), ) data = serializers.serialize('json', [instance]) dumped = json.loads(data) for field in ('ints', 'dates', 'timestamps'): dumped[0]['fields'][field] = json.loads(dumped[0]['fields'][field]) check = 
json.loads(self.test_data) for field in ('ints', 'dates', 'timestamps'): check[0]['fields'][field] = json.loads(check[0]['fields'][field]) self.assertEqual(dumped, check) def test_loading(self): instance = list(serializers.deserialize('json', self.test_data))[0].object self.assertEqual(instance.ints, NumericRange(0, 10)) self.assertEqual(instance.decimals, NumericRange(empty=True)) self.assertIsNone(instance.bigints) self.assertEqual(instance.dates, DateRange(self.lower_date, self.upper_date)) self.assertEqual(instance.timestamps, DateTimeTZRange(self.lower_dt, self.upper_dt)) def test_serialize_range_with_null(self): instance = RangesModel(ints=NumericRange(None, 10)) data = serializers.serialize('json', [instance]) new_instance = list(serializers.deserialize('json', data))[0].object self.assertEqual(new_instance.ints, NumericRange(None, 10)) instance = RangesModel(ints=NumericRange(10, None)) data = serializers.serialize('json', [instance]) new_instance = list(serializers.deserialize('json', data))[0].object self.assertEqual(new_instance.ints, NumericRange(10, None)) class TestChecks(PostgreSQLSimpleTestCase): def test_choices_tuple_list(self): class Model(PostgreSQLModel): field = pg_fields.IntegerRangeField( choices=[ ['1-50', [((1, 25), '1-25'), ([26, 50], '26-50')]], ((51, 100), '51-100'), ], ) self.assertEqual(Model._meta.get_field('field').check(), []) class TestValidators(PostgreSQLSimpleTestCase): def test_max(self): validator = RangeMaxValueValidator(5) validator(NumericRange(0, 5)) msg = 'Ensure that this range is completely less than or equal to 5.' with self.assertRaises(exceptions.ValidationError) as cm: validator(NumericRange(0, 10)) self.assertEqual(cm.exception.messages[0], msg) self.assertEqual(cm.exception.code, 'max_value') with self.assertRaisesMessage(exceptions.ValidationError, msg): validator(NumericRange(0, None)) # an unbound range def test_min(self): validator = RangeMinValueValidator(5) validator(NumericRange(10, 15)) msg = 'Ensure that this range is completely greater than or equal to 5.' 
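        # NumericRange(0, 10) starts below the minimum of 5, so it must fail;
        # the unbounded NumericRange(None, 10) below fails for the same reason,
        # since it extends below any finite minimum.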
with self.assertRaises(exceptions.ValidationError) as cm: validator(NumericRange(0, 10)) self.assertEqual(cm.exception.messages[0], msg) self.assertEqual(cm.exception.code, 'min_value') with self.assertRaisesMessage(exceptions.ValidationError, msg): validator(NumericRange(None, 10)) # an unbound range class TestFormField(PostgreSQLSimpleTestCase): def test_valid_integer(self): field = pg_forms.IntegerRangeField() value = field.clean(['1', '2']) self.assertEqual(value, NumericRange(1, 2)) def test_valid_decimal(self): field = pg_forms.DecimalRangeField() value = field.clean(['1.12345', '2.001']) self.assertEqual(value, NumericRange(Decimal('1.12345'), Decimal('2.001'))) def test_valid_timestamps(self): field = pg_forms.DateTimeRangeField() value = field.clean(['01/01/2014 00:00:00', '02/02/2014 12:12:12']) lower = datetime.datetime(2014, 1, 1, 0, 0, 0) upper = datetime.datetime(2014, 2, 2, 12, 12, 12) self.assertEqual(value, DateTimeTZRange(lower, upper)) def test_valid_dates(self): field = pg_forms.DateRangeField() value = field.clean(['01/01/2014', '02/02/2014']) lower = datetime.date(2014, 1, 1) upper = datetime.date(2014, 2, 2) self.assertEqual(value, DateRange(lower, upper)) def test_using_split_datetime_widget(self): class SplitDateTimeRangeField(pg_forms.DateTimeRangeField): base_field = forms.SplitDateTimeField class SplitForm(forms.Form): field = SplitDateTimeRangeField() form = SplitForm() self.assertHTMLEqual(str(form), ''' <tr> <th> <label for="id_field_0">Field:</label> </th> <td> <input id="id_field_0_0" name="field_0_0" type="text"> <input id="id_field_0_1" name="field_0_1" type="text"> <input id="id_field_1_0" name="field_1_0" type="text"> <input id="id_field_1_1" name="field_1_1" type="text"> </td> </tr> ''') form = SplitForm({ 'field_0_0': '01/01/2014', 'field_0_1': '00:00:00', 'field_1_0': '02/02/2014', 'field_1_1': '12:12:12', }) self.assertTrue(form.is_valid()) lower = datetime.datetime(2014, 1, 1, 0, 0, 0) upper = datetime.datetime(2014, 2, 2, 12, 12, 12) self.assertEqual(form.cleaned_data['field'], DateTimeTZRange(lower, upper)) def test_none(self): field = pg_forms.IntegerRangeField(required=False) value = field.clean(['', '']) self.assertIsNone(value) def test_datetime_form_as_table(self): class DateTimeRangeForm(forms.Form): datetime_field = pg_forms.DateTimeRangeField(show_hidden_initial=True) form = DateTimeRangeForm() self.assertHTMLEqual( form.as_table(), """ <tr><th> <label for="id_datetime_field_0">Datetime field:</label> </th><td> <input type="text" name="datetime_field_0" id="id_datetime_field_0"> <input type="text" name="datetime_field_1" id="id_datetime_field_1"> <input type="hidden" name="initial-datetime_field_0" id="initial-id_datetime_field_0"> <input type="hidden" name="initial-datetime_field_1" id="initial-id_datetime_field_1"> </td></tr> """ ) form = DateTimeRangeForm({ 'datetime_field_0': '2010-01-01 11:13:00', 'datetime_field_1': '2020-12-12 16:59:00', }) self.assertHTMLEqual( form.as_table(), """ <tr><th> <label for="id_datetime_field_0">Datetime field:</label> </th><td> <input type="text" name="datetime_field_0" value="2010-01-01 11:13:00" id="id_datetime_field_0"> <input type="text" name="datetime_field_1" value="2020-12-12 16:59:00" id="id_datetime_field_1"> <input type="hidden" name="initial-datetime_field_0" value="2010-01-01 11:13:00" id="initial-id_datetime_field_0"> <input type="hidden" name="initial-datetime_field_1" value="2020-12-12 16:59:00" id="initial-id_datetime_field_1"></td></tr> """ ) def test_datetime_form_initial_data(self): 
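        # show_hidden_initial=True renders each initial value in a hidden
        # input; has_changed() compares the submitted data against those hidden
        # values rather than against the form's initial argument.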
class DateTimeRangeForm(forms.Form): datetime_field = pg_forms.DateTimeRangeField(show_hidden_initial=True) data = QueryDict(mutable=True) data.update({ 'datetime_field_0': '2010-01-01 11:13:00', 'datetime_field_1': '', 'initial-datetime_field_0': '2010-01-01 10:12:00', 'initial-datetime_field_1': '', }) form = DateTimeRangeForm(data=data) self.assertTrue(form.has_changed()) data['initial-datetime_field_0'] = '2010-01-01 11:13:00' form = DateTimeRangeForm(data=data) self.assertFalse(form.has_changed()) def test_rendering(self): class RangeForm(forms.Form): ints = pg_forms.IntegerRangeField() self.assertHTMLEqual(str(RangeForm()), ''' <tr> <th><label for="id_ints_0">Ints:</label></th> <td> <input id="id_ints_0" name="ints_0" type="number"> <input id="id_ints_1" name="ints_1" type="number"> </td> </tr> ''') def test_integer_lower_bound_higher(self): field = pg_forms.IntegerRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['10', '2']) self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.') self.assertEqual(cm.exception.code, 'bound_ordering') def test_integer_open(self): field = pg_forms.IntegerRangeField() value = field.clean(['', '0']) self.assertEqual(value, NumericRange(None, 0)) def test_integer_incorrect_data_type(self): field = pg_forms.IntegerRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean('1') self.assertEqual(cm.exception.messages[0], 'Enter two whole numbers.') self.assertEqual(cm.exception.code, 'invalid') def test_integer_invalid_lower(self): field = pg_forms.IntegerRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['a', '2']) self.assertEqual(cm.exception.messages[0], 'Enter a whole number.') def test_integer_invalid_upper(self): field = pg_forms.IntegerRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['1', 'b']) self.assertEqual(cm.exception.messages[0], 'Enter a whole number.') def test_integer_required(self): field = pg_forms.IntegerRangeField(required=True) with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['', '']) self.assertEqual(cm.exception.messages[0], 'This field is required.') value = field.clean([1, '']) self.assertEqual(value, NumericRange(1, None)) def test_decimal_lower_bound_higher(self): field = pg_forms.DecimalRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['1.8', '1.6']) self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.') self.assertEqual(cm.exception.code, 'bound_ordering') def test_decimal_open(self): field = pg_forms.DecimalRangeField() value = field.clean(['', '3.1415926']) self.assertEqual(value, NumericRange(None, Decimal('3.1415926'))) def test_decimal_incorrect_data_type(self): field = pg_forms.DecimalRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean('1.6') self.assertEqual(cm.exception.messages[0], 'Enter two numbers.') self.assertEqual(cm.exception.code, 'invalid') def test_decimal_invalid_lower(self): field = pg_forms.DecimalRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['a', '3.1415926']) self.assertEqual(cm.exception.messages[0], 'Enter a number.') def test_decimal_invalid_upper(self): field = pg_forms.DecimalRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['1.61803399', 'b']) self.assertEqual(cm.exception.messages[0], 'Enter a number.') def 
test_decimal_required(self): field = pg_forms.DecimalRangeField(required=True) with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['', '']) self.assertEqual(cm.exception.messages[0], 'This field is required.') value = field.clean(['1.61803399', '']) self.assertEqual(value, NumericRange(Decimal('1.61803399'), None)) def test_date_lower_bound_higher(self): field = pg_forms.DateRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['2013-04-09', '1976-04-16']) self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.') self.assertEqual(cm.exception.code, 'bound_ordering') def test_date_open(self): field = pg_forms.DateRangeField() value = field.clean(['', '2013-04-09']) self.assertEqual(value, DateRange(None, datetime.date(2013, 4, 9))) def test_date_incorrect_data_type(self): field = pg_forms.DateRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean('1') self.assertEqual(cm.exception.messages[0], 'Enter two valid dates.') self.assertEqual(cm.exception.code, 'invalid') def test_date_invalid_lower(self): field = pg_forms.DateRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['a', '2013-04-09']) self.assertEqual(cm.exception.messages[0], 'Enter a valid date.') def test_date_invalid_upper(self): field = pg_forms.DateRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['2013-04-09', 'b']) self.assertEqual(cm.exception.messages[0], 'Enter a valid date.') def test_date_required(self): field = pg_forms.DateRangeField(required=True) with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['', '']) self.assertEqual(cm.exception.messages[0], 'This field is required.') value = field.clean(['1976-04-16', '']) self.assertEqual(value, DateRange(datetime.date(1976, 4, 16), None)) def test_date_has_changed_first(self): self.assertTrue(pg_forms.DateRangeField().has_changed( ['2010-01-01', '2020-12-12'], ['2010-01-31', '2020-12-12'], )) def test_date_has_changed_last(self): self.assertTrue(pg_forms.DateRangeField().has_changed( ['2010-01-01', '2020-12-12'], ['2010-01-01', '2020-12-31'], )) def test_datetime_lower_bound_higher(self): field = pg_forms.DateTimeRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['2006-10-25 14:59', '2006-10-25 14:58']) self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.') self.assertEqual(cm.exception.code, 'bound_ordering') def test_datetime_open(self): field = pg_forms.DateTimeRangeField() value = field.clean(['', '2013-04-09 11:45']) self.assertEqual(value, DateTimeTZRange(None, datetime.datetime(2013, 4, 9, 11, 45))) def test_datetime_incorrect_data_type(self): field = pg_forms.DateTimeRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean('2013-04-09 11:45') self.assertEqual(cm.exception.messages[0], 'Enter two valid date/times.') self.assertEqual(cm.exception.code, 'invalid') def test_datetime_invalid_lower(self): field = pg_forms.DateTimeRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['45', '2013-04-09 11:45']) self.assertEqual(cm.exception.messages[0], 'Enter a valid date/time.') def test_datetime_invalid_upper(self): field = pg_forms.DateTimeRangeField() with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['2013-04-09 11:45', 'sweet pickles']) self.assertEqual(cm.exception.messages[0], 'Enter a valid 
date/time.') def test_datetime_required(self): field = pg_forms.DateTimeRangeField(required=True) with self.assertRaises(exceptions.ValidationError) as cm: field.clean(['', '']) self.assertEqual(cm.exception.messages[0], 'This field is required.') value = field.clean(['2013-04-09 11:45', '']) self.assertEqual(value, DateTimeTZRange(datetime.datetime(2013, 4, 9, 11, 45), None)) @override_settings(USE_TZ=True, TIME_ZONE='Africa/Johannesburg') def test_datetime_prepare_value(self): field = pg_forms.DateTimeRangeField() value = field.prepare_value( DateTimeTZRange(datetime.datetime(2015, 5, 22, 16, 6, 33, tzinfo=timezone.utc), None) ) self.assertEqual(value, [datetime.datetime(2015, 5, 22, 18, 6, 33), None]) def test_datetime_has_changed_first(self): self.assertTrue(pg_forms.DateTimeRangeField().has_changed( ['2010-01-01 00:00', '2020-12-12 00:00'], ['2010-01-31 23:00', '2020-12-12 00:00'], )) def test_datetime_has_changed_last(self): self.assertTrue(pg_forms.DateTimeRangeField().has_changed( ['2010-01-01 00:00', '2020-12-12 00:00'], ['2010-01-01 00:00', '2020-12-31 23:00'], )) def test_model_field_formfield_integer(self): model_field = pg_fields.IntegerRangeField() form_field = model_field.formfield() self.assertIsInstance(form_field, pg_forms.IntegerRangeField) def test_model_field_formfield_biginteger(self): model_field = pg_fields.BigIntegerRangeField() form_field = model_field.formfield() self.assertIsInstance(form_field, pg_forms.IntegerRangeField) def test_model_field_formfield_float(self): model_field = pg_fields.DecimalRangeField() form_field = model_field.formfield() self.assertIsInstance(form_field, pg_forms.DecimalRangeField) def test_model_field_formfield_date(self): model_field = pg_fields.DateRangeField() form_field = model_field.formfield() self.assertIsInstance(form_field, pg_forms.DateRangeField) def test_model_field_formfield_datetime(self): model_field = pg_fields.DateTimeRangeField() form_field = model_field.formfield() self.assertIsInstance(form_field, pg_forms.DateTimeRangeField) def test_has_changed(self): for field, value in ( (pg_forms.DateRangeField(), ['2010-01-01', '2020-12-12']), (pg_forms.DateTimeRangeField(), ['2010-01-01 11:13', '2020-12-12 14:52']), (pg_forms.IntegerRangeField(), [1, 2]), (pg_forms.DecimalRangeField(), ['1.12345', '2.001']), ): with self.subTest(field=field.__class__.__name__): self.assertTrue(field.has_changed(None, value)) self.assertTrue(field.has_changed([value[0], ''], value)) self.assertTrue(field.has_changed(['', value[1]], value)) self.assertFalse(field.has_changed(value, value)) class TestWidget(PostgreSQLSimpleTestCase): def test_range_widget(self): f = pg_forms.ranges.DateTimeRangeField() self.assertHTMLEqual( f.widget.render('datetimerange', ''), '<input type="text" name="datetimerange_0"><input type="text" name="datetimerange_1">' ) self.assertHTMLEqual( f.widget.render('datetimerange', None), '<input type="text" name="datetimerange_0"><input type="text" name="datetimerange_1">' ) dt_range = DateTimeTZRange( datetime.datetime(2006, 1, 10, 7, 30), datetime.datetime(2006, 2, 12, 9, 50) ) self.assertHTMLEqual( f.widget.render('datetimerange', dt_range), '<input type="text" name="datetimerange_0" value="2006-01-10 07:30:00">' '<input type="text" name="datetimerange_1" value="2006-02-12 09:50:00">' )
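
    # A minimal extra check (not part of the original suite): RangeWidget is a
    # MultiWidget wrapping two copies of its base widget, so decompress() should
    # split a range object into (lower, upper) and map empty input to
    # (None, None). This assumes only the public RangeWidget/decompress() API.
    def test_range_widget_decompress(self):
        widget = pg_forms.ranges.RangeWidget(forms.TextInput())
        self.assertEqual(widget.decompress(NumericRange(1, 5)), (1, 5))
        self.assertEqual(widget.decompress(None), (None, None))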
import datetime from decimal import Decimal from unittest import skipIf from django.core.exceptions import FieldDoesNotExist, FieldError from django.db import connection from django.db.models import ( BooleanField, Case, Count, DateTimeField, Exists, ExpressionWrapper, F, FloatField, Func, IntegerField, Max, NullBooleanField, OuterRef, Q, Subquery, Sum, Value, When, ) from django.db.models.expressions import RawSQL from django.db.models.functions import Length, Lower from django.test import TestCase, skipUnlessDBFeature from .models import ( Author, Book, Company, DepartmentStore, Employee, Publisher, Store, Ticket, ) def cxOracle_py3_bug(func): """ There's a bug in Django/cx_Oracle with respect to string handling under Python 3 (essentially, they treat Python 3 strings as Python 2 strings rather than unicode). This makes some tests here fail under Python 3, so we mark them as expected failures until someone fixes them in #23843. """ from unittest import expectedFailure from django.db import connection return expectedFailure(func) if connection.vendor == 'oracle' else func class NonAggregateAnnotationTestCase(TestCase): @classmethod def setUpTestData(cls): cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34) cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35) cls.a3 = Author.objects.create(name='Brad Dayley', age=45) cls.a4 = Author.objects.create(name='James Bennett', age=29) cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37) cls.a6 = Author.objects.create(name='Paul Bissex', age=29) cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25) cls.a8 = Author.objects.create(name='Peter Norvig', age=57) cls.a9 = Author.objects.create(name='Stuart Russell', age=46) cls.a1.friends.add(cls.a2, cls.a4) cls.a2.friends.add(cls.a1, cls.a7) cls.a4.friends.add(cls.a1) cls.a5.friends.add(cls.a6, cls.a7) cls.a6.friends.add(cls.a5, cls.a7) cls.a7.friends.add(cls.a2, cls.a5, cls.a6) cls.a8.friends.add(cls.a9) cls.a9.friends.add(cls.a8) cls.p1 = Publisher.objects.create(name='Apress', num_awards=3) cls.p2 = Publisher.objects.create(name='Sams', num_awards=1) cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7) cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9) cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0) cls.b1 = Book.objects.create( isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right', pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1, pubdate=datetime.date(2007, 12, 6) ) cls.b2 = Book.objects.create( isbn='067232959', name='Sams Teach Yourself Django in 24 Hours', pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2, pubdate=datetime.date(2008, 3, 3) ) cls.b3 = Book.objects.create( isbn='159059996', name='Practical Django Projects', pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1, pubdate=datetime.date(2008, 6, 23) ) cls.b4 = Book.objects.create( isbn='013235613', name='Python Web Development with Django', pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3, pubdate=datetime.date(2008, 11, 3) ) cls.b5 = Book.objects.create( isbn='013790395', name='Artificial Intelligence: A Modern Approach', pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3, pubdate=datetime.date(1995, 1, 15) ) cls.b6 = Book.objects.create( isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', pages=946, 
rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4, pubdate=datetime.date(1991, 10, 15) ) cls.b1.authors.add(cls.a1, cls.a2) cls.b2.authors.add(cls.a3) cls.b3.authors.add(cls.a4) cls.b4.authors.add(cls.a5, cls.a6, cls.a7) cls.b5.authors.add(cls.a8, cls.a9) cls.b6.authors.add(cls.a8) s1 = Store.objects.create( name='Amazon.com', original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42), friday_night_closing=datetime.time(23, 59, 59) ) s2 = Store.objects.create( name='Books.com', original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37), friday_night_closing=datetime.time(23, 59, 59) ) s3 = Store.objects.create( name="Mamma and Pappa's Books", original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14), friday_night_closing=datetime.time(21, 30) ) s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6) s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6) s3.books.add(cls.b3, cls.b4, cls.b6) def test_basic_annotation(self): books = Book.objects.annotate(is_book=Value(1)) for book in books: self.assertEqual(book.is_book, 1) def test_basic_f_annotation(self): books = Book.objects.annotate(another_rating=F('rating')) for book in books: self.assertEqual(book.another_rating, book.rating) def test_joined_annotation(self): books = Book.objects.select_related('publisher').annotate( num_awards=F('publisher__num_awards')) for book in books: self.assertEqual(book.num_awards, book.publisher.num_awards) def test_mixed_type_annotation_date_interval(self): active = datetime.datetime(2015, 3, 20, 14, 0, 0) duration = datetime.timedelta(hours=1) expires = datetime.datetime(2015, 3, 20, 14, 0, 0) + duration Ticket.objects.create(active_at=active, duration=duration) t = Ticket.objects.annotate( expires=ExpressionWrapper(F('active_at') + F('duration'), output_field=DateTimeField()) ).first() self.assertEqual(t.expires, expires) def test_mixed_type_annotation_numbers(self): test = self.b1 b = Book.objects.annotate( combined=ExpressionWrapper(F('pages') + F('rating'), output_field=IntegerField()) ).get(isbn=test.isbn) combined = int(test.pages + test.rating) self.assertEqual(b.combined, combined) def test_empty_expression_annotation(self): books = Book.objects.annotate( selected=ExpressionWrapper(Q(pk__in=[]), output_field=BooleanField()) ) self.assertEqual(len(books), Book.objects.count()) self.assertTrue(all(not book.selected for book in books)) books = Book.objects.annotate( selected=ExpressionWrapper(Q(pk__in=Book.objects.none()), output_field=BooleanField()) ) self.assertEqual(len(books), Book.objects.count()) self.assertTrue(all(not book.selected for book in books)) def test_annotate_with_aggregation(self): books = Book.objects.annotate(is_book=Value(1), rating_count=Count('rating')) for book in books: self.assertEqual(book.is_book, 1) self.assertEqual(book.rating_count, 1) def test_combined_expression_annotation_with_aggregation(self): book = Book.objects.annotate( combined=ExpressionWrapper(Value(3) * Value(4), output_field=IntegerField()), rating_count=Count('rating'), ).first() self.assertEqual(book.combined, 12) self.assertEqual(book.rating_count, 1) def test_combined_f_expression_annotation_with_aggregation(self): book = Book.objects.filter(isbn='159059725').annotate( combined=ExpressionWrapper(F('price') * F('pages'), output_field=FloatField()), rating_count=Count('rating'), ).first() self.assertEqual(book.combined, 13410.0) self.assertEqual(book.rating_count, 1) def test_aggregate_over_annotation(self): agg = 
Author.objects.annotate(other_age=F('age')).aggregate(otherage_sum=Sum('other_age')) other_agg = Author.objects.aggregate(age_sum=Sum('age')) self.assertEqual(agg['otherage_sum'], other_agg['age_sum']) @skipUnlessDBFeature('can_distinct_on_fields') def test_distinct_on_with_annotation(self): store = Store.objects.create( name='test store', original_opening=datetime.datetime.now(), friday_night_closing=datetime.time(21, 00, 00), ) names = [ 'Theodore Roosevelt', 'Eleanor Roosevelt', 'Franklin Roosevelt', 'Ned Stark', 'Catelyn Stark', ] for name in names: Employee.objects.create( store=store, first_name=name.split()[0], last_name=name.split()[1], age=30, salary=2000, ) people = Employee.objects.annotate( name_lower=Lower('last_name'), ).distinct('name_lower') self.assertEqual({p.last_name for p in people}, {'Stark', 'Roosevelt'}) self.assertEqual(len(people), 2) people2 = Employee.objects.annotate( test_alias=F('store__name'), ).distinct('test_alias') self.assertEqual(len(people2), 1) lengths = Employee.objects.annotate( name_len=Length('first_name'), ).distinct('name_len').values_list('name_len', flat=True) self.assertCountEqual(lengths, [3, 7, 8]) def test_filter_annotation(self): books = Book.objects.annotate(is_book=Value(1)).filter(is_book=1) for book in books: self.assertEqual(book.is_book, 1) def test_filter_annotation_with_f(self): books = Book.objects.annotate( other_rating=F('rating') ).filter(other_rating=3.5) for book in books: self.assertEqual(book.other_rating, 3.5) def test_filter_annotation_with_double_f(self): books = Book.objects.annotate( other_rating=F('rating') ).filter(other_rating=F('rating')) for book in books: self.assertEqual(book.other_rating, book.rating) def test_filter_agg_with_double_f(self): books = Book.objects.annotate( sum_rating=Sum('rating') ).filter(sum_rating=F('sum_rating')) for book in books: self.assertEqual(book.sum_rating, book.rating) def test_filter_wrong_annotation(self): with self.assertRaisesMessage(FieldError, "Cannot resolve keyword 'nope' into field."): list(Book.objects.annotate( sum_rating=Sum('rating') ).filter(sum_rating=F('nope'))) def test_decimal_annotation(self): salary = Decimal(10) ** -Employee._meta.get_field('salary').decimal_places Employee.objects.create( first_name='Max', last_name='Paine', store=Store.objects.first(), age=23, salary=salary, ) self.assertEqual( Employee.objects.annotate(new_salary=F('salary') / 10).get().new_salary, salary / 10, ) def test_filter_decimal_annotation(self): qs = Book.objects.annotate(new_price=F('price') + 1).filter(new_price=Decimal(31)).values_list('new_price') self.assertEqual(qs.get(), (Decimal(31),)) def test_combined_annotation_commutative(self): book1 = Book.objects.annotate(adjusted_rating=F('rating') + 2).get(pk=self.b1.pk) book2 = Book.objects.annotate(adjusted_rating=2 + F('rating')).get(pk=self.b1.pk) self.assertEqual(book1.adjusted_rating, book2.adjusted_rating) book1 = Book.objects.annotate(adjusted_rating=F('rating') + None).get(pk=self.b1.pk) book2 = Book.objects.annotate(adjusted_rating=None + F('rating')).get(pk=self.b1.pk) self.assertEqual(book1.adjusted_rating, book2.adjusted_rating) def test_update_with_annotation(self): book_preupdate = Book.objects.get(pk=self.b2.pk) Book.objects.annotate(other_rating=F('rating') - 1).update(rating=F('other_rating')) book_postupdate = Book.objects.get(pk=self.b2.pk) self.assertEqual(book_preupdate.rating - 1, book_postupdate.rating) def test_annotation_with_m2m(self): books = 
Book.objects.annotate(author_age=F('authors__age')).filter(pk=self.b1.pk).order_by('author_age') self.assertEqual(books[0].author_age, 34) self.assertEqual(books[1].author_age, 35) def test_annotation_reverse_m2m(self): books = Book.objects.annotate( store_name=F('store__name'), ).filter( name='Practical Django Projects', ).order_by('store_name') self.assertQuerysetEqual( books, [ 'Amazon.com', 'Books.com', 'Mamma and Pappa\'s Books' ], lambda b: b.store_name ) def test_values_annotation(self): """ Annotations can reference fields in a values clause, and contribute to an existing values clause. """ # annotate references a field in values() qs = Book.objects.values('rating').annotate(other_rating=F('rating') - 1) book = qs.get(pk=self.b1.pk) self.assertEqual(book['rating'] - 1, book['other_rating']) # filter refs the annotated value book = qs.get(other_rating=4) self.assertEqual(book['other_rating'], 4) # can annotate an existing values with a new field book = qs.annotate(other_isbn=F('isbn')).get(other_rating=4) self.assertEqual(book['other_rating'], 4) self.assertEqual(book['other_isbn'], '155860191') def test_values_with_pk_annotation(self): # annotate references a field in values() with pk publishers = Publisher.objects.values('id', 'book__rating').annotate(total=Sum('book__rating')) for publisher in publishers.filter(pk=self.p1.pk): self.assertEqual(publisher['book__rating'], publisher['total']) @skipUnlessDBFeature('allows_group_by_pk') def test_rawsql_group_by_collapse(self): raw = RawSQL('SELECT MIN(id) FROM annotations_book', []) qs = Author.objects.values('id').annotate( min_book_id=raw, count_friends=Count('friends'), ).order_by() _, _, group_by = qs.query.get_compiler(using='default').pre_sql_setup() self.assertEqual(len(group_by), 1) self.assertNotEqual(raw, group_by[0]) def test_defer_annotation(self): """ Deferred attributes can be referenced by an annotation, but they are not themselves deferred, and cannot be deferred. """ qs = Book.objects.defer('rating').annotate(other_rating=F('rating') - 1) with self.assertNumQueries(2): book = qs.get(other_rating=4) self.assertEqual(book.rating, 5) self.assertEqual(book.other_rating, 4) with self.assertRaisesMessage(FieldDoesNotExist, "Book has no field named 'other_rating'"): book = qs.defer('other_rating').get(other_rating=4) def test_mti_annotations(self): """ Fields on an inherited model can be referenced by an annotated field. 
""" d = DepartmentStore.objects.create( name='Angus & Robinson', original_opening=datetime.date(2014, 3, 8), friday_night_closing=datetime.time(21, 00, 00), chain='Westfield' ) books = Book.objects.filter(rating__gt=4) for b in books: d.books.add(b) qs = DepartmentStore.objects.annotate( other_name=F('name'), other_chain=F('chain'), is_open=Value(True, BooleanField()), book_isbn=F('books__isbn') ).order_by('book_isbn').filter(chain='Westfield') self.assertQuerysetEqual( qs, [ ('Angus & Robinson', 'Westfield', True, '155860191'), ('Angus & Robinson', 'Westfield', True, '159059725') ], lambda d: (d.other_name, d.other_chain, d.is_open, d.book_isbn) ) def test_null_annotation(self): """ Annotating None onto a model round-trips """ book = Book.objects.annotate(no_value=Value(None, output_field=IntegerField())).first() self.assertIsNone(book.no_value) def test_order_by_annotation(self): authors = Author.objects.annotate(other_age=F('age')).order_by('other_age') self.assertQuerysetEqual( authors, [ 25, 29, 29, 34, 35, 37, 45, 46, 57, ], lambda a: a.other_age ) def test_order_by_aggregate(self): authors = Author.objects.values('age').annotate(age_count=Count('age')).order_by('age_count', 'age') self.assertQuerysetEqual( authors, [ (25, 1), (34, 1), (35, 1), (37, 1), (45, 1), (46, 1), (57, 1), (29, 2), ], lambda a: (a['age'], a['age_count']) ) def test_raw_sql_with_inherited_field(self): DepartmentStore.objects.create( name='Angus & Robinson', original_opening=datetime.date(2014, 3, 8), friday_night_closing=datetime.time(21), chain='Westfield', area=123, ) tests = ( ('name', 'Angus & Robinson'), ('surface', 123), ("case when name='Angus & Robinson' then chain else name end", 'Westfield'), ) for sql, expected_result in tests: with self.subTest(sql=sql): self.assertSequenceEqual( DepartmentStore.objects.annotate( annotation=RawSQL(sql, ()), ).values_list('annotation', flat=True), [expected_result], ) def test_annotate_exists(self): authors = Author.objects.annotate(c=Count('id')).filter(c__gt=1) self.assertFalse(authors.exists()) def test_column_field_ordering(self): """ Columns are aligned in the correct order for resolve_columns. This test will fail on MySQL if column ordering is out. Column fields should be aligned as: 1. extra_select 2. model_fields 3. annotation_fields 4. 
model_related_fields """ store = Store.objects.first() Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine', store=store, age=23, salary=Decimal(50000.00)) Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers', store=store, age=18, salary=Decimal(40000.00)) qs = Employee.objects.extra( select={'random_value': '42'} ).select_related('store').annotate( annotated_value=Value(17), ) rows = [ (1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17), (2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17) ] self.assertQuerysetEqual( qs.order_by('id'), rows, lambda e: ( e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age, e.salary, e.store.name, e.annotated_value)) def test_column_field_ordering_with_deferred(self): store = Store.objects.first() Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine', store=store, age=23, salary=Decimal(50000.00)) Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers', store=store, age=18, salary=Decimal(40000.00)) qs = Employee.objects.extra( select={'random_value': '42'} ).select_related('store').annotate( annotated_value=Value(17), ) rows = [ (1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17), (2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17) ] # and we respect deferred columns! self.assertQuerysetEqual( qs.defer('age').order_by('id'), rows, lambda e: ( e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age, e.salary, e.store.name, e.annotated_value)) @cxOracle_py3_bug def test_custom_functions(self): Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save() Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save() Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save() Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save() qs = Company.objects.annotate( tagline=Func( F('motto'), F('ticker_name'), F('description'), Value('No Tag'), function='COALESCE' ) ).order_by('name') self.assertQuerysetEqual( qs, [ ('Apple', 'APPL'), ('Django Software Foundation', 'No Tag'), ('Google', 'Do No Evil'), ('Yahoo', 'Internet Company') ], lambda c: (c.name, c.tagline) ) @cxOracle_py3_bug def test_custom_functions_can_ref_other_functions(self): Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save() Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save() Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save() Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save() class Lower(Func): function = 'LOWER' qs = Company.objects.annotate( tagline=Func( F('motto'), F('ticker_name'), F('description'), Value('No Tag'), function='COALESCE', ) ).annotate( tagline_lower=Lower(F('tagline')), ).order_by('name') # LOWER function supported by: # oracle, postgres, mysql, sqlite, sqlserver self.assertQuerysetEqual( qs, [ ('Apple', 'APPL'.lower()), ('Django Software Foundation', 'No Tag'.lower()), ('Google', 'Do No Evil'.lower()), ('Yahoo', 'Internet Company'.lower()) ], lambda c: (c.name, c.tagline_lower) ) def test_boolean_value_annotation(self): books = Book.objects.annotate( is_book=Value(True, output_field=BooleanField()), is_pony=Value(False, output_field=BooleanField()), 
is_none=Value(None, output_field=BooleanField(null=True)), is_none_old=Value(None, output_field=NullBooleanField()), ) self.assertGreater(len(books), 0) for book in books: self.assertIs(book.is_book, True) self.assertIs(book.is_pony, False) self.assertIsNone(book.is_none) self.assertIsNone(book.is_none_old) def test_annotation_in_f_grouped_by_annotation(self): qs = ( Publisher.objects.annotate(multiplier=Value(3)) # group by option => sum of value * multiplier .values('name') .annotate(multiplied_value_sum=Sum(F('multiplier') * F('num_awards'))) .order_by() ) self.assertCountEqual( qs, [ {'multiplied_value_sum': 9, 'name': 'Apress'}, {'multiplied_value_sum': 0, 'name': "Jonno's House of Books"}, {'multiplied_value_sum': 27, 'name': 'Morgan Kaufmann'}, {'multiplied_value_sum': 21, 'name': 'Prentice Hall'}, {'multiplied_value_sum': 3, 'name': 'Sams'}, ] ) def test_arguments_must_be_expressions(self): msg = 'QuerySet.annotate() received non-expression(s): %s.' with self.assertRaisesMessage(TypeError, msg % BooleanField()): Book.objects.annotate(BooleanField()) with self.assertRaisesMessage(TypeError, msg % True): Book.objects.annotate(is_book=True) with self.assertRaisesMessage(TypeError, msg % ', '.join([str(BooleanField()), 'True'])): Book.objects.annotate(BooleanField(), Value(False), is_book=True) def test_chaining_annotation_filter_with_m2m(self): qs = Author.objects.filter( name='Adrian Holovaty', friends__age=35, ).annotate( jacob_name=F('friends__name'), ).filter( friends__age=29, ).annotate( james_name=F('friends__name'), ).values('jacob_name', 'james_name') self.assertCountEqual( qs, [{'jacob_name': 'Jacob Kaplan-Moss', 'james_name': 'James Bennett'}], ) def test_annotation_filter_with_subquery(self): long_books_qs = Book.objects.filter( publisher=OuterRef('pk'), pages__gt=400, ).values('publisher').annotate(count=Count('pk')).values('count') publisher_books_qs = Publisher.objects.annotate( total_books=Count('book'), ).filter( total_books=Subquery(long_books_qs, output_field=IntegerField()), ).values('name') self.assertCountEqual(publisher_books_qs, [{'name': 'Sams'}, {'name': 'Morgan Kaufmann'}]) def test_annotation_exists_aggregate_values_chaining(self): qs = Book.objects.values('publisher').annotate( has_authors=Exists(Book.authors.through.objects.filter(book=OuterRef('pk'))), max_pubdate=Max('pubdate'), ).values_list('max_pubdate', flat=True).order_by('max_pubdate') self.assertCountEqual(qs, [ datetime.date(1991, 10, 15), datetime.date(2008, 3, 3), datetime.date(2008, 6, 23), datetime.date(2008, 11, 3), ]) @skipIf( connection.vendor == 'mysql' and 'ONLY_FULL_GROUP_BY' in connection.sql_mode, 'GROUP BY optimization does not work properly when ONLY_FULL_GROUP_BY ' 'mode is enabled on MySQL, see #31331.', ) def test_annotation_aggregate_with_m2o(self): qs = Author.objects.filter(age__lt=30).annotate( max_pages=Case( When(book_contact_set__isnull=True, then=Value(0)), default=Max(F('book__pages')), ), ).values('name', 'max_pages') self.assertCountEqual(qs, [ {'name': 'James Bennett', 'max_pages': 300}, {'name': 'Paul Bissex', 'max_pages': 0}, {'name': 'Wesley J. Chun', 'max_pages': 0}, ])
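
    # A minimal extra check (not part of the original suite): calling .values()
    # before .annotate() switches the annotation to group-by semantics, so the
    # per-publisher counts below must add up to the total number of books.
    def test_values_before_annotate_groups_rows(self):
        qs = Book.objects.values('publisher').annotate(total=Count('id')).order_by()
        self.assertEqual(sum(group['total'] for group in qs), Book.objects.count())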
import unittest from datetime import date, datetime, time, timedelta from decimal import Decimal from operator import attrgetter, itemgetter from uuid import UUID from django.core.exceptions import FieldError from django.db.models import ( BinaryField, BooleanField, Case, Count, DecimalField, F, GenericIPAddressField, IntegerField, Max, Min, Q, Sum, TextField, Value, When, ) from django.test import SimpleTestCase, TestCase from .models import CaseTestModel, Client, FKCaseTestModel, O2OCaseTestModel try: from PIL import Image except ImportError: Image = None class CaseExpressionTests(TestCase): @classmethod def setUpTestData(cls): o = CaseTestModel.objects.create(integer=1, integer2=1, string='1') O2OCaseTestModel.objects.create(o2o=o, integer=1) FKCaseTestModel.objects.create(fk=o, integer=1) o = CaseTestModel.objects.create(integer=2, integer2=3, string='2') O2OCaseTestModel.objects.create(o2o=o, integer=2) FKCaseTestModel.objects.create(fk=o, integer=2) FKCaseTestModel.objects.create(fk=o, integer=3) o = CaseTestModel.objects.create(integer=3, integer2=4, string='3') O2OCaseTestModel.objects.create(o2o=o, integer=3) FKCaseTestModel.objects.create(fk=o, integer=3) FKCaseTestModel.objects.create(fk=o, integer=4) o = CaseTestModel.objects.create(integer=2, integer2=2, string='2') O2OCaseTestModel.objects.create(o2o=o, integer=2) FKCaseTestModel.objects.create(fk=o, integer=2) FKCaseTestModel.objects.create(fk=o, integer=3) o = CaseTestModel.objects.create(integer=3, integer2=4, string='3') O2OCaseTestModel.objects.create(o2o=o, integer=3) FKCaseTestModel.objects.create(fk=o, integer=3) FKCaseTestModel.objects.create(fk=o, integer=4) o = CaseTestModel.objects.create(integer=3, integer2=3, string='3') O2OCaseTestModel.objects.create(o2o=o, integer=3) FKCaseTestModel.objects.create(fk=o, integer=3) FKCaseTestModel.objects.create(fk=o, integer=4) o = CaseTestModel.objects.create(integer=4, integer2=5, string='4') O2OCaseTestModel.objects.create(o2o=o, integer=1) FKCaseTestModel.objects.create(fk=o, integer=5) # GROUP BY on Oracle fails with TextField/BinaryField; see #24096. 
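        # (Grouping or selecting DISTINCT over LOB columns raises ORA-00932 on
        # Oracle, so the values(*self.non_lob_fields) calls in the aggregation
        # tests below keep binary/text fields out of the GROUP BY clause.)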
cls.non_lob_fields = [ f.name for f in CaseTestModel._meta.get_fields() if not (f.is_relation and f.auto_created) and not isinstance(f, (BinaryField, TextField)) ] def test_annotate(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate(test=Case( When(integer=1, then=Value('one')), When(integer=2, then=Value('two')), default=Value('other'), )).order_by('pk'), [(1, 'one'), (2, 'two'), (3, 'other'), (2, 'two'), (3, 'other'), (3, 'other'), (4, 'other')], transform=attrgetter('integer', 'test') ) def test_annotate_without_default(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate(test=Case( When(integer=1, then=1), When(integer=2, then=2), )).order_by('pk'), [(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'test') ) def test_annotate_with_expression_as_value(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate(f_test=Case( When(integer=1, then=F('integer') + 1), When(integer=2, then=F('integer') + 3), default='integer', )).order_by('pk'), [(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 4)], transform=attrgetter('integer', 'f_test') ) def test_annotate_with_expression_as_condition(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate(f_test=Case( When(integer2=F('integer'), then=Value('equal')), When(integer2=F('integer') + 1, then=Value('+1')), )).order_by('pk'), [(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, '+1')], transform=attrgetter('integer', 'f_test') ) def test_annotate_with_join_in_value(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate(join_test=Case( When(integer=1, then=F('o2o_rel__integer') + 1), When(integer=2, then=F('o2o_rel__integer') + 3), default='o2o_rel__integer', )).order_by('pk'), [(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 1)], transform=attrgetter('integer', 'join_test') ) def test_annotate_with_in_clause(self): fk_rels = FKCaseTestModel.objects.filter(integer__in=[5]) self.assertQuerysetEqual( CaseTestModel.objects.only('pk', 'integer').annotate(in_test=Sum(Case( When(fk_rel__in=fk_rels, then=F('fk_rel__integer')), default=Value(0), ))).order_by('pk'), [(1, 0), (2, 0), (3, 0), (2, 0), (3, 0), (3, 0), (4, 5)], transform=attrgetter('integer', 'in_test') ) def test_annotate_with_join_in_condition(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate(join_test=Case( When(integer2=F('o2o_rel__integer'), then=Value('equal')), When(integer2=F('o2o_rel__integer') + 1, then=Value('+1')), default=Value('other'), )).order_by('pk'), [(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, 'other')], transform=attrgetter('integer', 'join_test') ) def test_annotate_with_join_in_predicate(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate(join_test=Case( When(o2o_rel__integer=1, then=Value('one')), When(o2o_rel__integer=2, then=Value('two')), When(o2o_rel__integer=3, then=Value('three')), default=Value('other'), )).order_by('pk'), [(1, 'one'), (2, 'two'), (3, 'three'), (2, 'two'), (3, 'three'), (3, 'three'), (4, 'one')], transform=attrgetter('integer', 'join_test') ) def test_annotate_with_annotation_in_value(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate( f_plus_1=F('integer') + 1, f_plus_3=F('integer') + 3, ).annotate( f_test=Case( When(integer=1, then='f_plus_1'), When(integer=2, then='f_plus_3'), default='integer', ), ).order_by('pk'), [(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 4)], transform=attrgetter('integer', 'f_test') ) def 
test_annotate_with_annotation_in_condition(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate( f_plus_1=F('integer') + 1, ).annotate( f_test=Case( When(integer2=F('integer'), then=Value('equal')), When(integer2=F('f_plus_1'), then=Value('+1')), ), ).order_by('pk'), [(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, '+1')], transform=attrgetter('integer', 'f_test') ) def test_annotate_with_annotation_in_predicate(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate( f_minus_2=F('integer') - 2, ).annotate( test=Case( When(f_minus_2=-1, then=Value('negative one')), When(f_minus_2=0, then=Value('zero')), When(f_minus_2=1, then=Value('one')), default=Value('other'), ), ).order_by('pk'), [(1, 'negative one'), (2, 'zero'), (3, 'one'), (2, 'zero'), (3, 'one'), (3, 'one'), (4, 'other')], transform=attrgetter('integer', 'test') ) def test_annotate_with_aggregation_in_value(self): self.assertQuerysetEqual( CaseTestModel.objects.values(*self.non_lob_fields).annotate( min=Min('fk_rel__integer'), max=Max('fk_rel__integer'), ).annotate( test=Case( When(integer=2, then='min'), When(integer=3, then='max'), ), ).order_by('pk'), [(1, None, 1, 1), (2, 2, 2, 3), (3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4), (3, 4, 3, 4), (4, None, 5, 5)], transform=itemgetter('integer', 'test', 'min', 'max') ) def test_annotate_with_aggregation_in_condition(self): self.assertQuerysetEqual( CaseTestModel.objects.values(*self.non_lob_fields).annotate( min=Min('fk_rel__integer'), max=Max('fk_rel__integer'), ).annotate( test=Case( When(integer2=F('min'), then=Value('min')), When(integer2=F('max'), then=Value('max')), ), ).order_by('pk'), [(1, 1, 'min'), (2, 3, 'max'), (3, 4, 'max'), (2, 2, 'min'), (3, 4, 'max'), (3, 3, 'min'), (4, 5, 'min')], transform=itemgetter('integer', 'integer2', 'test') ) def test_annotate_with_aggregation_in_predicate(self): self.assertQuerysetEqual( CaseTestModel.objects.values(*self.non_lob_fields).annotate( max=Max('fk_rel__integer'), ).annotate( test=Case( When(max=3, then=Value('max = 3')), When(max=4, then=Value('max = 4')), default=Value(''), ), ).order_by('pk'), [(1, 1, ''), (2, 3, 'max = 3'), (3, 4, 'max = 4'), (2, 3, 'max = 3'), (3, 4, 'max = 4'), (3, 4, 'max = 4'), (4, 5, '')], transform=itemgetter('integer', 'max', 'test') ) def test_annotate_exclude(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate(test=Case( When(integer=1, then=Value('one')), When(integer=2, then=Value('two')), default=Value('other'), )).exclude(test='other').order_by('pk'), [(1, 'one'), (2, 'two'), (2, 'two')], transform=attrgetter('integer', 'test') ) def test_annotate_values_not_in_order_by(self): self.assertEqual( list(CaseTestModel.objects.annotate(test=Case( When(integer=1, then=Value('one')), When(integer=2, then=Value('two')), When(integer=3, then=Value('three')), default=Value('other'), )).order_by('test').values_list('integer', flat=True)), [1, 4, 3, 3, 3, 2, 2] ) def test_annotate_with_empty_when(self): objects = CaseTestModel.objects.annotate( selected=Case( When(pk__in=[], then=Value('selected')), default=Value('not selected'), ) ) self.assertEqual(len(objects), CaseTestModel.objects.count()) self.assertTrue(all(obj.selected == 'not selected' for obj in objects)) def test_combined_expression(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate( test=Case( When(integer=1, then=2), When(integer=2, then=1), default=3, ) + 1, ).order_by('pk'), [(1, 3), (2, 2), (3, 4), (2, 2), (3, 4), (3, 4), (4, 4)], transform=attrgetter('integer', 'test') ) 
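
    # A minimal extra check (not part of the original suite): since Case is an
    # ordinary expression, an annotated Case can also drive ordering. The three
    # rows with integer=3 get priority 0 and must sort first.
    def test_order_by_case(self):
        qs = CaseTestModel.objects.annotate(
            priority=Case(
                When(integer=3, then=Value(0)),
                default=Value(1),
                output_field=IntegerField(),
            ),
        ).order_by('priority', 'pk')
        self.assertEqual(list(qs.values_list('integer', flat=True))[:3], [3, 3, 3])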
def test_in_subquery(self): self.assertQuerysetEqual( CaseTestModel.objects.filter( pk__in=CaseTestModel.objects.annotate( test=Case( When(integer=F('integer2'), then='pk'), When(integer=4, then='pk'), ), ).values('test')).order_by('pk'), [(1, 1), (2, 2), (3, 3), (4, 5)], transform=attrgetter('integer', 'integer2') ) def test_condition_with_lookups(self): qs = CaseTestModel.objects.annotate( test=Case( When(Q(integer2=1), string='2', then=Value(False)), When(Q(integer2=1), string='1', then=Value(True)), default=Value(False), output_field=BooleanField(), ), ) self.assertIs(qs.get(integer=1).test, True) def test_case_reuse(self): SOME_CASE = Case( When(pk=0, then=Value('0')), default=Value('1'), ) self.assertQuerysetEqual( CaseTestModel.objects.annotate(somecase=SOME_CASE).order_by('pk'), CaseTestModel.objects.annotate(somecase=SOME_CASE).order_by('pk').values_list('pk', 'somecase'), lambda x: (x.pk, x.somecase) ) def test_aggregate(self): self.assertEqual( CaseTestModel.objects.aggregate( one=Sum(Case( When(integer=1, then=1), )), two=Sum(Case( When(integer=2, then=1), )), three=Sum(Case( When(integer=3, then=1), )), four=Sum(Case( When(integer=4, then=1), )), ), {'one': 1, 'two': 2, 'three': 3, 'four': 1} ) def test_aggregate_with_expression_as_value(self): self.assertEqual( CaseTestModel.objects.aggregate( one=Sum(Case(When(integer=1, then='integer'))), two=Sum(Case(When(integer=2, then=F('integer') - 1))), three=Sum(Case(When(integer=3, then=F('integer') + 1))), ), {'one': 1, 'two': 2, 'three': 12} ) def test_aggregate_with_expression_as_condition(self): self.assertEqual( CaseTestModel.objects.aggregate( equal=Sum(Case( When(integer2=F('integer'), then=1), )), plus_one=Sum(Case( When(integer2=F('integer') + 1, then=1), )), ), {'equal': 3, 'plus_one': 4} ) def test_filter(self): self.assertQuerysetEqual( CaseTestModel.objects.filter(integer2=Case( When(integer=2, then=3), When(integer=3, then=4), default=1, )).order_by('pk'), [(1, 1), (2, 3), (3, 4), (3, 4)], transform=attrgetter('integer', 'integer2') ) def test_filter_without_default(self): self.assertQuerysetEqual( CaseTestModel.objects.filter(integer2=Case( When(integer=2, then=3), When(integer=3, then=4), )).order_by('pk'), [(2, 3), (3, 4), (3, 4)], transform=attrgetter('integer', 'integer2') ) def test_filter_with_expression_as_value(self): self.assertQuerysetEqual( CaseTestModel.objects.filter(integer2=Case( When(integer=2, then=F('integer') + 1), When(integer=3, then=F('integer')), default='integer', )).order_by('pk'), [(1, 1), (2, 3), (3, 3)], transform=attrgetter('integer', 'integer2') ) def test_filter_with_expression_as_condition(self): self.assertQuerysetEqual( CaseTestModel.objects.filter(string=Case( When(integer2=F('integer'), then=Value('2')), When(integer2=F('integer') + 1, then=Value('3')), )).order_by('pk'), [(3, 4, '3'), (2, 2, '2'), (3, 4, '3')], transform=attrgetter('integer', 'integer2', 'string') ) def test_filter_with_join_in_value(self): self.assertQuerysetEqual( CaseTestModel.objects.filter(integer2=Case( When(integer=2, then=F('o2o_rel__integer') + 1), When(integer=3, then=F('o2o_rel__integer')), default='o2o_rel__integer', )).order_by('pk'), [(1, 1), (2, 3), (3, 3)], transform=attrgetter('integer', 'integer2') ) def test_filter_with_join_in_condition(self): self.assertQuerysetEqual( CaseTestModel.objects.filter(integer=Case( When(integer2=F('o2o_rel__integer') + 1, then=2), When(integer2=F('o2o_rel__integer'), then=3), )).order_by('pk'), [(2, 3), (3, 3)], transform=attrgetter('integer', 'integer2') ) def 
test_filter_with_join_in_predicate(self): self.assertQuerysetEqual( CaseTestModel.objects.filter(integer2=Case( When(o2o_rel__integer=1, then=1), When(o2o_rel__integer=2, then=3), When(o2o_rel__integer=3, then=4), )).order_by('pk'), [(1, 1), (2, 3), (3, 4), (3, 4)], transform=attrgetter('integer', 'integer2') ) def test_filter_with_annotation_in_value(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate( f=F('integer'), f_plus_1=F('integer') + 1, ).filter( integer2=Case( When(integer=2, then='f_plus_1'), When(integer=3, then='f'), ), ).order_by('pk'), [(2, 3), (3, 3)], transform=attrgetter('integer', 'integer2') ) def test_filter_with_annotation_in_condition(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate( f_plus_1=F('integer') + 1, ).filter( integer=Case( When(integer2=F('integer'), then=2), When(integer2=F('f_plus_1'), then=3), ), ).order_by('pk'), [(3, 4), (2, 2), (3, 4)], transform=attrgetter('integer', 'integer2') ) def test_filter_with_annotation_in_predicate(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate( f_plus_1=F('integer') + 1, ).filter( integer2=Case( When(f_plus_1=3, then=3), When(f_plus_1=4, then=4), default=1, ), ).order_by('pk'), [(1, 1), (2, 3), (3, 4), (3, 4)], transform=attrgetter('integer', 'integer2') ) def test_filter_with_aggregation_in_value(self): self.assertQuerysetEqual( CaseTestModel.objects.values(*self.non_lob_fields).annotate( min=Min('fk_rel__integer'), max=Max('fk_rel__integer'), ).filter( integer2=Case( When(integer=2, then='min'), When(integer=3, then='max'), ), ).order_by('pk'), [(3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4)], transform=itemgetter('integer', 'integer2', 'min', 'max') ) def test_filter_with_aggregation_in_condition(self): self.assertQuerysetEqual( CaseTestModel.objects.values(*self.non_lob_fields).annotate( min=Min('fk_rel__integer'), max=Max('fk_rel__integer'), ).filter( integer=Case( When(integer2=F('min'), then=2), When(integer2=F('max'), then=3), ), ).order_by('pk'), [(3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4)], transform=itemgetter('integer', 'integer2', 'min', 'max') ) def test_filter_with_aggregation_in_predicate(self): self.assertQuerysetEqual( CaseTestModel.objects.values(*self.non_lob_fields).annotate( max=Max('fk_rel__integer'), ).filter( integer=Case( When(max=3, then=2), When(max=4, then=3), ), ).order_by('pk'), [(2, 3, 3), (3, 4, 4), (2, 2, 3), (3, 4, 4), (3, 3, 4)], transform=itemgetter('integer', 'integer2', 'max') ) def test_update(self): CaseTestModel.objects.update( string=Case( When(integer=1, then=Value('one')), When(integer=2, then=Value('two')), default=Value('other'), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, 'one'), (2, 'two'), (3, 'other'), (2, 'two'), (3, 'other'), (3, 'other'), (4, 'other')], transform=attrgetter('integer', 'string') ) def test_update_without_default(self): CaseTestModel.objects.update( integer2=Case( When(integer=1, then=1), When(integer=2, then=2), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'integer2') ) def test_update_with_expression_as_value(self): CaseTestModel.objects.update( integer=Case( When(integer=1, then=F('integer') + 1), When(integer=2, then=F('integer') + 3), default='integer', ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [('1', 2), ('2', 5), ('3', 3), ('2', 5), ('3', 3), ('3', 3), ('4', 4)], transform=attrgetter('string', 'integer') ) def 
test_update_with_expression_as_condition(self): CaseTestModel.objects.update( string=Case( When(integer2=F('integer'), then=Value('equal')), When(integer2=F('integer') + 1, then=Value('+1')), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, '+1')], transform=attrgetter('integer', 'string') ) def test_update_with_join_in_condition_raise_field_error(self): with self.assertRaisesMessage(FieldError, 'Joined field references are not permitted in this query'): CaseTestModel.objects.update( integer=Case( When(integer2=F('o2o_rel__integer') + 1, then=2), When(integer2=F('o2o_rel__integer'), then=3), ), ) def test_update_with_join_in_predicate_raise_field_error(self): with self.assertRaisesMessage(FieldError, 'Joined field references are not permitted in this query'): CaseTestModel.objects.update( string=Case( When(o2o_rel__integer=1, then=Value('one')), When(o2o_rel__integer=2, then=Value('two')), When(o2o_rel__integer=3, then=Value('three')), default=Value('other'), ), ) def test_update_big_integer(self): CaseTestModel.objects.update( big_integer=Case( When(integer=1, then=1), When(integer=2, then=2), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'big_integer') ) def test_update_binary(self): CaseTestModel.objects.update( binary=Case( When(integer=1, then=b'one'), When(integer=2, then=b'two'), default=b'', ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, b'one'), (2, b'two'), (3, b''), (2, b'two'), (3, b''), (3, b''), (4, b'')], transform=lambda o: (o.integer, bytes(o.binary)) ) def test_update_boolean(self): CaseTestModel.objects.update( boolean=Case( When(integer=1, then=True), When(integer=2, then=True), default=False, ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, True), (2, True), (3, False), (2, True), (3, False), (3, False), (4, False)], transform=attrgetter('integer', 'boolean') ) def test_update_date(self): CaseTestModel.objects.update( date=Case( When(integer=1, then=date(2015, 1, 1)), When(integer=2, then=date(2015, 1, 2)), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [ (1, date(2015, 1, 1)), (2, date(2015, 1, 2)), (3, None), (2, date(2015, 1, 2)), (3, None), (3, None), (4, None) ], transform=attrgetter('integer', 'date') ) def test_update_date_time(self): CaseTestModel.objects.update( date_time=Case( When(integer=1, then=datetime(2015, 1, 1)), When(integer=2, then=datetime(2015, 1, 2)), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [ (1, datetime(2015, 1, 1)), (2, datetime(2015, 1, 2)), (3, None), (2, datetime(2015, 1, 2)), (3, None), (3, None), (4, None) ], transform=attrgetter('integer', 'date_time') ) def test_update_decimal(self): CaseTestModel.objects.update( decimal=Case( When(integer=1, then=Decimal('1.1')), When(integer=2, then=Value(Decimal('2.2'), output_field=DecimalField())), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [ (1, Decimal('1.1')), (2, Decimal('2.2')), (3, None), (2, Decimal('2.2')), (3, None), (3, None), (4, None) ], transform=attrgetter('integer', 'decimal') ) def test_update_duration(self): CaseTestModel.objects.update( duration=Case( When(integer=1, then=timedelta(1)), When(integer=2, then=timedelta(2)), ), ) self.assertQuerysetEqual( 
CaseTestModel.objects.all().order_by('pk'), [(1, timedelta(1)), (2, timedelta(2)), (3, None), (2, timedelta(2)), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'duration') ) def test_update_email(self): CaseTestModel.objects.update( email=Case( When(integer=1, then=Value('[email protected]')), When(integer=2, then=Value('[email protected]')), default=Value(''), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, '[email protected]'), (2, '[email protected]'), (3, ''), (2, '[email protected]'), (3, ''), (3, ''), (4, '')], transform=attrgetter('integer', 'email') ) def test_update_file(self): CaseTestModel.objects.update( file=Case( When(integer=1, then=Value('~/1')), When(integer=2, then=Value('~/2')), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, '~/1'), (2, '~/2'), (3, ''), (2, '~/2'), (3, ''), (3, ''), (4, '')], transform=lambda o: (o.integer, str(o.file)) ) def test_update_file_path(self): CaseTestModel.objects.update( file_path=Case( When(integer=1, then=Value('~/1')), When(integer=2, then=Value('~/2')), default=Value(''), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, '~/1'), (2, '~/2'), (3, ''), (2, '~/2'), (3, ''), (3, ''), (4, '')], transform=attrgetter('integer', 'file_path') ) def test_update_float(self): CaseTestModel.objects.update( float=Case( When(integer=1, then=1.1), When(integer=2, then=2.2), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, 1.1), (2, 2.2), (3, None), (2, 2.2), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'float') ) @unittest.skipUnless(Image, "Pillow not installed") def test_update_image(self): CaseTestModel.objects.update( image=Case( When(integer=1, then=Value('~/1')), When(integer=2, then=Value('~/2')), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, '~/1'), (2, '~/2'), (3, ''), (2, '~/2'), (3, ''), (3, ''), (4, '')], transform=lambda o: (o.integer, str(o.image)) ) def test_update_generic_ip_address(self): CaseTestModel.objects.update( generic_ip_address=Case( When(integer=1, then=Value('1.1.1.1')), When(integer=2, then=Value('2.2.2.2')), output_field=GenericIPAddressField(), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, '1.1.1.1'), (2, '2.2.2.2'), (3, None), (2, '2.2.2.2'), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'generic_ip_address') ) def test_update_null_boolean(self): CaseTestModel.objects.update( null_boolean=Case( When(integer=1, then=True), When(integer=2, then=False), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, True), (2, False), (3, None), (2, False), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'null_boolean') ) def test_update_null_boolean_old(self): CaseTestModel.objects.update( null_boolean_old=Case( When(integer=1, then=True), When(integer=2, then=False), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, True), (2, False), (3, None), (2, False), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'null_boolean_old') ) def test_update_positive_big_integer(self): CaseTestModel.objects.update( positive_big_integer=Case( When(integer=1, then=1), When(integer=2, then=2), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'positive_big_integer') ) def 
test_update_positive_integer(self): CaseTestModel.objects.update( positive_integer=Case( When(integer=1, then=1), When(integer=2, then=2), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'positive_integer') ) def test_update_positive_small_integer(self): CaseTestModel.objects.update( positive_small_integer=Case( When(integer=1, then=1), When(integer=2, then=2), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'positive_small_integer') ) def test_update_slug(self): CaseTestModel.objects.update( slug=Case( When(integer=1, then=Value('1')), When(integer=2, then=Value('2')), default=Value(''), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, '1'), (2, '2'), (3, ''), (2, '2'), (3, ''), (3, ''), (4, '')], transform=attrgetter('integer', 'slug') ) def test_update_small_integer(self): CaseTestModel.objects.update( small_integer=Case( When(integer=1, then=1), When(integer=2, then=2), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'small_integer') ) def test_update_string(self): CaseTestModel.objects.filter(string__in=['1', '2']).update( string=Case( When(integer=1, then=Value('1')), When(integer=2, then=Value('2')), ), ) self.assertQuerysetEqual( CaseTestModel.objects.filter(string__in=['1', '2']).order_by('pk'), [(1, '1'), (2, '2'), (2, '2')], transform=attrgetter('integer', 'string') ) def test_update_text(self): CaseTestModel.objects.update( text=Case( When(integer=1, then=Value('1')), When(integer=2, then=Value('2')), default=Value(''), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, '1'), (2, '2'), (3, ''), (2, '2'), (3, ''), (3, ''), (4, '')], transform=attrgetter('integer', 'text') ) def test_update_time(self): CaseTestModel.objects.update( time=Case( When(integer=1, then=time(1)), When(integer=2, then=time(2)), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [(1, time(1)), (2, time(2)), (3, None), (2, time(2)), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'time') ) def test_update_url(self): CaseTestModel.objects.update( url=Case( When(integer=1, then=Value('http://1.example.com/')), When(integer=2, then=Value('http://2.example.com/')), default=Value(''), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [ (1, 'http://1.example.com/'), (2, 'http://2.example.com/'), (3, ''), (2, 'http://2.example.com/'), (3, ''), (3, ''), (4, '') ], transform=attrgetter('integer', 'url') ) def test_update_uuid(self): CaseTestModel.objects.update( uuid=Case( When(integer=1, then=UUID('11111111111111111111111111111111')), When(integer=2, then=UUID('22222222222222222222222222222222')), ), ) self.assertQuerysetEqual( CaseTestModel.objects.all().order_by('pk'), [ (1, UUID('11111111111111111111111111111111')), (2, UUID('22222222222222222222222222222222')), (3, None), (2, UUID('22222222222222222222222222222222')), (3, None), (3, None), (4, None), ], transform=attrgetter('integer', 'uuid') ) def test_update_fk(self): obj1, obj2 = CaseTestModel.objects.all()[:2] CaseTestModel.objects.update( fk=Case( When(integer=1, then=obj1.pk), When(integer=2, then=obj2.pk), ), ) self.assertQuerysetEqual( 
CaseTestModel.objects.all().order_by('pk'), [(1, obj1.pk), (2, obj2.pk), (3, None), (2, obj2.pk), (3, None), (3, None), (4, None)], transform=attrgetter('integer', 'fk_id') ) def test_lookup_in_condition(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate( test=Case( When(integer__lt=2, then=Value('less than 2')), When(integer__gt=2, then=Value('greater than 2')), default=Value('equal to 2'), ), ).order_by('pk'), [ (1, 'less than 2'), (2, 'equal to 2'), (3, 'greater than 2'), (2, 'equal to 2'), (3, 'greater than 2'), (3, 'greater than 2'), (4, 'greater than 2') ], transform=attrgetter('integer', 'test') ) def test_lookup_different_fields(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate( test=Case( When(integer=2, integer2=3, then=Value('when')), default=Value('default'), ), ).order_by('pk'), [ (1, 1, 'default'), (2, 3, 'when'), (3, 4, 'default'), (2, 2, 'default'), (3, 4, 'default'), (3, 3, 'default'), (4, 5, 'default') ], transform=attrgetter('integer', 'integer2', 'test') ) def test_combined_q_object(self): self.assertQuerysetEqual( CaseTestModel.objects.annotate( test=Case( When(Q(integer=2) | Q(integer2=3), then=Value('when')), default=Value('default'), ), ).order_by('pk'), [ (1, 1, 'default'), (2, 3, 'when'), (3, 4, 'default'), (2, 2, 'when'), (3, 4, 'default'), (3, 3, 'when'), (4, 5, 'default') ], transform=attrgetter('integer', 'integer2', 'test') ) def test_order_by_conditional_implicit(self): self.assertQuerysetEqual( CaseTestModel.objects.filter(integer__lte=2).annotate(test=Case( When(integer=1, then=2), When(integer=2, then=1), default=3, )).order_by('test', 'pk'), [(2, 1), (2, 1), (1, 2)], transform=attrgetter('integer', 'test') ) def test_order_by_conditional_explicit(self): self.assertQuerysetEqual( CaseTestModel.objects.filter(integer__lte=2).annotate(test=Case( When(integer=1, then=2), When(integer=2, then=1), default=3, )).order_by(F('test').asc(), 'pk'), [(2, 1), (2, 1), (1, 2)], transform=attrgetter('integer', 'test') ) def test_join_promotion(self): o = CaseTestModel.objects.create(integer=1, integer2=1, string='1') # Testing that: # 1. There isn't any object on the remote side of the fk_rel # relation. If the query used inner joins, then the join to fk_rel # would remove o from the results. So, in effect we are testing that # we are promoting the fk_rel join to a left outer join here. # 2. The default value of 3 is generated for the case expression. self.assertQuerysetEqual( CaseTestModel.objects.filter(pk=o.pk).annotate( foo=Case( When(fk_rel__pk=1, then=2), default=3, ), ), [(o, 3)], lambda x: (x, x.foo) ) # Now 2 should be generated, as the fk_rel is null. self.assertQuerysetEqual( CaseTestModel.objects.filter(pk=o.pk).annotate( foo=Case( When(fk_rel__isnull=True, then=2), default=3, ), ), [(o, 2)], lambda x: (x, x.foo) ) def test_join_promotion_multiple_annotations(self): o = CaseTestModel.objects.create(integer=1, integer2=1, string='1') # Testing that: # 1. There isn't any object on the remote side of the fk_rel # relation. If the query used inner joins, then the join to fk_rel # would remove o from the results. So, in effect we are testing that # we are promoting the fk_rel join to a left outer join here. # 2. The default value of 3 is generated for the case expression. 
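        # In SQL terms, the fk_rel join must be promoted to a LEFT OUTER
        # JOIN rather than an INNER JOIN, so the row for o survives despite
        # having no related rows; both Case expressions then fall through to
        # their defaults (3 and 5).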
self.assertQuerysetEqual( CaseTestModel.objects.filter(pk=o.pk).annotate( foo=Case( When(fk_rel__pk=1, then=2), default=3, ), bar=Case( When(fk_rel__pk=1, then=4), default=5, ), ), [(o, 3, 5)], lambda x: (x, x.foo, x.bar) ) # Now 2 should be generated, as the fk_rel is null. self.assertQuerysetEqual( CaseTestModel.objects.filter(pk=o.pk).annotate( foo=Case( When(fk_rel__isnull=True, then=2), default=3, ), bar=Case( When(fk_rel__isnull=True, then=4), default=5, ), ), [(o, 2, 4)], lambda x: (x, x.foo, x.bar) ) def test_m2m_exclude(self): CaseTestModel.objects.create(integer=10, integer2=1, string='1') qs = CaseTestModel.objects.values_list('id', 'integer').annotate( cnt=Sum( Case(When(~Q(fk_rel__integer=1), then=1), default=2), ), ).order_by('integer') # The first o has 2 as its fk_rel__integer=1, thus it hits the # default=2 case. The other ones have 2 as the result as they have 2 # fk_rel objects, except for integer=4 and integer=10 (created above). # The integer=4 case has one integer, thus the result is 1, and # integer=10 doesn't have any and this too generates 1 (instead of 0) # as ~Q() also matches nulls. self.assertQuerysetEqual( qs, [(1, 2), (2, 2), (2, 2), (3, 2), (3, 2), (3, 2), (4, 1), (10, 1)], lambda x: x[1:] ) def test_m2m_reuse(self): CaseTestModel.objects.create(integer=10, integer2=1, string='1') # Need to use values before annotate so that Oracle will not group # by fields it isn't capable of grouping by. qs = CaseTestModel.objects.values_list('id', 'integer').annotate( cnt=Sum( Case(When(~Q(fk_rel__integer=1), then=1), default=2), ), ).annotate( cnt2=Sum( Case(When(~Q(fk_rel__integer=1), then=1), default=2), ), ).order_by('integer') self.assertEqual(str(qs.query).count(' JOIN '), 1) self.assertQuerysetEqual( qs, [(1, 2, 2), (2, 2, 2), (2, 2, 2), (3, 2, 2), (3, 2, 2), (3, 2, 2), (4, 1, 1), (10, 1, 1)], lambda x: x[1:] ) class CaseDocumentationExamples(TestCase): @classmethod def setUpTestData(cls): Client.objects.create( name='Jane Doe', account_type=Client.REGULAR, registered_on=date.today() - timedelta(days=36), ) Client.objects.create( name='James Smith', account_type=Client.GOLD, registered_on=date.today() - timedelta(days=5), ) Client.objects.create( name='Jack Black', account_type=Client.PLATINUM, registered_on=date.today() - timedelta(days=10 * 365), ) def test_simple_example(self): self.assertQuerysetEqual( Client.objects.annotate( discount=Case( When(account_type=Client.GOLD, then=Value('5%')), When(account_type=Client.PLATINUM, then=Value('10%')), default=Value('0%'), ), ).order_by('pk'), [('Jane Doe', '0%'), ('James Smith', '5%'), ('Jack Black', '10%')], transform=attrgetter('name', 'discount') ) def test_lookup_example(self): a_month_ago = date.today() - timedelta(days=30) a_year_ago = date.today() - timedelta(days=365) self.assertQuerysetEqual( Client.objects.annotate( discount=Case( When(registered_on__lte=a_year_ago, then=Value('10%')), When(registered_on__lte=a_month_ago, then=Value('5%')), default=Value('0%'), ), ).order_by('pk'), [('Jane Doe', '5%'), ('James Smith', '0%'), ('Jack Black', '10%')], transform=attrgetter('name', 'discount') ) def test_conditional_update_example(self): a_month_ago = date.today() - timedelta(days=30) a_year_ago = date.today() - timedelta(days=365) Client.objects.update( account_type=Case( When(registered_on__lte=a_year_ago, then=Value(Client.PLATINUM)), When(registered_on__lte=a_month_ago, then=Value(Client.GOLD)), default=Value(Client.REGULAR), ), ) self.assertQuerysetEqual( Client.objects.all().order_by('pk'), [('Jane Doe', 
'G'), ('James Smith', 'R'), ('Jack Black', 'P')], transform=attrgetter('name', 'account_type') ) def test_conditional_aggregation_example(self): Client.objects.create( name='Jean Grey', account_type=Client.REGULAR, registered_on=date.today(), ) Client.objects.create( name='James Bond', account_type=Client.PLATINUM, registered_on=date.today(), ) Client.objects.create( name='Jane Porter', account_type=Client.PLATINUM, registered_on=date.today(), ) self.assertEqual( Client.objects.aggregate( regular=Count('pk', filter=Q(account_type=Client.REGULAR)), gold=Count('pk', filter=Q(account_type=Client.GOLD)), platinum=Count('pk', filter=Q(account_type=Client.PLATINUM)), ), {'regular': 2, 'gold': 1, 'platinum': 3} ) # This was the example before the filter argument was added. self.assertEqual( Client.objects.aggregate( regular=Sum(Case( When(account_type=Client.REGULAR, then=1), )), gold=Sum(Case( When(account_type=Client.GOLD, then=1), )), platinum=Sum(Case( When(account_type=Client.PLATINUM, then=1), )), ), {'regular': 2, 'gold': 1, 'platinum': 3} ) def test_filter_example(self): a_month_ago = date.today() - timedelta(days=30) a_year_ago = date.today() - timedelta(days=365) self.assertQuerysetEqual( Client.objects.filter( registered_on__lte=Case( When(account_type=Client.GOLD, then=a_month_ago), When(account_type=Client.PLATINUM, then=a_year_ago), ), ), [('Jack Black', 'P')], transform=attrgetter('name', 'account_type') ) def test_hash(self): expression_1 = Case( When(account_type__in=[Client.REGULAR, Client.GOLD], then=1), default=2, output_field=IntegerField(), ) expression_2 = Case( When(account_type__in=(Client.REGULAR, Client.GOLD), then=1), default=2, output_field=IntegerField(), ) expression_3 = Case(When(account_type__in=[Client.REGULAR, Client.GOLD], then=1), default=2) expression_4 = Case(When(account_type__in=[Client.PLATINUM, Client.GOLD], then=2), default=1) self.assertEqual(hash(expression_1), hash(expression_2)) self.assertNotEqual(hash(expression_2), hash(expression_3)) self.assertNotEqual(hash(expression_1), hash(expression_4)) self.assertNotEqual(hash(expression_3), hash(expression_4)) class CaseWhenTests(SimpleTestCase): def test_only_when_arguments(self): msg = 'Positional arguments must all be When objects.' with self.assertRaisesMessage(TypeError, msg): Case(When(Q(pk__in=[])), object()) def test_invalid_when_constructor_args(self): msg = ( 'When() supports a Q object, a boolean expression, or lookups as ' 'a condition.' ) with self.assertRaisesMessage(TypeError, msg): When(condition=object()) with self.assertRaisesMessage(TypeError, msg): When(condition=Value(1)) with self.assertRaisesMessage(TypeError, msg): When(Value(1), string='1') with self.assertRaisesMessage(TypeError, msg): When() def test_empty_q_object(self): msg = "An empty Q() can't be used as a When() condition." with self.assertRaisesMessage(ValueError, msg): When(Q(), then=Value(True))
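# A minimal sketch (not run by this suite) of the two Case()/When() patterns
# the tests above exercise: conditional annotation and conditional update.
# It reuses CaseTestModel from this module; the 'size' labels are
# illustrative values, not fixtures the tests depend on.
def _case_when_sketch():
    from django.db.models import Case, CharField, Value, When

    labelled = CaseTestModel.objects.annotate(
        size=Case(
            When(integer__lt=2, then=Value('small')),
            When(integer__gt=2, then=Value('large')),
            default=Value('medium'),
            output_field=CharField(),
        ),
    )
    # The same expression type works in update(), evaluated per row in SQL.
    CaseTestModel.objects.update(
        string=Case(
            When(integer=1, then=Value('one')),
            default=Value('other'),
        ),
    )
    return labelled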
3c9c9384e56195192b1736c0f7d246f65f260b42f91158025042b93365846d80
import unittest from django.core.exceptions import FieldError from django.db import IntegrityError, connection, transaction from django.db.models import Count, F, Max from django.db.models.functions import Concat, Lower from django.test import TestCase from .models import A, B, Bar, D, DataPoint, Foo, RelatedPoint, UniqueNumber class SimpleTest(TestCase): @classmethod def setUpTestData(cls): cls.a1 = A.objects.create() cls.a2 = A.objects.create() for x in range(20): B.objects.create(a=cls.a1) D.objects.create(a=cls.a1) def test_nonempty_update(self): """ Update changes the right number of rows for a nonempty queryset """ num_updated = self.a1.b_set.update(y=100) self.assertEqual(num_updated, 20) cnt = B.objects.filter(y=100).count() self.assertEqual(cnt, 20) def test_empty_update(self): """ Update changes the right number of rows for an empty queryset """ num_updated = self.a2.b_set.update(y=100) self.assertEqual(num_updated, 0) cnt = B.objects.filter(y=100).count() self.assertEqual(cnt, 0) def test_nonempty_update_with_inheritance(self): """ Update changes the right number of rows for a nonempty queryset when the update affects only a base table """ num_updated = self.a1.d_set.update(y=100) self.assertEqual(num_updated, 20) cnt = D.objects.filter(y=100).count() self.assertEqual(cnt, 20) def test_empty_update_with_inheritance(self): """ Update changes the right number of rows for an empty queryset when the update affects only a base table """ num_updated = self.a2.d_set.update(y=100) self.assertEqual(num_updated, 0) cnt = D.objects.filter(y=100).count() self.assertEqual(cnt, 0) def test_foreign_key_update_with_id(self): """ Update works using <field>_id for foreign keys """ num_updated = self.a1.d_set.update(a_id=self.a2) self.assertEqual(num_updated, 20) self.assertEqual(self.a2.d_set.count(), 20) class AdvancedTests(TestCase): @classmethod def setUpTestData(cls): cls.d0 = DataPoint.objects.create(name="d0", value="apple") cls.d2 = DataPoint.objects.create(name="d2", value="banana") cls.d3 = DataPoint.objects.create(name="d3", value="banana") cls.r1 = RelatedPoint.objects.create(name="r1", data=cls.d3) def test_update(self): """ Objects are updated by first filtering the candidates into a queryset and then calling the update() method. It executes immediately and returns the number of rows matched. """ resp = DataPoint.objects.filter(value="apple").update(name="d1") self.assertEqual(resp, 1) resp = DataPoint.objects.filter(value="apple") self.assertEqual(list(resp), [self.d0]) def test_update_multiple_objects(self): """ We can update multiple objects at once. """ resp = DataPoint.objects.filter(value='banana').update(value='pineapple') self.assertEqual(resp, 2) self.assertEqual(DataPoint.objects.get(name="d2").value, 'pineapple') def test_update_fk(self): """ Foreign key fields can also be updated, although you can only update the object referred to, not anything inside the related object. 
""" resp = RelatedPoint.objects.filter(name="r1").update(data=self.d0) self.assertEqual(resp, 1) resp = RelatedPoint.objects.filter(data__name="d0") self.assertEqual(list(resp), [self.r1]) def test_update_multiple_fields(self): """ Multiple fields can be updated at once """ resp = DataPoint.objects.filter(value="apple").update( value="fruit", another_value="peach") self.assertEqual(resp, 1) d = DataPoint.objects.get(name="d0") self.assertEqual(d.value, 'fruit') self.assertEqual(d.another_value, 'peach') def test_update_all(self): """ In the rare case you want to update every instance of a model, update() is also a manager method. """ self.assertEqual(DataPoint.objects.update(value='thing'), 3) resp = DataPoint.objects.values('value').distinct() self.assertEqual(list(resp), [{'value': 'thing'}]) def test_update_slice_fail(self): """ We do not support update on already sliced query sets. """ method = DataPoint.objects.all()[:2].update msg = 'Cannot update a query once a slice has been taken.' with self.assertRaisesMessage(AssertionError, msg): method(another_value='another thing') def test_update_respects_to_field(self): """ Update of an FK field which specifies a to_field works. """ a_foo = Foo.objects.create(target='aaa') b_foo = Foo.objects.create(target='bbb') bar = Bar.objects.create(foo=a_foo) self.assertEqual(bar.foo_id, a_foo.target) bar_qs = Bar.objects.filter(pk=bar.pk) self.assertEqual(bar_qs[0].foo_id, a_foo.target) bar_qs.update(foo=b_foo) self.assertEqual(bar_qs[0].foo_id, b_foo.target) def test_update_m2m_field(self): msg = ( 'Cannot update model field ' '<django.db.models.fields.related.ManyToManyField: m2m_foo> ' '(only non-relations and foreign keys permitted).' ) with self.assertRaisesMessage(FieldError, msg): Bar.objects.update(m2m_foo='whatever') def test_update_annotated_queryset(self): """ Update of a queryset that's been annotated. """ # Trivial annotated update qs = DataPoint.objects.annotate(alias=F('value')) self.assertEqual(qs.update(another_value='foo'), 3) # Update where annotation is used for filtering qs = DataPoint.objects.annotate(alias=F('value')).filter(alias='apple') self.assertEqual(qs.update(another_value='foo'), 1) # Update where annotation is used in update parameters qs = DataPoint.objects.annotate(alias=F('value')) self.assertEqual(qs.update(another_value=F('alias')), 3) # Update where aggregation annotation is used in update parameters qs = DataPoint.objects.annotate(max=Max('value')) msg = ( 'Aggregate functions are not allowed in this query ' '(another_value=Max(Col(update_datapoint, update.DataPoint.value))).' ) with self.assertRaisesMessage(FieldError, msg): qs.update(another_value=F('max')) def test_update_annotated_multi_table_queryset(self): """ Update of a queryset that's been annotated and involves multiple tables. 
""" # Trivial annotated update qs = DataPoint.objects.annotate(related_count=Count('relatedpoint')) self.assertEqual(qs.update(value='Foo'), 3) # Update where annotation is used for filtering qs = DataPoint.objects.annotate(related_count=Count('relatedpoint')) self.assertEqual(qs.filter(related_count=1).update(value='Foo'), 1) # Update where aggregation annotation is used in update parameters qs = RelatedPoint.objects.annotate(max=Max('data__value')) msg = 'Joined field references are not permitted in this query' with self.assertRaisesMessage(FieldError, msg): qs.update(name=F('max')) def test_update_with_joined_field_annotation(self): msg = 'Joined field references are not permitted in this query' for annotation in ( F('data__name'), Lower('data__name'), Concat('data__name', 'data__value'), ): with self.subTest(annotation=annotation): with self.assertRaisesMessage(FieldError, msg): RelatedPoint.objects.annotate(new_name=annotation).update(name=F('new_name')) @unittest.skipUnless( connection.vendor == 'mysql', 'UPDATE...ORDER BY syntax is supported on MySQL/MariaDB', ) class MySQLUpdateOrderByTest(TestCase): """Update field with a unique constraint using an ordered queryset.""" @classmethod def setUpTestData(cls): UniqueNumber.objects.create(number=1) UniqueNumber.objects.create(number=2) def test_order_by_update_on_unique_constraint(self): tests = [ ('-number', 'id'), (F('number').desc(), 'id'), (F('number') * -1, 'id'), ] for ordering in tests: with self.subTest(ordering=ordering), transaction.atomic(): updated = UniqueNumber.objects.order_by(*ordering).update( number=F('number') + 1, ) self.assertEqual(updated, 2) def test_order_by_update_on_unique_constraint_annotation(self): # Ordering by annotations is omitted because they cannot be resolved in # .update(). with self.assertRaises(IntegrityError): UniqueNumber.objects.annotate( number_inverse=F('number').desc(), ).order_by('number_inverse').update( number=F('number') + 1, )
8b3fab79d217355204a19d94821434fec2cd00f4bab1788367e1b9f7e090805a
""" Tests for the update() queryset method that allows in-place, multi-object updates. """ from django.db import models class DataPoint(models.Model): name = models.CharField(max_length=20) value = models.CharField(max_length=20) another_value = models.CharField(max_length=20, blank=True) class RelatedPoint(models.Model): name = models.CharField(max_length=20) data = models.ForeignKey(DataPoint, models.CASCADE) class A(models.Model): x = models.IntegerField(default=10) class B(models.Model): a = models.ForeignKey(A, models.CASCADE) y = models.IntegerField(default=10) class C(models.Model): y = models.IntegerField(default=10) class D(C): a = models.ForeignKey(A, models.CASCADE) class Foo(models.Model): target = models.CharField(max_length=10, unique=True) class Bar(models.Model): foo = models.ForeignKey(Foo, models.CASCADE, to_field='target') m2m_foo = models.ManyToManyField(Foo, related_name='m2m_foo') class UniqueNumber(models.Model): number = models.IntegerField(unique=True)
8588e885446e88a2464697af77446dd94236c38d71d8f5798e2b6c846b3b8a8b
import os from argparse import ArgumentParser from contextlib import contextmanager from unittest import ( TestSuite, TextTestRunner, defaultTestLoader, mock, skipUnless, ) from django.db import connections from django.test import SimpleTestCase from django.test.runner import DiscoverRunner from django.test.utils import captured_stderr, captured_stdout from django.utils.version import PY37 @contextmanager def change_cwd(directory): current_dir = os.path.abspath(os.path.dirname(__file__)) new_dir = os.path.join(current_dir, directory) old_cwd = os.getcwd() os.chdir(new_dir) try: yield finally: os.chdir(old_cwd) class DiscoverRunnerTests(SimpleTestCase): @staticmethod def get_test_methods_names(suite): return [ t.__class__.__name__ + '.' + t._testMethodName for t in suite._tests ] def test_init_debug_mode(self): runner = DiscoverRunner() self.assertFalse(runner.debug_mode) def test_add_arguments_debug_mode(self): parser = ArgumentParser() DiscoverRunner.add_arguments(parser) ns = parser.parse_args([]) self.assertFalse(ns.debug_mode) ns = parser.parse_args(["--debug-mode"]) self.assertTrue(ns.debug_mode) def test_dotted_test_module(self): count = DiscoverRunner().build_suite( ['test_runner_apps.sample.tests_sample'], ).countTestCases() self.assertEqual(count, 4) def test_dotted_test_class_vanilla_unittest(self): count = DiscoverRunner().build_suite( ['test_runner_apps.sample.tests_sample.TestVanillaUnittest'], ).countTestCases() self.assertEqual(count, 1) def test_dotted_test_class_django_testcase(self): count = DiscoverRunner().build_suite( ['test_runner_apps.sample.tests_sample.TestDjangoTestCase'], ).countTestCases() self.assertEqual(count, 1) def test_dotted_test_method_django_testcase(self): count = DiscoverRunner().build_suite( ['test_runner_apps.sample.tests_sample.TestDjangoTestCase.test_sample'], ).countTestCases() self.assertEqual(count, 1) def test_pattern(self): count = DiscoverRunner( pattern="*_tests.py", ).build_suite(['test_runner_apps.sample']).countTestCases() self.assertEqual(count, 1) @skipUnless(PY37, 'unittest -k option requires Python 3.7 and later') def test_name_patterns(self): all_test_1 = [ 'DjangoCase1.test_1', 'DjangoCase2.test_1', 'SimpleCase1.test_1', 'SimpleCase2.test_1', 'UnittestCase1.test_1', 'UnittestCase2.test_1', ] all_test_2 = [ 'DjangoCase1.test_2', 'DjangoCase2.test_2', 'SimpleCase1.test_2', 'SimpleCase2.test_2', 'UnittestCase1.test_2', 'UnittestCase2.test_2', ] all_tests = sorted([*all_test_1, *all_test_2, 'UnittestCase2.test_3_test']) for pattern, expected in [ [['test_1'], all_test_1], [['UnittestCase1'], ['UnittestCase1.test_1', 'UnittestCase1.test_2']], [['*test'], ['UnittestCase2.test_3_test']], [['test*'], all_tests], [['test'], all_tests], [['test_1', 'test_2'], sorted([*all_test_1, *all_test_2])], [['test*1'], all_test_1], ]: with self.subTest(pattern): suite = DiscoverRunner( test_name_patterns=pattern ).build_suite(['test_runner_apps.simple']) self.assertEqual(expected, self.get_test_methods_names(suite)) def test_file_path(self): with change_cwd(".."): count = DiscoverRunner().build_suite( ['test_runner_apps/sample/'], ).countTestCases() self.assertEqual(count, 5) def test_empty_label(self): """ If the test label is empty, discovery should happen on the current working directory. 
""" with change_cwd("."): suite = DiscoverRunner().build_suite([]) self.assertEqual( suite._tests[0].id().split(".")[0], os.path.basename(os.getcwd()), ) def test_empty_test_case(self): count = DiscoverRunner().build_suite( ['test_runner_apps.sample.tests_sample.EmptyTestCase'], ).countTestCases() self.assertEqual(count, 0) def test_discovery_on_package(self): count = DiscoverRunner().build_suite( ['test_runner_apps.sample.tests'], ).countTestCases() self.assertEqual(count, 1) def test_ignore_adjacent(self): """ When given a dotted path to a module, unittest discovery searches not just the module, but also the directory containing the module. This results in tests from adjacent modules being run when they should not. The discover runner avoids this behavior. """ count = DiscoverRunner().build_suite( ['test_runner_apps.sample.empty'], ).countTestCases() self.assertEqual(count, 0) def test_testcase_ordering(self): with change_cwd(".."): suite = DiscoverRunner().build_suite(['test_runner_apps/sample/']) self.assertEqual( suite._tests[0].__class__.__name__, 'TestDjangoTestCase', msg="TestDjangoTestCase should be the first test case") self.assertEqual( suite._tests[1].__class__.__name__, 'TestZimpleTestCase', msg="TestZimpleTestCase should be the second test case") # All others can follow in unspecified order, including doctests self.assertIn('DocTestCase', [t.__class__.__name__ for t in suite._tests[2:]]) def test_duplicates_ignored(self): """ Tests shouldn't be discovered twice when discovering on overlapping paths. """ base_app = 'forms_tests' sub_app = 'forms_tests.field_tests' with self.modify_settings(INSTALLED_APPS={'append': sub_app}): single = DiscoverRunner().build_suite([base_app]).countTestCases() dups = DiscoverRunner().build_suite([base_app, sub_app]).countTestCases() self.assertEqual(single, dups) def test_reverse(self): """ Reverse should reorder tests while maintaining the grouping specified by ``DiscoverRunner.reorder_by``. 
""" runner = DiscoverRunner(reverse=True) suite = runner.build_suite( test_labels=('test_runner_apps.sample', 'test_runner_apps.simple')) self.assertIn('test_runner_apps.simple', next(iter(suite)).id(), msg="Test labels should be reversed.") suite = runner.build_suite(test_labels=('test_runner_apps.simple',)) suite = tuple(suite) self.assertIn('DjangoCase', suite[0].id(), msg="Test groups should not be reversed.") self.assertIn('SimpleCase', suite[4].id(), msg="Test groups order should be preserved.") self.assertIn('DjangoCase2', suite[0].id(), msg="Django test cases should be reversed.") self.assertIn('SimpleCase2', suite[4].id(), msg="Simple test cases should be reversed.") self.assertIn('UnittestCase2', suite[8].id(), msg="Unittest test cases should be reversed.") self.assertIn('test_2', suite[0].id(), msg="Methods of Django cases should be reversed.") self.assertIn('test_2', suite[4].id(), msg="Methods of simple cases should be reversed.") self.assertIn('test_2', suite[9].id(), msg="Methods of unittest cases should be reversed.") def test_overridable_get_test_runner_kwargs(self): self.assertIsInstance(DiscoverRunner().get_test_runner_kwargs(), dict) def test_overridable_test_suite(self): self.assertEqual(DiscoverRunner().test_suite, TestSuite) def test_overridable_test_runner(self): self.assertEqual(DiscoverRunner().test_runner, TextTestRunner) def test_overridable_test_loader(self): self.assertEqual(DiscoverRunner().test_loader, defaultTestLoader) def test_tags(self): runner = DiscoverRunner(tags=['core']) self.assertEqual(runner.build_suite(['test_runner_apps.tagged.tests']).countTestCases(), 1) runner = DiscoverRunner(tags=['fast']) self.assertEqual(runner.build_suite(['test_runner_apps.tagged.tests']).countTestCases(), 2) runner = DiscoverRunner(tags=['slow']) self.assertEqual(runner.build_suite(['test_runner_apps.tagged.tests']).countTestCases(), 2) def test_exclude_tags(self): runner = DiscoverRunner(tags=['fast'], exclude_tags=['core']) self.assertEqual(runner.build_suite(['test_runner_apps.tagged.tests']).countTestCases(), 1) runner = DiscoverRunner(tags=['fast'], exclude_tags=['slow']) self.assertEqual(runner.build_suite(['test_runner_apps.tagged.tests']).countTestCases(), 0) runner = DiscoverRunner(exclude_tags=['slow']) self.assertEqual(runner.build_suite(['test_runner_apps.tagged.tests']).countTestCases(), 0) def test_tag_inheritance(self): def count_tests(**kwargs): suite = DiscoverRunner(**kwargs).build_suite(['test_runner_apps.tagged.tests_inheritance']) return suite.countTestCases() self.assertEqual(count_tests(tags=['foo']), 4) self.assertEqual(count_tests(tags=['bar']), 2) self.assertEqual(count_tests(tags=['baz']), 2) self.assertEqual(count_tests(tags=['foo'], exclude_tags=['bar']), 2) self.assertEqual(count_tests(tags=['foo'], exclude_tags=['bar', 'baz']), 1) self.assertEqual(count_tests(exclude_tags=['foo']), 0) def test_included_tags_displayed(self): runner = DiscoverRunner(tags=['foo', 'bar'], verbosity=2) with captured_stdout() as stdout: runner.build_suite(['test_runner_apps.tagged.tests']) self.assertIn('Including test tag(s): bar, foo.\n', stdout.getvalue()) def test_excluded_tags_displayed(self): runner = DiscoverRunner(exclude_tags=['foo', 'bar'], verbosity=3) with captured_stdout() as stdout: runner.build_suite(['test_runner_apps.tagged.tests']) self.assertIn('Excluding test tag(s): bar, foo.\n', stdout.getvalue()) def test_pdb_with_parallel(self): msg = ( 'You cannot use --pdb with parallel tests; pass --parallel=1 to ' 'use it.' 
) with self.assertRaisesMessage(ValueError, msg): DiscoverRunner(pdb=True, parallel=2) def test_buffer_with_parallel(self): msg = ( 'You cannot use -b/--buffer with parallel tests; pass ' '--parallel=1 to use it.' ) with self.assertRaisesMessage(ValueError, msg): DiscoverRunner(buffer=True, parallel=2) def test_buffer_mode_test_pass(self): runner = DiscoverRunner(buffer=True, verbosity=0) with captured_stdout() as stdout, captured_stderr() as stderr: suite = runner.build_suite([ 'test_runner_apps.buffer.tests_buffer.WriteToStdoutStderrTestCase.test_pass', ]) runner.run_suite(suite) self.assertNotIn('Write to stderr.', stderr.getvalue()) self.assertNotIn('Write to stdout.', stdout.getvalue()) def test_buffer_mode_test_fail(self): runner = DiscoverRunner(buffer=True, verbosity=0) with captured_stdout() as stdout, captured_stderr() as stderr: suite = runner.build_suite([ 'test_runner_apps.buffer.tests_buffer.WriteToStdoutStderrTestCase.test_fail', ]) runner.run_suite(suite) self.assertIn('Write to stderr.', stderr.getvalue()) self.assertIn('Write to stdout.', stdout.getvalue()) @mock.patch('faulthandler.enable') def test_faulthandler_enabled(self, mocked_enable): with mock.patch('faulthandler.is_enabled', return_value=False): DiscoverRunner(enable_faulthandler=True) mocked_enable.assert_called() @mock.patch('faulthandler.enable') def test_faulthandler_already_enabled(self, mocked_enable): with mock.patch('faulthandler.is_enabled', return_value=True): DiscoverRunner(enable_faulthandler=True) mocked_enable.assert_not_called() @mock.patch('faulthandler.enable') def test_faulthandler_enabled_fileno(self, mocked_enable): # sys.stderr that is not an actual file. with mock.patch('faulthandler.is_enabled', return_value=False), captured_stderr(): DiscoverRunner(enable_faulthandler=True) mocked_enable.assert_called() @mock.patch('faulthandler.enable') def test_faulthandler_disabled(self, mocked_enable): with mock.patch('faulthandler.is_enabled', return_value=False): DiscoverRunner(enable_faulthandler=False) mocked_enable.assert_not_called() class DiscoverRunnerGetDatabasesTests(SimpleTestCase): runner = DiscoverRunner(verbosity=2) skip_msg = 'Skipping setup of unused database(s): ' def get_databases(self, test_labels): suite = self.runner.build_suite(test_labels) with captured_stdout() as stdout: databases = self.runner.get_databases(suite) return databases, stdout.getvalue() def assertSkippedDatabases(self, test_labels, expected_databases): databases, output = self.get_databases(test_labels) self.assertEqual(databases, expected_databases) skipped_databases = set(connections) - expected_databases if skipped_databases: self.assertIn(self.skip_msg + ', '.join(sorted(skipped_databases)), output) else: self.assertNotIn(self.skip_msg, output) def test_mixed(self): databases, output = self.get_databases(['test_runner_apps.databases.tests']) self.assertEqual(databases, set(connections)) self.assertNotIn(self.skip_msg, output) def test_all(self): databases, output = self.get_databases(['test_runner_apps.databases.tests.AllDatabasesTests']) self.assertEqual(databases, set(connections)) self.assertNotIn(self.skip_msg, output) def test_default_and_other(self): self.assertSkippedDatabases([ 'test_runner_apps.databases.tests.DefaultDatabaseTests', 'test_runner_apps.databases.tests.OtherDatabaseTests', ], {'default', 'other'}) def test_default_only(self): self.assertSkippedDatabases([ 'test_runner_apps.databases.tests.DefaultDatabaseTests', ], {'default'}) def test_other_only(self): self.assertSkippedDatabases([ 
'test_runner_apps.databases.tests.OtherDatabaseTests' ], {'other'}) def test_no_databases_required(self): self.assertSkippedDatabases([ 'test_runner_apps.databases.tests.NoDatabaseTests' ], set())
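# A minimal sketch (not executed by this module) of driving DiscoverRunner
# programmatically, which is the API exercised above. The dotted label is the
# sample app used throughout this file.
def _runner_sketch():
    runner = DiscoverRunner(verbosity=0)
    suite = runner.build_suite(['test_runner_apps.sample.tests_sample'])
    return suite.countTestCases()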
9cf16e1bea2de625f2a1e85e3c28cbe5fcf89389fa43df0f47a1199796c5df9b
import operator from django.db import DatabaseError, NotSupportedError, connection from django.db.models import Exists, F, IntegerField, OuterRef, Value from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature from .models import Number, ReservedName @skipUnlessDBFeature('supports_select_union') class QuerySetSetOperationTests(TestCase): @classmethod def setUpTestData(cls): Number.objects.bulk_create(Number(num=i, other_num=10 - i) for i in range(10)) def assertNumbersEqual(self, queryset, expected_numbers, ordered=True): self.assertQuerysetEqual(queryset, expected_numbers, operator.attrgetter('num'), ordered) def test_simple_union(self): qs1 = Number.objects.filter(num__lte=1) qs2 = Number.objects.filter(num__gte=8) qs3 = Number.objects.filter(num=5) self.assertNumbersEqual(qs1.union(qs2, qs3), [0, 1, 5, 8, 9], ordered=False) @skipUnlessDBFeature('supports_select_intersection') def test_simple_intersection(self): qs1 = Number.objects.filter(num__lte=5) qs2 = Number.objects.filter(num__gte=5) qs3 = Number.objects.filter(num__gte=4, num__lte=6) self.assertNumbersEqual(qs1.intersection(qs2, qs3), [5], ordered=False) @skipUnlessDBFeature('supports_select_intersection') def test_intersection_with_values(self): ReservedName.objects.create(name='a', order=2) qs1 = ReservedName.objects.all() reserved_name = qs1.intersection(qs1).values('name', 'order', 'id').get() self.assertEqual(reserved_name['name'], 'a') self.assertEqual(reserved_name['order'], 2) reserved_name = qs1.intersection(qs1).values_list('name', 'order', 'id').get() self.assertEqual(reserved_name[:2], ('a', 2)) @skipUnlessDBFeature('supports_select_difference') def test_simple_difference(self): qs1 = Number.objects.filter(num__lte=5) qs2 = Number.objects.filter(num__lte=4) self.assertNumbersEqual(qs1.difference(qs2), [5], ordered=False) def test_union_distinct(self): qs1 = Number.objects.all() qs2 = Number.objects.all() self.assertEqual(len(list(qs1.union(qs2, all=True))), 20) self.assertEqual(len(list(qs1.union(qs2))), 10) def test_union_none(self): qs1 = Number.objects.filter(num__lte=1) qs2 = Number.objects.filter(num__gte=8) qs3 = qs1.union(qs2) self.assertSequenceEqual(qs3.none(), []) self.assertNumbersEqual(qs3, [0, 1, 8, 9], ordered=False) @skipUnlessDBFeature('supports_select_intersection') def test_intersection_with_empty_qs(self): qs1 = Number.objects.all() qs2 = Number.objects.none() qs3 = Number.objects.filter(pk__in=[]) self.assertEqual(len(qs1.intersection(qs2)), 0) self.assertEqual(len(qs1.intersection(qs3)), 0) self.assertEqual(len(qs2.intersection(qs1)), 0) self.assertEqual(len(qs3.intersection(qs1)), 0) self.assertEqual(len(qs2.intersection(qs2)), 0) self.assertEqual(len(qs3.intersection(qs3)), 0) @skipUnlessDBFeature('supports_select_difference') def test_difference_with_empty_qs(self): qs1 = Number.objects.all() qs2 = Number.objects.none() qs3 = Number.objects.filter(pk__in=[]) self.assertEqual(len(qs1.difference(qs2)), 10) self.assertEqual(len(qs1.difference(qs3)), 10) self.assertEqual(len(qs2.difference(qs1)), 0) self.assertEqual(len(qs3.difference(qs1)), 0) self.assertEqual(len(qs2.difference(qs2)), 0) self.assertEqual(len(qs3.difference(qs3)), 0) @skipUnlessDBFeature('supports_select_difference') def test_difference_with_values(self): ReservedName.objects.create(name='a', order=2) qs1 = ReservedName.objects.all() qs2 = ReservedName.objects.none() reserved_name = qs1.difference(qs2).values('name', 'order', 'id').get() self.assertEqual(reserved_name['name'], 'a') 
self.assertEqual(reserved_name['order'], 2) reserved_name = qs1.difference(qs2).values_list('name', 'order', 'id').get() self.assertEqual(reserved_name[:2], ('a', 2)) def test_union_with_empty_qs(self): qs1 = Number.objects.all() qs2 = Number.objects.none() qs3 = Number.objects.filter(pk__in=[]) self.assertEqual(len(qs1.union(qs2)), 10) self.assertEqual(len(qs2.union(qs1)), 10) self.assertEqual(len(qs1.union(qs3)), 10) self.assertEqual(len(qs3.union(qs1)), 10) self.assertEqual(len(qs2.union(qs1, qs1, qs1)), 10) self.assertEqual(len(qs2.union(qs1, qs1, all=True)), 20) self.assertEqual(len(qs2.union(qs2)), 0) self.assertEqual(len(qs3.union(qs3)), 0) def test_limits(self): qs1 = Number.objects.all() qs2 = Number.objects.all() self.assertEqual(len(list(qs1.union(qs2)[:2])), 2) def test_ordering(self): qs1 = Number.objects.filter(num__lte=1) qs2 = Number.objects.filter(num__gte=2, num__lte=3) self.assertNumbersEqual(qs1.union(qs2).order_by('-num'), [3, 2, 1, 0]) def test_ordering_by_alias(self): qs1 = Number.objects.filter(num__lte=1).values(alias=F('num')) qs2 = Number.objects.filter(num__gte=2, num__lte=3).values(alias=F('num')) self.assertQuerysetEqual( qs1.union(qs2).order_by('-alias'), [3, 2, 1, 0], operator.itemgetter('alias'), ) def test_ordering_by_f_expression(self): qs1 = Number.objects.filter(num__lte=1) qs2 = Number.objects.filter(num__gte=2, num__lte=3) self.assertNumbersEqual(qs1.union(qs2).order_by(F('num').desc()), [3, 2, 1, 0]) def test_ordering_by_f_expression_and_alias(self): qs1 = Number.objects.filter(num__lte=1).values(alias=F('other_num')) qs2 = Number.objects.filter(num__gte=2, num__lte=3).values(alias=F('other_num')) self.assertQuerysetEqual( qs1.union(qs2).order_by(F('alias').desc()), [10, 9, 8, 7], operator.itemgetter('alias'), ) Number.objects.create(num=-1) self.assertQuerysetEqual( qs1.union(qs2).order_by(F('alias').desc(nulls_last=True)), [10, 9, 8, 7, None], operator.itemgetter('alias'), ) def test_union_with_values(self): ReservedName.objects.create(name='a', order=2) qs1 = ReservedName.objects.all() reserved_name = qs1.union(qs1).values('name', 'order', 'id').get() self.assertEqual(reserved_name['name'], 'a') self.assertEqual(reserved_name['order'], 2) reserved_name = qs1.union(qs1).values_list('name', 'order', 'id').get() self.assertEqual(reserved_name[:2], ('a', 2)) # List of columns can be changed. 
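        # (values_list() applied to the combined queryset re-selects from
        # the compound query, so the projection isn't fixed by the original
        # querysets.)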
reserved_name = qs1.union(qs1).values_list('order').get() self.assertEqual(reserved_name, (2,)) def test_union_with_two_annotated_values_list(self): qs1 = Number.objects.filter(num=1).annotate( count=Value(0, IntegerField()), ).values_list('num', 'count') qs2 = Number.objects.filter(num=2).values('pk').annotate( count=F('num'), ).annotate( num=Value(1, IntegerField()), ).values_list('num', 'count') self.assertCountEqual(qs1.union(qs2), [(1, 0), (2, 1)]) def test_union_with_extra_and_values_list(self): qs1 = Number.objects.filter(num=1).extra( select={'count': 0}, ).values_list('num', 'count') qs2 = Number.objects.filter(num=2).extra(select={'count': 1}) self.assertCountEqual(qs1.union(qs2), [(1, 0), (2, 1)]) def test_union_with_values_list_on_annotated_and_unannotated(self): ReservedName.objects.create(name='rn1', order=1) qs1 = Number.objects.annotate( has_reserved_name=Exists(ReservedName.objects.filter(order=OuterRef('num'))) ).filter(has_reserved_name=True) qs2 = Number.objects.filter(num=9) self.assertCountEqual(qs1.union(qs2).values_list('num', flat=True), [1, 9]) def test_union_with_values_list_and_order(self): ReservedName.objects.bulk_create([ ReservedName(name='rn1', order=7), ReservedName(name='rn2', order=5), ReservedName(name='rn0', order=6), ReservedName(name='rn9', order=-1), ]) qs1 = ReservedName.objects.filter(order__gte=6) qs2 = ReservedName.objects.filter(order__lte=5) union_qs = qs1.union(qs2) for qs, expected_result in ( # Order by a single column. (union_qs.order_by('-pk').values_list('order', flat=True), [-1, 6, 5, 7]), (union_qs.order_by('pk').values_list('order', flat=True), [7, 5, 6, -1]), (union_qs.values_list('order', flat=True).order_by('-pk'), [-1, 6, 5, 7]), (union_qs.values_list('order', flat=True).order_by('pk'), [7, 5, 6, -1]), # Order by multiple columns. 
(union_qs.order_by('-name', 'pk').values_list('order', flat=True), [-1, 5, 7, 6]), (union_qs.values_list('order', flat=True).order_by('-name', 'pk'), [-1, 5, 7, 6]), ): with self.subTest(qs=qs): self.assertEqual(list(qs), expected_result) def test_count_union(self): qs1 = Number.objects.filter(num__lte=1).values('num') qs2 = Number.objects.filter(num__gte=2, num__lte=3).values('num') self.assertEqual(qs1.union(qs2).count(), 4) def test_count_union_empty_result(self): qs = Number.objects.filter(pk__in=[]) self.assertEqual(qs.union(qs).count(), 0) @skipUnlessDBFeature('supports_select_difference') def test_count_difference(self): qs1 = Number.objects.filter(num__lt=10) qs2 = Number.objects.filter(num__lt=9) self.assertEqual(qs1.difference(qs2).count(), 1) @skipUnlessDBFeature('supports_select_intersection') def test_count_intersection(self): qs1 = Number.objects.filter(num__gte=5) qs2 = Number.objects.filter(num__lte=5) self.assertEqual(qs1.intersection(qs2).count(), 1) @skipUnlessDBFeature('supports_slicing_ordering_in_compound') def test_ordering_subqueries(self): qs1 = Number.objects.order_by('num')[:2] qs2 = Number.objects.order_by('-num')[:2] self.assertNumbersEqual(qs1.union(qs2).order_by('-num')[:4], [9, 8, 1, 0]) @skipIfDBFeature('supports_slicing_ordering_in_compound') def test_unsupported_ordering_slicing_raises_db_error(self): qs1 = Number.objects.all() qs2 = Number.objects.all() msg = 'LIMIT/OFFSET not allowed in subqueries of compound statements' with self.assertRaisesMessage(DatabaseError, msg): list(qs1.union(qs2[:10])) msg = 'ORDER BY not allowed in subqueries of compound statements' with self.assertRaisesMessage(DatabaseError, msg): list(qs1.order_by('id').union(qs2)) @skipIfDBFeature('supports_select_intersection') def test_unsupported_intersection_raises_db_error(self): qs1 = Number.objects.all() qs2 = Number.objects.all() msg = 'intersection is not supported on this database backend' with self.assertRaisesMessage(NotSupportedError, msg): list(qs1.intersection(qs2)) def test_combining_multiple_models(self): ReservedName.objects.create(name='99 little bugs', order=99) qs1 = Number.objects.filter(num=1).values_list('num', flat=True) qs2 = ReservedName.objects.values_list('order') self.assertEqual(list(qs1.union(qs2).order_by('num')), [1, 99]) def test_order_raises_on_non_selected_column(self): qs1 = Number.objects.filter().annotate( annotation=Value(1, IntegerField()), ).values('annotation', num2=F('num')) qs2 = Number.objects.filter().values('id', 'num') # Should not raise list(qs1.union(qs2).order_by('annotation')) list(qs1.union(qs2).order_by('num2')) msg = 'ORDER BY term does not match any column in the result set' # 'id' is not part of the select with self.assertRaisesMessage(DatabaseError, msg): list(qs1.union(qs2).order_by('id')) # 'num' was re-aliased to 'num2' with self.assertRaisesMessage(DatabaseError, msg): list(qs1.union(qs2).order_by('num')) with self.assertRaisesMessage(DatabaseError, msg): list(qs1.union(qs2).order_by(F('num'))) with self.assertRaisesMessage(DatabaseError, msg): list(qs1.union(qs2).order_by(F('num').desc())) # With the operands switched, 'num' is a selected column again: list(qs2.union(qs1).order_by('num')) @skipUnlessDBFeature('supports_select_difference', 'supports_select_intersection') def test_qs_with_subcompound_qs(self): qs1 = Number.objects.all() qs2 = Number.objects.intersection(Number.objects.filter(num__gt=1)) self.assertEqual(qs1.difference(qs2).count(), 2) def test_order_by_same_type(self): qs = Number.objects.all() union = qs.union(qs) numbers = 
list(range(10)) self.assertNumbersEqual(union.order_by('num'), numbers) self.assertNumbersEqual(union.order_by('other_num'), reversed(numbers)) def test_unsupported_operations_on_combined_qs(self): qs = Number.objects.all() msg = 'Calling QuerySet.%s() after %s() is not supported.' combinators = ['union'] if connection.features.supports_select_difference: combinators.append('difference') if connection.features.supports_select_intersection: combinators.append('intersection') for combinator in combinators: for operation in ( 'annotate', 'defer', 'delete', 'distinct', 'exclude', 'extra', 'filter', 'only', 'prefetch_related', 'select_related', 'update', ): with self.subTest(combinator=combinator, operation=operation): with self.assertRaisesMessage( NotSupportedError, msg % (operation, combinator), ): getattr(getattr(qs, combinator)(qs), operation)()
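# A minimal sketch (not run by the suite) of the combinator API covered
# above, using this module's Number model.
def _combinator_sketch():
    low = Number.objects.filter(num__lte=1)
    high = Number.objects.filter(num__gte=8)
    # order_by() on the result applies to the combined query as a whole.
    combined = low.union(high).order_by('num')
    return list(combined.values_list('num', flat=True))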
218a0c7b1868b625d2f5b0d1f244189a2c836ce62d88604a0330fbacd82c6d47
from datetime import datetime from operator import attrgetter from django.db.models import ( CharField, DateTimeField, F, Max, OuterRef, Subquery, Value, ) from django.db.models.functions import Upper from django.test import TestCase from .models import Article, Author, ChildArticle, OrderedByFArticle, Reference class OrderingTests(TestCase): @classmethod def setUpTestData(cls): cls.a1 = Article.objects.create(headline="Article 1", pub_date=datetime(2005, 7, 26)) cls.a2 = Article.objects.create(headline="Article 2", pub_date=datetime(2005, 7, 27)) cls.a3 = Article.objects.create(headline="Article 3", pub_date=datetime(2005, 7, 27)) cls.a4 = Article.objects.create(headline="Article 4", pub_date=datetime(2005, 7, 28)) cls.author_1 = Author.objects.create(name="Name 1") cls.author_2 = Author.objects.create(name="Name 2") for i in range(2): Author.objects.create() def test_default_ordering(self): """ By default, Article.objects.all() orders by pub_date descending, then headline ascending. """ self.assertQuerysetEqual( Article.objects.all(), [ "Article 4", "Article 2", "Article 3", "Article 1", ], attrgetter("headline") ) # Getting a single item should work too: self.assertEqual(Article.objects.all()[0], self.a4) def test_default_ordering_override(self): """ Override ordering with order_by, which is in the same format as the ordering attribute in models. """ self.assertQuerysetEqual( Article.objects.order_by("headline"), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) self.assertQuerysetEqual( Article.objects.order_by("pub_date", "-headline"), [ "Article 1", "Article 3", "Article 2", "Article 4", ], attrgetter("headline") ) def test_order_by_override(self): """ Only the last order_by has any effect (since they each override any previous ordering). """ self.assertQuerysetEqual( Article.objects.order_by("id"), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) self.assertQuerysetEqual( Article.objects.order_by("id").order_by("-headline"), [ "Article 4", "Article 3", "Article 2", "Article 1", ], attrgetter("headline") ) def test_order_by_nulls_first_and_last(self): msg = "nulls_first and nulls_last are mutually exclusive" with self.assertRaisesMessage(ValueError, msg): Article.objects.order_by(F("author").desc(nulls_last=True, nulls_first=True)) def assertQuerysetEqualReversible(self, queryset, sequence): self.assertSequenceEqual(queryset, sequence) self.assertSequenceEqual(queryset.reverse(), list(reversed(sequence))) def test_order_by_nulls_last(self): Article.objects.filter(headline="Article 3").update(author=self.author_1) Article.objects.filter(headline="Article 4").update(author=self.author_2) # asc and desc are chainable with nulls_last. 
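        # (nulls_last=True pushes rows whose author is NULL after all
        # non-NULL rows, for both ascending and descending order.)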
self.assertQuerysetEqualReversible( Article.objects.order_by(F("author").desc(nulls_last=True), 'headline'), [self.a4, self.a3, self.a1, self.a2], ) self.assertQuerysetEqualReversible( Article.objects.order_by(F("author").asc(nulls_last=True), 'headline'), [self.a3, self.a4, self.a1, self.a2], ) self.assertQuerysetEqualReversible( Article.objects.order_by(Upper("author__name").desc(nulls_last=True), 'headline'), [self.a4, self.a3, self.a1, self.a2], ) self.assertQuerysetEqualReversible( Article.objects.order_by(Upper("author__name").asc(nulls_last=True), 'headline'), [self.a3, self.a4, self.a1, self.a2], ) def test_order_by_nulls_first(self): Article.objects.filter(headline="Article 3").update(author=self.author_1) Article.objects.filter(headline="Article 4").update(author=self.author_2) # asc and desc are chainable with nulls_first. self.assertQuerysetEqualReversible( Article.objects.order_by(F("author").asc(nulls_first=True), 'headline'), [self.a1, self.a2, self.a3, self.a4], ) self.assertQuerysetEqualReversible( Article.objects.order_by(F("author").desc(nulls_first=True), 'headline'), [self.a1, self.a2, self.a4, self.a3], ) self.assertQuerysetEqualReversible( Article.objects.order_by(Upper("author__name").asc(nulls_first=True), 'headline'), [self.a1, self.a2, self.a3, self.a4], ) self.assertQuerysetEqualReversible( Article.objects.order_by(Upper("author__name").desc(nulls_first=True), 'headline'), [self.a1, self.a2, self.a4, self.a3], ) def test_orders_nulls_first_on_filtered_subquery(self): Article.objects.filter(headline='Article 1').update(author=self.author_1) Article.objects.filter(headline='Article 2').update(author=self.author_1) Article.objects.filter(headline='Article 4').update(author=self.author_2) Author.objects.filter(name__isnull=True).delete() author_3 = Author.objects.create(name='Name 3') article_subquery = Article.objects.filter( author=OuterRef('pk'), headline__icontains='Article', ).order_by().values('author').annotate( last_date=Max('pub_date'), ).values('last_date') self.assertQuerysetEqualReversible( Author.objects.annotate( last_date=Subquery(article_subquery, output_field=DateTimeField()) ).order_by( F('last_date').asc(nulls_first=True) ).distinct(), [author_3, self.author_1, self.author_2], ) def test_stop_slicing(self): """ Use the 'stop' part of slicing notation to limit the results. """ self.assertQuerysetEqual( Article.objects.order_by("headline")[:2], [ "Article 1", "Article 2", ], attrgetter("headline") ) def test_stop_start_slicing(self): """ Use the 'stop' and 'start' parts of slicing notation to offset the result list. """ self.assertQuerysetEqual( Article.objects.order_by("headline")[1:3], [ "Article 2", "Article 3", ], attrgetter("headline") ) def test_random_ordering(self): """ Use '?' to order randomly. """ self.assertEqual( len(list(Article.objects.order_by("?"))), 4 ) def test_reversed_ordering(self): """ Ordering can be reversed using the reverse() method on a queryset. This allows you to extract things like "the last two items" (reverse and then take the first two). 
""" self.assertQuerysetEqual( Article.objects.all().reverse()[:2], [ "Article 1", "Article 3", ], attrgetter("headline") ) def test_reverse_ordering_pure(self): qs1 = Article.objects.order_by(F('headline').asc()) qs2 = qs1.reverse() self.assertQuerysetEqual( qs2, [ 'Article 4', 'Article 3', 'Article 2', 'Article 1', ], attrgetter('headline'), ) self.assertQuerysetEqual( qs1, [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) def test_reverse_meta_ordering_pure(self): Article.objects.create( headline='Article 5', pub_date=datetime(2005, 7, 30), author=self.author_1, second_author=self.author_2, ) Article.objects.create( headline='Article 5', pub_date=datetime(2005, 7, 30), author=self.author_2, second_author=self.author_1, ) self.assertQuerysetEqual( Article.objects.filter(headline='Article 5').reverse(), ['Name 2', 'Name 1'], attrgetter('author.name'), ) self.assertQuerysetEqual( Article.objects.filter(headline='Article 5'), ['Name 1', 'Name 2'], attrgetter('author.name'), ) def test_no_reordering_after_slicing(self): msg = 'Cannot reverse a query once a slice has been taken.' qs = Article.objects.all()[0:2] with self.assertRaisesMessage(TypeError, msg): qs.reverse() with self.assertRaisesMessage(TypeError, msg): qs.last() def test_extra_ordering(self): """ Ordering can be based on fields included from an 'extra' clause """ self.assertQuerysetEqual( Article.objects.extra(select={"foo": "pub_date"}, order_by=["foo", "headline"]), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) def test_extra_ordering_quoting(self): """ If the extra clause uses an SQL keyword for a name, it will be protected by quoting. """ self.assertQuerysetEqual( Article.objects.extra(select={"order": "pub_date"}, order_by=["order", "headline"]), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) def test_extra_ordering_with_table_name(self): self.assertQuerysetEqual( Article.objects.extra(order_by=['ordering_article.headline']), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) self.assertQuerysetEqual( Article.objects.extra(order_by=['-ordering_article.headline']), [ "Article 4", "Article 3", "Article 2", "Article 1", ], attrgetter("headline") ) def test_order_by_pk(self): """ 'pk' works as an ordering option in Meta. """ self.assertQuerysetEqual( Author.objects.all(), list(reversed(range(1, Author.objects.count() + 1))), attrgetter("pk"), ) def test_order_by_fk_attname(self): """ ordering by a foreign key by its attribute name prevents the query from inheriting its related model ordering option (#19195). 
""" for i in range(1, 5): author = Author.objects.get(pk=i) article = getattr(self, "a%d" % (5 - i)) article.author = author article.save(update_fields={'author'}) self.assertQuerysetEqual( Article.objects.order_by('author_id'), [ "Article 4", "Article 3", "Article 2", "Article 1", ], attrgetter("headline") ) def test_order_by_self_referential_fk(self): self.a1.author = Author.objects.create(editor=self.author_1) self.a1.save() self.a2.author = Author.objects.create(editor=self.author_2) self.a2.save() self.assertQuerysetEqual( Article.objects.filter(author__isnull=False).order_by('author__editor'), ['Article 2', 'Article 1'], attrgetter('headline'), ) self.assertQuerysetEqual( Article.objects.filter(author__isnull=False).order_by('author__editor_id'), ['Article 1', 'Article 2'], attrgetter('headline'), ) def test_order_by_f_expression(self): self.assertQuerysetEqual( Article.objects.order_by(F('headline')), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) self.assertQuerysetEqual( Article.objects.order_by(F('headline').asc()), [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) self.assertQuerysetEqual( Article.objects.order_by(F('headline').desc()), [ "Article 4", "Article 3", "Article 2", "Article 1", ], attrgetter("headline") ) def test_order_by_f_expression_duplicates(self): """ A column may only be included once (the first occurrence) so we check to ensure there are no duplicates by inspecting the SQL. """ qs = Article.objects.order_by(F('headline').asc(), F('headline').desc()) sql = str(qs.query).upper() fragment = sql[sql.find('ORDER BY'):] self.assertEqual(fragment.count('HEADLINE'), 1) self.assertQuerysetEqual( qs, [ "Article 1", "Article 2", "Article 3", "Article 4", ], attrgetter("headline") ) qs = Article.objects.order_by(F('headline').desc(), F('headline').asc()) sql = str(qs.query).upper() fragment = sql[sql.find('ORDER BY'):] self.assertEqual(fragment.count('HEADLINE'), 1) self.assertQuerysetEqual( qs, [ "Article 4", "Article 3", "Article 2", "Article 1", ], attrgetter("headline") ) def test_order_by_constant_value(self): # Order by annotated constant from selected columns. qs = Article.objects.annotate( constant=Value('1', output_field=CharField()), ).order_by('constant', '-headline') self.assertSequenceEqual(qs, [self.a4, self.a3, self.a2, self.a1]) # Order by annotated constant which is out of selected columns. self.assertSequenceEqual( qs.values_list('headline', flat=True), [ 'Article 4', 'Article 3', 'Article 2', 'Article 1', ], ) # Order by constant. qs = Article.objects.order_by(Value('1', output_field=CharField()), '-headline') self.assertSequenceEqual(qs, [self.a4, self.a3, self.a2, self.a1]) def test_related_ordering_duplicate_table_reference(self): """ An ordering referencing a model with an ordering referencing a model multiple time no circular reference should be detected (#24654). 
""" first_author = Author.objects.create() second_author = Author.objects.create() self.a1.author = first_author self.a1.second_author = second_author self.a1.save() self.a2.author = second_author self.a2.second_author = first_author self.a2.save() r1 = Reference.objects.create(article_id=self.a1.pk) r2 = Reference.objects.create(article_id=self.a2.pk) self.assertSequenceEqual(Reference.objects.all(), [r2, r1]) def test_default_ordering_by_f_expression(self): """F expressions can be used in Meta.ordering.""" articles = OrderedByFArticle.objects.all() articles.filter(headline='Article 2').update(author=self.author_2) articles.filter(headline='Article 3').update(author=self.author_1) self.assertQuerysetEqual( articles, ['Article 1', 'Article 4', 'Article 3', 'Article 2'], attrgetter('headline') ) def test_order_by_ptr_field_with_default_ordering_by_expression(self): ca1 = ChildArticle.objects.create( headline='h2', pub_date=datetime(2005, 7, 27), author=self.author_2, ) ca2 = ChildArticle.objects.create( headline='h2', pub_date=datetime(2005, 7, 27), author=self.author_1, ) ca3 = ChildArticle.objects.create( headline='h3', pub_date=datetime(2005, 7, 27), author=self.author_1, ) ca4 = ChildArticle.objects.create(headline='h1', pub_date=datetime(2005, 7, 28)) articles = ChildArticle.objects.order_by('article_ptr') self.assertSequenceEqual(articles, [ca4, ca2, ca1, ca3])
import datetime import pickle import unittest import uuid from copy import deepcopy from decimal import Decimal from unittest import mock from django.core.exceptions import FieldError from django.db import DatabaseError, NotSupportedError, connection from django.db.models import ( Avg, BinaryField, BooleanField, Case, CharField, Count, DateField, DateTimeField, DecimalField, DurationField, Exists, Expression, ExpressionList, ExpressionWrapper, F, FloatField, Func, IntegerField, Max, Min, Model, OrderBy, OuterRef, Q, StdDev, Subquery, Sum, TimeField, UUIDField, Value, Variance, When, ) from django.db.models.expressions import Col, Combinable, Random, RawSQL, Ref from django.db.models.functions import ( Coalesce, Concat, Left, Length, Lower, Substr, Upper, ) from django.db.models.sql import constants from django.db.models.sql.datastructures import Join from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature from django.test.utils import Approximate, isolate_apps from django.utils.functional import SimpleLazyObject from .models import ( UUID, UUIDPK, Company, Employee, Experiment, Manager, Number, RemoteEmployee, Result, SimulationRun, Time, ) class BasicExpressionsTests(TestCase): @classmethod def setUpTestData(cls): cls.example_inc = Company.objects.create( name="Example Inc.", num_employees=2300, num_chairs=5, ceo=Employee.objects.create(firstname="Joe", lastname="Smith", salary=10) ) cls.foobar_ltd = Company.objects.create( name="Foobar Ltd.", num_employees=3, num_chairs=4, based_in_eu=True, ceo=Employee.objects.create(firstname="Frank", lastname="Meyer", salary=20) ) cls.max = Employee.objects.create(firstname='Max', lastname='Mustermann', salary=30) cls.gmbh = Company.objects.create(name='Test GmbH', num_employees=32, num_chairs=1, ceo=cls.max) def setUp(self): self.company_query = Company.objects.values( "name", "num_employees", "num_chairs" ).order_by( "name", "num_employees", "num_chairs" ) def test_annotate_values_aggregate(self): companies = Company.objects.annotate( salaries=F('ceo__salary'), ).values('num_employees', 'salaries').aggregate( result=Sum( F('salaries') + F('num_employees'), output_field=IntegerField() ), ) self.assertEqual(companies['result'], 2395) def test_annotate_values_filter(self): companies = Company.objects.annotate( foo=RawSQL('%s', ['value']), ).filter(foo='value').order_by('name') self.assertQuerysetEqual( companies, ['<Company: Example Inc.>', '<Company: Foobar Ltd.>', '<Company: Test GmbH>'], ) def test_annotate_values_count(self): companies = Company.objects.annotate(foo=RawSQL('%s', ['value'])) self.assertEqual(companies.count(), 3) @skipUnlessDBFeature('supports_boolean_expr_in_select_clause') def test_filtering_on_annotate_that_uses_q(self): self.assertEqual( Company.objects.annotate( num_employees_check=ExpressionWrapper(Q(num_employees__gt=3), output_field=BooleanField()) ).filter(num_employees_check=True).count(), 2, ) def test_filtering_on_q_that_is_boolean(self): self.assertEqual( Company.objects.filter( ExpressionWrapper(Q(num_employees__gt=3), output_field=BooleanField()) ).count(), 2, ) def test_filtering_on_rawsql_that_is_boolean(self): self.assertEqual( Company.objects.filter( RawSQL('num_employees > %s', (3,), output_field=BooleanField()), ).count(), 2, ) def test_filter_inter_attribute(self): # We can filter on attribute relationships on same model obj, e.g. # find companies where the number of employees is greater # than the number of chairs. 
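        # Roughly the SQL this compiles to (quoting and aliasing vary by
        # backend):
        #   SELECT ... FROM expressions_company
        #   WHERE num_employees > num_chairs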
self.assertSequenceEqual( self.company_query.filter(num_employees__gt=F("num_chairs")), [ { "num_chairs": 5, "name": "Example Inc.", "num_employees": 2300, }, { "num_chairs": 1, "name": "Test GmbH", "num_employees": 32 }, ], ) def test_update(self): # We can set one field to have the value of another field # Make sure we have enough chairs self.company_query.update(num_chairs=F("num_employees")) self.assertSequenceEqual( self.company_query, [ { "num_chairs": 2300, "name": "Example Inc.", "num_employees": 2300 }, { "num_chairs": 3, "name": "Foobar Ltd.", "num_employees": 3 }, { "num_chairs": 32, "name": "Test GmbH", "num_employees": 32 } ], ) def test_arithmetic(self): # We can perform arithmetic operations in expressions # Make sure we have 2 spare chairs self.company_query.update(num_chairs=F("num_employees") + 2) self.assertSequenceEqual( self.company_query, [ { 'num_chairs': 2302, 'name': 'Example Inc.', 'num_employees': 2300 }, { 'num_chairs': 5, 'name': 'Foobar Ltd.', 'num_employees': 3 }, { 'num_chairs': 34, 'name': 'Test GmbH', 'num_employees': 32 } ], ) def test_order_of_operations(self): # Law of order of operations is followed self.company_query.update(num_chairs=F('num_employees') + 2 * F('num_employees')) self.assertSequenceEqual( self.company_query, [ { 'num_chairs': 6900, 'name': 'Example Inc.', 'num_employees': 2300 }, { 'num_chairs': 9, 'name': 'Foobar Ltd.', 'num_employees': 3 }, { 'num_chairs': 96, 'name': 'Test GmbH', 'num_employees': 32 } ], ) def test_parenthesis_priority(self): # Law of order of operations can be overridden by parentheses self.company_query.update(num_chairs=(F('num_employees') + 2) * F('num_employees')) self.assertSequenceEqual( self.company_query, [ { 'num_chairs': 5294600, 'name': 'Example Inc.', 'num_employees': 2300 }, { 'num_chairs': 15, 'name': 'Foobar Ltd.', 'num_employees': 3 }, { 'num_chairs': 1088, 'name': 'Test GmbH', 'num_employees': 32 } ], ) def test_update_with_fk(self): # ForeignKey can become updated with the value of another ForeignKey. 
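        # QuerySet.update() returns the number of rows matched, hence the
        # comparison with 3 (one row per company) below.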
self.assertEqual(Company.objects.update(point_of_contact=F('ceo')), 3) self.assertQuerysetEqual( Company.objects.all(), ['Joe Smith', 'Frank Meyer', 'Max Mustermann'], lambda c: str(c.point_of_contact), ordered=False ) def test_update_with_none(self): Number.objects.create(integer=1, float=1.0) Number.objects.create(integer=2) Number.objects.filter(float__isnull=False).update(float=Value(None)) self.assertQuerysetEqual( Number.objects.all(), [None, None], lambda n: n.float, ordered=False ) def test_filter_with_join(self): # F Expressions can also span joins Company.objects.update(point_of_contact=F('ceo')) c = Company.objects.first() c.point_of_contact = Employee.objects.create(firstname="Guido", lastname="van Rossum") c.save() self.assertQuerysetEqual( Company.objects.filter(ceo__firstname=F('point_of_contact__firstname')), ['Foobar Ltd.', 'Test GmbH'], lambda c: c.name, ordered=False ) Company.objects.exclude( ceo__firstname=F("point_of_contact__firstname") ).update(name="foo") self.assertEqual( Company.objects.exclude( ceo__firstname=F('point_of_contact__firstname') ).get().name, "foo", ) msg = "Joined field references are not permitted in this query" with self.assertRaisesMessage(FieldError, msg): Company.objects.exclude( ceo__firstname=F('point_of_contact__firstname') ).update(name=F('point_of_contact__lastname')) def test_object_update(self): # F expressions can be used to update attributes on single objects self.gmbh.num_employees = F('num_employees') + 4 self.gmbh.save() self.gmbh.refresh_from_db() self.assertEqual(self.gmbh.num_employees, 36) def test_new_object_save(self): # We should be able to use Funcs when inserting new data test_co = Company(name=Lower(Value('UPPER')), num_employees=32, num_chairs=1, ceo=self.max) test_co.save() test_co.refresh_from_db() self.assertEqual(test_co.name, "upper") def test_new_object_create(self): test_co = Company.objects.create(name=Lower(Value('UPPER')), num_employees=32, num_chairs=1, ceo=self.max) test_co.refresh_from_db() self.assertEqual(test_co.name, "upper") def test_object_create_with_aggregate(self): # Aggregates are not allowed when inserting new data msg = 'Aggregate functions are not allowed in this query (num_employees=Max(Value(1))).' with self.assertRaisesMessage(FieldError, msg): Company.objects.create( name='Company', num_employees=Max(Value(1)), num_chairs=1, ceo=Employee.objects.create(firstname="Just", lastname="Doit", salary=30), ) def test_object_update_fk(self): # F expressions cannot be used to update attributes which are foreign # keys, or attributes which involve joins. test_gmbh = Company.objects.get(pk=self.gmbh.pk) msg = 'F(ceo)": "Company.point_of_contact" must be a "Employee" instance.' 
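        # assertRaisesMessage() matches by substring containment, so this
        # deliberately truncated message prefix is sufficient.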
with self.assertRaisesMessage(ValueError, msg): test_gmbh.point_of_contact = F('ceo') test_gmbh.point_of_contact = self.gmbh.ceo test_gmbh.save() test_gmbh.name = F('ceo__lastname') msg = 'Joined field references are not permitted in this query' with self.assertRaisesMessage(FieldError, msg): test_gmbh.save() def test_update_inherited_field_value(self): msg = 'Joined field references are not permitted in this query' with self.assertRaisesMessage(FieldError, msg): RemoteEmployee.objects.update(adjusted_salary=F('salary') * 5) def test_object_update_unsaved_objects(self): # F expressions cannot be used to update attributes on objects which do # not yet exist in the database acme = Company(name='The Acme Widget Co.', num_employees=12, num_chairs=5, ceo=self.max) acme.num_employees = F("num_employees") + 16 msg = ( 'Failed to insert expression "Col(expressions_company, ' 'expressions.Company.num_employees) + Value(16)" on ' 'expressions.Company.num_employees. F() expressions can only be ' 'used to update, not to insert.' ) with self.assertRaisesMessage(ValueError, msg): acme.save() acme.num_employees = 12 acme.name = Lower(F('name')) msg = ( 'Failed to insert expression "Lower(Col(expressions_company, ' 'expressions.Company.name))" on expressions.Company.name. F() ' 'expressions can only be used to update, not to insert.' ) with self.assertRaisesMessage(ValueError, msg): acme.save() def test_ticket_11722_iexact_lookup(self): Employee.objects.create(firstname="John", lastname="Doe") Employee.objects.create(firstname="Test", lastname="test") queryset = Employee.objects.filter(firstname__iexact=F('lastname')) self.assertQuerysetEqual(queryset, ["<Employee: Test test>"]) def test_ticket_16731_startswith_lookup(self): Employee.objects.create(firstname="John", lastname="Doe") e2 = Employee.objects.create(firstname="Jack", lastname="Jackson") e3 = Employee.objects.create(firstname="Jack", lastname="jackson") self.assertSequenceEqual( Employee.objects.filter(lastname__startswith=F('firstname')), [e2, e3] if connection.features.has_case_insensitive_like else [e2] ) qs = Employee.objects.filter(lastname__istartswith=F('firstname')).order_by('pk') self.assertSequenceEqual(qs, [e2, e3]) def test_ticket_18375_join_reuse(self): # Reverse multijoin F() references and the lookup target the same join. # Pre #18375 the F() join was generated first and the lookup couldn't # reuse that join. qs = Employee.objects.filter(company_ceo_set__num_chairs=F('company_ceo_set__num_employees')) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_ticket_18375_kwarg_ordering(self): # The next query was dict-randomization dependent - if the "gte=1" # was seen first, then the F() will reuse the join generated by the # gte lookup, if F() was seen first, then it generated a join the # other lookups could not reuse. qs = Employee.objects.filter( company_ceo_set__num_chairs=F('company_ceo_set__num_employees'), company_ceo_set__num_chairs__gte=1, ) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_ticket_18375_kwarg_ordering_2(self): # Another similar case for F() than above. Now we have the same join # in two filter kwargs, one in the lhs lookup, one in F. Here pre # #18375 the amount of joins generated was random if dict # randomization was enabled, that is the generated query dependent # on which clause was seen first. 
qs = Employee.objects.filter( company_ceo_set__num_employees=F('pk'), pk=F('company_ceo_set__num_employees') ) self.assertEqual(str(qs.query).count('JOIN'), 1) def test_ticket_18375_chained_filters(self): # F() expressions do not reuse joins from previous filter. qs = Employee.objects.filter( company_ceo_set__num_employees=F('pk') ).filter( company_ceo_set__num_employees=F('company_ceo_set__num_employees') ) self.assertEqual(str(qs.query).count('JOIN'), 2) def test_order_by_exists(self): mary = Employee.objects.create(firstname='Mary', lastname='Mustermann', salary=20) mustermanns_by_seniority = Employee.objects.filter(lastname='Mustermann').order_by( # Order by whether the employee is the CEO of a company Exists(Company.objects.filter(ceo=OuterRef('pk'))).desc() ) self.assertSequenceEqual(mustermanns_by_seniority, [self.max, mary]) def test_order_by_multiline_sql(self): raw_order_by = ( RawSQL(''' CASE WHEN num_employees > 1000 THEN num_chairs ELSE 0 END ''', []).desc(), RawSQL(''' CASE WHEN num_chairs > 1 THEN 1 ELSE 0 END ''', []).asc() ) for qs in ( Company.objects.all(), Company.objects.distinct(), ): with self.subTest(qs=qs): self.assertSequenceEqual( qs.order_by(*raw_order_by), [self.example_inc, self.gmbh, self.foobar_ltd], ) def test_outerref(self): inner = Company.objects.filter(point_of_contact=OuterRef('pk')) msg = ( 'This queryset contains a reference to an outer query and may only ' 'be used in a subquery.' ) with self.assertRaisesMessage(ValueError, msg): inner.exists() outer = Employee.objects.annotate(is_point_of_contact=Exists(inner)) self.assertIs(outer.exists(), True) def test_exist_single_field_output_field(self): queryset = Company.objects.values('pk') self.assertIsInstance(Exists(queryset).output_field, BooleanField) def test_subquery(self): Company.objects.filter(name='Example Inc.').update( point_of_contact=Employee.objects.get(firstname='Joe', lastname='Smith'), ceo=self.max, ) Employee.objects.create(firstname='Bob', lastname='Brown', salary=40) qs = Employee.objects.annotate( is_point_of_contact=Exists(Company.objects.filter(point_of_contact=OuterRef('pk'))), is_not_point_of_contact=~Exists(Company.objects.filter(point_of_contact=OuterRef('pk'))), is_ceo_of_small_company=Exists(Company.objects.filter(num_employees__lt=200, ceo=OuterRef('pk'))), is_ceo_small_2=~~Exists(Company.objects.filter(num_employees__lt=200, ceo=OuterRef('pk'))), largest_company=Subquery(Company.objects.order_by('-num_employees').filter( Q(ceo=OuterRef('pk')) | Q(point_of_contact=OuterRef('pk')) ).values('name')[:1], output_field=CharField()) ).values( 'firstname', 'is_point_of_contact', 'is_not_point_of_contact', 'is_ceo_of_small_company', 'is_ceo_small_2', 'largest_company', ).order_by('firstname') results = list(qs) # Could use Coalesce(subq, Value('')) instead except for the bug in # cx_Oracle mentioned in #23843. 
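        # On backends that interpret empty strings as NULL (Oracle), the NULL
        # subquery result surfaces as '', so it's normalized back to None
        # below before comparing.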
bob = results[0] if bob['largest_company'] == '' and connection.features.interprets_empty_strings_as_nulls: bob['largest_company'] = None self.assertEqual(results, [ { 'firstname': 'Bob', 'is_point_of_contact': False, 'is_not_point_of_contact': True, 'is_ceo_of_small_company': False, 'is_ceo_small_2': False, 'largest_company': None, }, { 'firstname': 'Frank', 'is_point_of_contact': False, 'is_not_point_of_contact': True, 'is_ceo_of_small_company': True, 'is_ceo_small_2': True, 'largest_company': 'Foobar Ltd.', }, { 'firstname': 'Joe', 'is_point_of_contact': True, 'is_not_point_of_contact': False, 'is_ceo_of_small_company': False, 'is_ceo_small_2': False, 'largest_company': 'Example Inc.', }, { 'firstname': 'Max', 'is_point_of_contact': False, 'is_not_point_of_contact': True, 'is_ceo_of_small_company': True, 'is_ceo_small_2': True, 'largest_company': 'Example Inc.' } ]) # A less elegant way to write the same query: this uses a LEFT OUTER # JOIN and an IS NULL, inside a WHERE NOT IN which is probably less # efficient than EXISTS. self.assertCountEqual( qs.filter(is_point_of_contact=True).values('pk'), Employee.objects.exclude(company_point_of_contact_set=None).values('pk') ) def test_subquery_eq(self): qs = Employee.objects.annotate( is_ceo=Exists(Company.objects.filter(ceo=OuterRef('pk'))), is_point_of_contact=Exists( Company.objects.filter(point_of_contact=OuterRef('pk')), ), small_company=Exists( queryset=Company.objects.filter(num_employees__lt=200), ), ).filter(is_ceo=True, is_point_of_contact=False, small_company=True) self.assertNotEqual( qs.query.annotations['is_ceo'], qs.query.annotations['is_point_of_contact'], ) self.assertNotEqual( qs.query.annotations['is_ceo'], qs.query.annotations['small_company'], ) def test_in_subquery(self): # This is a contrived test (and you really wouldn't write this query), # but it is a succinct way to test the __in=Subquery() construct. 
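        # In everyday code the explicit Subquery() wrapper is unnecessary:
        # pk__in=small_companies would be coerced to a subquery automatically.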
small_companies = Company.objects.filter(num_employees__lt=200).values('pk') subquery_test = Company.objects.filter(pk__in=Subquery(small_companies)) self.assertCountEqual(subquery_test, [self.foobar_ltd, self.gmbh]) subquery_test2 = Company.objects.filter(pk=Subquery(small_companies.filter(num_employees=3))) self.assertCountEqual(subquery_test2, [self.foobar_ltd]) def test_uuid_pk_subquery(self): u = UUIDPK.objects.create() UUID.objects.create(uuid_fk=u) qs = UUIDPK.objects.filter(id__in=Subquery(UUID.objects.values('uuid_fk__id'))) self.assertCountEqual(qs, [u]) def test_nested_subquery(self): inner = Company.objects.filter(point_of_contact=OuterRef('pk')) outer = Employee.objects.annotate(is_point_of_contact=Exists(inner)) contrived = Employee.objects.annotate( is_point_of_contact=Subquery( outer.filter(pk=OuterRef('pk')).values('is_point_of_contact'), output_field=BooleanField(), ), ) self.assertCountEqual(contrived.values_list(), outer.values_list()) def test_nested_subquery_join_outer_ref(self): inner = Employee.objects.filter(pk=OuterRef('ceo__pk')).values('pk') qs = Employee.objects.annotate( ceo_company=Subquery( Company.objects.filter( ceo__in=inner, ceo__pk=OuterRef('pk'), ).values('pk'), ), ) self.assertSequenceEqual( qs.values_list('ceo_company', flat=True), [self.example_inc.pk, self.foobar_ltd.pk, self.gmbh.pk], ) def test_nested_subquery_outer_ref_2(self): first = Time.objects.create(time='09:00') second = Time.objects.create(time='17:00') third = Time.objects.create(time='21:00') SimulationRun.objects.bulk_create([ SimulationRun(start=first, end=second, midpoint='12:00'), SimulationRun(start=first, end=third, midpoint='15:00'), SimulationRun(start=second, end=first, midpoint='00:00'), ]) inner = Time.objects.filter(time=OuterRef(OuterRef('time')), pk=OuterRef('start')).values('time') middle = SimulationRun.objects.annotate(other=Subquery(inner)).values('other')[:1] outer = Time.objects.annotate(other=Subquery(middle, output_field=TimeField())) # This is a contrived example. It exercises the double OuterRef form. self.assertCountEqual(outer, [first, second, third]) def test_nested_subquery_outer_ref_with_autofield(self): first = Time.objects.create(time='09:00') second = Time.objects.create(time='17:00') SimulationRun.objects.create(start=first, end=second, midpoint='12:00') inner = SimulationRun.objects.filter(start=OuterRef(OuterRef('pk'))).values('start') middle = Time.objects.annotate(other=Subquery(inner)).values('other')[:1] outer = Time.objects.annotate(other=Subquery(middle, output_field=IntegerField())) # This exercises the double OuterRef form with AutoField as pk. 
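        # OuterRef(OuterRef('pk')) resolves in two hops: the inner OuterRef is
        # resolved against the middle queryset, which in turn defers to the
        # outermost Time queryset for 'pk'.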
self.assertCountEqual(outer, [first, second]) def test_annotations_within_subquery(self): Company.objects.filter(num_employees__lt=50).update(ceo=Employee.objects.get(firstname='Frank')) inner = Company.objects.filter( ceo=OuterRef('pk') ).values('ceo').annotate(total_employees=Sum('num_employees')).values('total_employees') outer = Employee.objects.annotate(total_employees=Subquery(inner)).filter(salary__lte=Subquery(inner)) self.assertSequenceEqual( outer.order_by('-total_employees').values('salary', 'total_employees'), [{'salary': 10, 'total_employees': 2300}, {'salary': 20, 'total_employees': 35}], ) def test_subquery_references_joined_table_twice(self): inner = Company.objects.filter( num_chairs__gte=OuterRef('ceo__salary'), num_employees__gte=OuterRef('point_of_contact__salary'), ) # Another contrived example (there is no need to have a subquery here) outer = Company.objects.filter(pk__in=Subquery(inner.values('pk'))) self.assertFalse(outer.exists()) def test_subquery_filter_by_aggregate(self): Number.objects.create(integer=1000, float=1.2) Employee.objects.create(salary=1000) qs = Number.objects.annotate( min_valuable_count=Subquery( Employee.objects.filter( salary=OuterRef('integer'), ).annotate(cnt=Count('salary')).filter(cnt__gt=0).values('cnt')[:1] ), ) self.assertEqual(qs.get().float, 1.2) def test_subquery_filter_by_lazy(self): self.max.manager = Manager.objects.create(name='Manager') self.max.save() max_manager = SimpleLazyObject( lambda: Manager.objects.get(pk=self.max.manager.pk) ) qs = Company.objects.annotate( ceo_manager=Subquery( Employee.objects.filter( lastname=OuterRef('ceo__lastname'), ).values('manager'), ), ).filter(ceo_manager=max_manager) self.assertEqual(qs.get(), self.gmbh) def test_aggregate_subquery_annotation(self): with self.assertNumQueries(1) as ctx: aggregate = Company.objects.annotate( ceo_salary=Subquery( Employee.objects.filter( id=OuterRef('ceo_id'), ).values('salary') ), ).aggregate( ceo_salary_gt_20=Count('pk', filter=Q(ceo_salary__gt=20)), ) self.assertEqual(aggregate, {'ceo_salary_gt_20': 1}) # Aggregation over a subquery annotation doesn't annotate the subquery # twice in the inner query. sql = ctx.captured_queries[0]['sql'] self.assertLessEqual(sql.count('SELECT'), 3) # GROUP BY isn't required to aggregate over a query that doesn't # contain nested aggregates. 
self.assertNotIn('GROUP BY', sql) def test_explicit_output_field(self): class FuncA(Func): output_field = CharField() class FuncB(Func): pass expr = FuncB(FuncA()) self.assertEqual(expr.output_field, FuncA.output_field) def test_outerref_mixed_case_table_name(self): inner = Result.objects.filter(result_time__gte=OuterRef('experiment__assigned')) outer = Result.objects.filter(pk__in=Subquery(inner.values('pk'))) self.assertFalse(outer.exists()) def test_outerref_with_operator(self): inner = Company.objects.filter(num_employees=OuterRef('ceo__salary') + 2) outer = Company.objects.filter(pk__in=Subquery(inner.values('pk'))) self.assertEqual(outer.get().name, 'Test GmbH') def test_nested_outerref_with_function(self): self.gmbh.point_of_contact = Employee.objects.get(lastname='Meyer') self.gmbh.save() inner = Employee.objects.filter( lastname__startswith=Left(OuterRef(OuterRef('lastname')), 1), ) qs = Employee.objects.annotate( ceo_company=Subquery( Company.objects.filter( point_of_contact__in=inner, ceo__pk=OuterRef('pk'), ).values('name'), ), ).filter(ceo_company__isnull=False) self.assertEqual(qs.get().ceo_company, 'Test GmbH') def test_annotation_with_outerref(self): gmbh_salary = Company.objects.annotate( max_ceo_salary_raise=Subquery( Company.objects.annotate( salary_raise=OuterRef('num_employees') + F('num_employees'), ).order_by('-salary_raise').values('salary_raise')[:1], output_field=IntegerField(), ), ).get(pk=self.gmbh.pk) self.assertEqual(gmbh_salary.max_ceo_salary_raise, 2332) def test_annotation_with_nested_outerref(self): self.gmbh.point_of_contact = Employee.objects.get(lastname='Meyer') self.gmbh.save() inner = Employee.objects.annotate( outer_lastname=OuterRef(OuterRef('lastname')), ).filter(lastname__startswith=Left('outer_lastname', 1)) qs = Employee.objects.annotate( ceo_company=Subquery( Company.objects.filter( point_of_contact__in=inner, ceo__pk=OuterRef('pk'), ).values('name'), ), ).filter(ceo_company__isnull=False) self.assertEqual(qs.get().ceo_company, 'Test GmbH') def test_pickle_expression(self): expr = Value(1) expr.convert_value # populate cached property self.assertEqual(pickle.loads(pickle.dumps(expr)), expr) def test_incorrect_field_in_F_expression(self): with self.assertRaisesMessage(FieldError, "Cannot resolve keyword 'nope' into field."): list(Employee.objects.filter(firstname=F('nope'))) def test_incorrect_joined_field_in_F_expression(self): with self.assertRaisesMessage(FieldError, "Cannot resolve keyword 'nope' into field."): list(Company.objects.filter(ceo__pk=F('point_of_contact__nope'))) def test_exists_in_filter(self): inner = Company.objects.filter(ceo=OuterRef('pk')).values('pk') qs1 = Employee.objects.filter(Exists(inner)) qs2 = Employee.objects.annotate(found=Exists(inner)).filter(found=True) self.assertCountEqual(qs1, qs2) self.assertFalse(Employee.objects.exclude(Exists(inner)).exists()) self.assertCountEqual(qs2, Employee.objects.exclude(~Exists(inner))) def test_subquery_in_filter(self): inner = Company.objects.filter(ceo=OuterRef('pk')).values('based_in_eu') self.assertSequenceEqual( Employee.objects.filter(Subquery(inner)), [self.foobar_ltd.ceo], ) def test_subquery_group_by_outerref_in_filter(self): inner = Company.objects.annotate( employee=OuterRef('pk'), ).values('employee').annotate( min_num_chairs=Min('num_chairs'), ).values('ceo') self.assertIs(Employee.objects.filter(pk__in=Subquery(inner)).exists(), True) def test_case_in_filter_if_boolean_output_field(self): is_ceo = Company.objects.filter(ceo=OuterRef('pk')) is_poc = 
Company.objects.filter(point_of_contact=OuterRef('pk')) qs = Employee.objects.filter( Case( When(Exists(is_ceo), then=True), When(Exists(is_poc), then=True), default=False, output_field=BooleanField(), ), ) self.assertSequenceEqual(qs, [self.example_inc.ceo, self.foobar_ltd.ceo, self.max]) def test_boolean_expression_combined(self): is_ceo = Company.objects.filter(ceo=OuterRef('pk')) is_poc = Company.objects.filter(point_of_contact=OuterRef('pk')) self.gmbh.point_of_contact = self.max self.gmbh.save() self.assertSequenceEqual( Employee.objects.filter(Exists(is_ceo) | Exists(is_poc)), [self.example_inc.ceo, self.foobar_ltd.ceo, self.max], ) self.assertSequenceEqual( Employee.objects.filter(Exists(is_ceo) & Exists(is_poc)), [self.max], ) self.assertSequenceEqual( Employee.objects.filter(Exists(is_ceo) & Q(salary__gte=30)), [self.max], ) self.assertSequenceEqual( Employee.objects.filter(Exists(is_poc) | Q(salary__lt=15)), [self.example_inc.ceo, self.max], ) class IterableLookupInnerExpressionsTests(TestCase): @classmethod def setUpTestData(cls): ceo = Employee.objects.create(firstname='Just', lastname='Doit', salary=30) # MySQL requires that the values calculated for expressions don't pass # outside of the field's range, so it's inconvenient to use the values # in the more general tests. Company.objects.create(name='5020 Ltd', num_employees=50, num_chairs=20, ceo=ceo) Company.objects.create(name='5040 Ltd', num_employees=50, num_chairs=40, ceo=ceo) Company.objects.create(name='5050 Ltd', num_employees=50, num_chairs=50, ceo=ceo) Company.objects.create(name='5060 Ltd', num_employees=50, num_chairs=60, ceo=ceo) Company.objects.create(name='99300 Ltd', num_employees=99, num_chairs=300, ceo=ceo) def test_in_lookup_allows_F_expressions_and_expressions_for_integers(self): # __in lookups can use F() expressions for integers. 
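        # Each list member is compiled into the IN (...) clause, so arithmetic
        # like F('num_chairs') - 10 is evaluated by the database rather than
        # in Python.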
queryset = Company.objects.filter(num_employees__in=([F('num_chairs') - 10])) self.assertQuerysetEqual(queryset, ['<Company: 5060 Ltd>'], ordered=False) self.assertQuerysetEqual( Company.objects.filter(num_employees__in=([F('num_chairs') - 10, F('num_chairs') + 10])), ['<Company: 5040 Ltd>', '<Company: 5060 Ltd>'], ordered=False ) self.assertQuerysetEqual( Company.objects.filter( num_employees__in=([F('num_chairs') - 10, F('num_chairs'), F('num_chairs') + 10]) ), ['<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'], ordered=False ) def test_expressions_in_lookups_join_choice(self): midpoint = datetime.time(13, 0) t1 = Time.objects.create(time=datetime.time(12, 0)) t2 = Time.objects.create(time=datetime.time(14, 0)) SimulationRun.objects.create(start=t1, end=t2, midpoint=midpoint) SimulationRun.objects.create(start=t1, end=None, midpoint=midpoint) SimulationRun.objects.create(start=None, end=t2, midpoint=midpoint) SimulationRun.objects.create(start=None, end=None, midpoint=midpoint) queryset = SimulationRun.objects.filter(midpoint__range=[F('start__time'), F('end__time')]) self.assertQuerysetEqual( queryset, ['<SimulationRun: 13:00:00 (12:00:00 to 14:00:00)>'], ordered=False ) for alias in queryset.query.alias_map.values(): if isinstance(alias, Join): self.assertEqual(alias.join_type, constants.INNER) queryset = SimulationRun.objects.exclude(midpoint__range=[F('start__time'), F('end__time')]) self.assertQuerysetEqual(queryset, [], ordered=False) for alias in queryset.query.alias_map.values(): if isinstance(alias, Join): self.assertEqual(alias.join_type, constants.LOUTER) def test_range_lookup_allows_F_expressions_and_expressions_for_integers(self): # Range lookups can use F() expressions for integers. Company.objects.filter(num_employees__exact=F("num_chairs")) self.assertQuerysetEqual( Company.objects.filter(num_employees__range=(F('num_chairs'), 100)), ['<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>'], ordered=False ) self.assertQuerysetEqual( Company.objects.filter(num_employees__range=(F('num_chairs') - 10, F('num_chairs') + 10)), ['<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'], ordered=False ) self.assertQuerysetEqual( Company.objects.filter(num_employees__range=(F('num_chairs') - 10, 100)), ['<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>'], ordered=False ) self.assertQuerysetEqual( Company.objects.filter(num_employees__range=(1, 100)), [ '<Company: 5020 Ltd>', '<Company: 5040 Ltd>', '<Company: 5050 Ltd>', '<Company: 5060 Ltd>', '<Company: 99300 Ltd>', ], ordered=False ) @unittest.skipUnless(connection.vendor == 'sqlite', "This defensive test only works on databases that don't validate parameter types") def test_complex_expressions_do_not_introduce_sql_injection_via_untrusted_string_inclusion(self): """ This tests that SQL injection isn't possible using compilation of expressions in iterable filters, as their compilation happens before the main query compilation. It's limited to SQLite, as PostgreSQL, Oracle and other vendors have defense in depth against this by type checking. Testing against SQLite (the most permissive of the built-in databases) demonstrates that the problem doesn't exist while keeping the test simple. 
""" queryset = Company.objects.filter(name__in=[F('num_chairs') + '1)) OR ((1==1']) self.assertQuerysetEqual(queryset, [], ordered=False) def test_in_lookup_allows_F_expressions_and_expressions_for_datetimes(self): start = datetime.datetime(2016, 2, 3, 15, 0, 0) end = datetime.datetime(2016, 2, 5, 15, 0, 0) experiment_1 = Experiment.objects.create( name='Integrity testing', assigned=start.date(), start=start, end=end, completed=end.date(), estimated_time=end - start, ) experiment_2 = Experiment.objects.create( name='Taste testing', assigned=start.date(), start=start, end=end, completed=end.date(), estimated_time=end - start, ) Result.objects.create( experiment=experiment_1, result_time=datetime.datetime(2016, 2, 4, 15, 0, 0), ) Result.objects.create( experiment=experiment_1, result_time=datetime.datetime(2016, 3, 10, 2, 0, 0), ) Result.objects.create( experiment=experiment_2, result_time=datetime.datetime(2016, 1, 8, 5, 0, 0), ) within_experiment_time = [F('experiment__start'), F('experiment__end')] queryset = Result.objects.filter(result_time__range=within_experiment_time) self.assertQuerysetEqual(queryset, ["<Result: Result at 2016-02-04 15:00:00>"]) within_experiment_time = [F('experiment__start'), F('experiment__end')] queryset = Result.objects.filter(result_time__range=within_experiment_time) self.assertQuerysetEqual(queryset, ["<Result: Result at 2016-02-04 15:00:00>"]) class FTests(SimpleTestCase): def test_deepcopy(self): f = F("foo") g = deepcopy(f) self.assertEqual(f.name, g.name) def test_deconstruct(self): f = F('name') path, args, kwargs = f.deconstruct() self.assertEqual(path, 'django.db.models.expressions.F') self.assertEqual(args, (f.name,)) self.assertEqual(kwargs, {}) def test_equal(self): f = F('name') same_f = F('name') other_f = F('username') self.assertEqual(f, same_f) self.assertNotEqual(f, other_f) def test_hash(self): d = {F('name'): 'Bob'} self.assertIn(F('name'), d) self.assertEqual(d[F('name')], 'Bob') def test_not_equal_Value(self): f = F('name') value = Value('name') self.assertNotEqual(f, value) self.assertNotEqual(value, f) class ExpressionsTests(TestCase): def test_F_reuse(self): f = F('id') n = Number.objects.create(integer=-1) c = Company.objects.create( name="Example Inc.", num_employees=2300, num_chairs=5, ceo=Employee.objects.create(firstname="Joe", lastname="Smith") ) c_qs = Company.objects.filter(id=f) self.assertEqual(c_qs.get(), c) # Reuse the same F-object for another queryset n_qs = Number.objects.filter(id=f) self.assertEqual(n_qs.get(), n) # The original query still works correctly self.assertEqual(c_qs.get(), c) def test_patterns_escape(self): r""" Special characters (e.g. 
%, _ and \) stored in database are properly escaped when using a pattern lookup with an expression refs #16731 """ Employee.objects.bulk_create([ Employee(firstname="%Joh\\nny", lastname="%Joh\\n"), Employee(firstname="Johnny", lastname="%John"), Employee(firstname="Jean-Claude", lastname="Claud_"), Employee(firstname="Jean-Claude", lastname="Claude"), Employee(firstname="Jean-Claude", lastname="Claude%"), Employee(firstname="Johnny", lastname="Joh\\n"), Employee(firstname="Johnny", lastname="John"), Employee(firstname="Johnny", lastname="_ohn"), ]) self.assertQuerysetEqual( Employee.objects.filter(firstname__contains=F('lastname')), ["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Jean-Claude Claude>", "<Employee: Johnny John>"], ordered=False, ) self.assertQuerysetEqual( Employee.objects.filter(firstname__startswith=F('lastname')), ["<Employee: %Joh\\nny %Joh\\n>", "<Employee: Johnny John>"], ordered=False, ) self.assertQuerysetEqual( Employee.objects.filter(firstname__endswith=F('lastname')), ["<Employee: Jean-Claude Claude>"], ordered=False, ) def test_insensitive_patterns_escape(self): r""" Special characters (e.g. %, _ and \) stored in database are properly escaped when using a case insensitive pattern lookup with an expression -- refs #16731 """ Employee.objects.bulk_create([ Employee(firstname="%Joh\\nny", lastname="%joh\\n"), Employee(firstname="Johnny", lastname="%john"), Employee(firstname="Jean-Claude", lastname="claud_"), Employee(firstname="Jean-Claude", lastname="claude"), Employee(firstname="Jean-Claude", lastname="claude%"), Employee(firstname="Johnny", lastname="joh\\n"), Employee(firstname="Johnny", lastname="john"), Employee(firstname="Johnny", lastname="_ohn"), ]) self.assertQuerysetEqual( Employee.objects.filter(firstname__icontains=F('lastname')), ["<Employee: %Joh\\nny %joh\\n>", "<Employee: Jean-Claude claude>", "<Employee: Johnny john>"], ordered=False, ) self.assertQuerysetEqual( Employee.objects.filter(firstname__istartswith=F('lastname')), ["<Employee: %Joh\\nny %joh\\n>", "<Employee: Johnny john>"], ordered=False, ) self.assertQuerysetEqual( Employee.objects.filter(firstname__iendswith=F('lastname')), ["<Employee: Jean-Claude claude>"], ordered=False, ) @isolate_apps('expressions') class SimpleExpressionTests(SimpleTestCase): def test_equal(self): self.assertEqual(Expression(), Expression()) self.assertEqual( Expression(IntegerField()), Expression(output_field=IntegerField()) ) self.assertEqual(Expression(IntegerField()), mock.ANY) self.assertNotEqual( Expression(IntegerField()), Expression(CharField()) ) class TestModel(Model): field = IntegerField() other_field = IntegerField() self.assertNotEqual( Expression(TestModel._meta.get_field('field')), Expression(TestModel._meta.get_field('other_field')), ) def test_hash(self): self.assertEqual(hash(Expression()), hash(Expression())) self.assertEqual( hash(Expression(IntegerField())), hash(Expression(output_field=IntegerField())) ) self.assertNotEqual( hash(Expression(IntegerField())), hash(Expression(CharField())), ) class TestModel(Model): field = IntegerField() other_field = IntegerField() self.assertNotEqual( hash(Expression(TestModel._meta.get_field('field'))), hash(Expression(TestModel._meta.get_field('other_field'))), ) class ExpressionsNumericTests(TestCase): @classmethod def setUpTestData(cls): Number(integer=-1).save() Number(integer=42).save() Number(integer=1337).save() Number.objects.update(float=F('integer')) def test_fill_with_value_from_same_object(self): """ We can fill a value in all objects with an 
other value of the same object. """ self.assertQuerysetEqual( Number.objects.all(), ['<Number: -1, -1.000>', '<Number: 42, 42.000>', '<Number: 1337, 1337.000>'], ordered=False ) def test_increment_value(self): """ We can increment the value of all objects in a queryset. """ self.assertEqual(Number.objects.filter(integer__gt=0).update(integer=F('integer') + 1), 2) self.assertQuerysetEqual( Number.objects.all(), ['<Number: -1, -1.000>', '<Number: 43, 42.000>', '<Number: 1338, 1337.000>'], ordered=False ) def test_filter_not_equals_other_field(self): """ We can filter for objects where one field's value does not equal another field's value. """ self.assertEqual(Number.objects.filter(integer__gt=0).update(integer=F('integer') + 1), 2) self.assertQuerysetEqual( Number.objects.exclude(float=F('integer')), ['<Number: 43, 42.000>', '<Number: 1338, 1337.000>'], ordered=False ) def test_complex_expressions(self): """ Complex expressions combining different arithmetic operators are possible. """ n = Number.objects.create(integer=10, float=123.45) self.assertEqual(Number.objects.filter(pk=n.pk).update( float=F('integer') + F('float') * 2), 1) self.assertEqual(Number.objects.get(pk=n.pk).integer, 10) self.assertEqual(Number.objects.get(pk=n.pk).float, Approximate(256.900, places=3)) class ExpressionOperatorTests(TestCase): @classmethod def setUpTestData(cls): cls.n = Number.objects.create(integer=42, float=15.5) cls.n1 = Number.objects.create(integer=-42, float=-15.5) def test_lefthand_addition(self): # LH Addition of floats and integers Number.objects.filter(pk=self.n.pk).update( integer=F('integer') + 15, float=F('float') + 42.7 ) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3)) def test_lefthand_subtraction(self): # LH Subtraction of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') - 15, float=F('float') - 42.7) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(-27.200, places=3)) def test_lefthand_multiplication(self): # Multiplication of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') * 15, float=F('float') * 42.7) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3)) def test_lefthand_division(self): # LH Division of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') / 2, float=F('float') / 42.7) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 21) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(0.363, places=3)) def test_lefthand_modulo(self): # LH Modulo arithmetic on integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') % 20) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 2) def test_lefthand_bitwise_and(self): # LH Bitwise ands on integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitand(56)) Number.objects.filter(pk=self.n1.pk).update(integer=F('integer').bitand(-56)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 40) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -64) def test_lefthand_bitwise_left_shift_operator(self): Number.objects.update(integer=F('integer').bitleftshift(2)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 168) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -168) def 
test_lefthand_bitwise_right_shift_operator(self): Number.objects.update(integer=F('integer').bitrightshift(2)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 10) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -11) def test_lefthand_bitwise_or(self): # LH Bitwise or on integers Number.objects.update(integer=F('integer').bitor(48)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 58) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -10) def test_lefthand_power(self): # LH Power arithmetic operation on floats and integers Number.objects.filter(pk=self.n.pk).update(integer=F('integer') ** 2, float=F('float') ** 1.5) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 1764) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(61.02, places=2)) @unittest.skipIf(connection.vendor == 'oracle', "Oracle doesn't support bitwise XOR.") def test_lefthand_bitwise_xor(self): Number.objects.update(integer=F('integer').bitxor(48)) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 26) self.assertEqual(Number.objects.get(pk=self.n1.pk).integer, -26) @unittest.skipIf(connection.vendor == 'oracle', "Oracle doesn't support bitwise XOR.") def test_lefthand_bitwise_xor_null(self): employee = Employee.objects.create(firstname='John', lastname='Doe') Employee.objects.update(salary=F('salary').bitxor(48)) employee.refresh_from_db() self.assertIsNone(employee.salary) @unittest.skipUnless(connection.vendor == 'oracle', "Oracle doesn't support bitwise XOR.") def test_lefthand_bitwise_xor_not_supported(self): msg = 'Bitwise XOR is not supported in Oracle.' with self.assertRaisesMessage(NotSupportedError, msg): Number.objects.update(integer=F('integer').bitxor(48)) def test_right_hand_addition(self): # Right hand operators Number.objects.filter(pk=self.n.pk).update(integer=15 + F('integer'), float=42.7 + F('float')) # RH Addition of floats and integers self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3)) def test_right_hand_subtraction(self): Number.objects.filter(pk=self.n.pk).update(integer=15 - F('integer'), float=42.7 - F('float')) # RH Subtraction of floats and integers self.assertEqual(Number.objects.get(pk=self.n.pk).integer, -27) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(27.200, places=3)) def test_right_hand_multiplication(self): # RH Multiplication of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=15 * F('integer'), float=42.7 * F('float')) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3)) def test_right_hand_division(self): # RH Division of floats and integers Number.objects.filter(pk=self.n.pk).update(integer=640 / F('integer'), float=42.7 / F('float')) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 15) self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(2.755, places=3)) def test_right_hand_modulo(self): # RH Modulo arithmetic on integers Number.objects.filter(pk=self.n.pk).update(integer=69 % F('integer')) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27) def test_righthand_power(self): # RH Power arithmetic operation on floats and integers Number.objects.filter(pk=self.n.pk).update(integer=2 ** F('integer'), float=1.5 ** F('float')) self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 4398046511104) 
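        # 4398046511104 == 2 ** 42: the literal base 2 raised to the stored
        # integer value.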
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(536.308, places=3)) class FTimeDeltaTests(TestCase): @classmethod def setUpTestData(cls): cls.sday = sday = datetime.date(2010, 6, 25) cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000) midnight = datetime.time(0) delta0 = datetime.timedelta(0) delta1 = datetime.timedelta(microseconds=253000) delta2 = datetime.timedelta(seconds=44) delta3 = datetime.timedelta(hours=21, minutes=8) delta4 = datetime.timedelta(days=10) delta5 = datetime.timedelta(days=90) # Test data is set so that deltas and delays will be # strictly increasing. cls.deltas = [] cls.delays = [] cls.days_long = [] # e0: started same day as assigned, zero duration end = stime + delta0 cls.e0 = Experiment.objects.create( name='e0', assigned=sday, start=stime, end=end, completed=end.date(), estimated_time=delta0, ) cls.deltas.append(delta0) cls.delays.append(cls.e0.start - datetime.datetime.combine(cls.e0.assigned, midnight)) cls.days_long.append(cls.e0.completed - cls.e0.assigned) # e1: started one day after assigned, tiny duration, data # set so that end time has no fractional seconds, which # tests an edge case on sqlite. delay = datetime.timedelta(1) end = stime + delay + delta1 e1 = Experiment.objects.create( name='e1', assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta1, ) cls.deltas.append(delta1) cls.delays.append(e1.start - datetime.datetime.combine(e1.assigned, midnight)) cls.days_long.append(e1.completed - e1.assigned) # e2: started three days after assigned, small duration end = stime + delta2 e2 = Experiment.objects.create( name='e2', assigned=sday - datetime.timedelta(3), start=stime, end=end, completed=end.date(), estimated_time=datetime.timedelta(hours=1), ) cls.deltas.append(delta2) cls.delays.append(e2.start - datetime.datetime.combine(e2.assigned, midnight)) cls.days_long.append(e2.completed - e2.assigned) # e3: started four days after assigned, medium duration delay = datetime.timedelta(4) end = stime + delay + delta3 e3 = Experiment.objects.create( name='e3', assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta3, ) cls.deltas.append(delta3) cls.delays.append(e3.start - datetime.datetime.combine(e3.assigned, midnight)) cls.days_long.append(e3.completed - e3.assigned) # e4: started 10 days after assignment, long duration end = stime + delta4 e4 = Experiment.objects.create( name='e4', assigned=sday - datetime.timedelta(10), start=stime, end=end, completed=end.date(), estimated_time=delta4 - datetime.timedelta(1), ) cls.deltas.append(delta4) cls.delays.append(e4.start - datetime.datetime.combine(e4.assigned, midnight)) cls.days_long.append(e4.completed - e4.assigned) # e5: started a month after assignment, very long duration delay = datetime.timedelta(30) end = stime + delay + delta5 e5 = Experiment.objects.create( name='e5', assigned=sday, start=stime + delay, end=end, completed=end.date(), estimated_time=delta5, ) cls.deltas.append(delta5) cls.delays.append(e5.start - datetime.datetime.combine(e5.assigned, midnight)) cls.days_long.append(e5.completed - e5.assigned) cls.expnames = [e.name for e in Experiment.objects.all()] def test_multiple_query_compilation(self): # Ticket #21643 queryset = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1)) q1 = str(queryset.query) q2 = str(queryset.query) self.assertEqual(q1, q2) def test_query_clone(self): # Ticket #21643 - Crash when compiling query more than once qs = 
Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1)) qs2 = qs.all() list(qs) list(qs2) # Intentionally no assert def test_delta_add(self): for i, delta in enumerate(self.deltas): test_set = [e.name for e in Experiment.objects.filter(end__lt=F('start') + delta)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(end__lt=delta + F('start'))] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(end__lte=F('start') + delta)] self.assertEqual(test_set, self.expnames[:i + 1]) def test_delta_subtract(self): for i, delta in enumerate(self.deltas): test_set = [e.name for e in Experiment.objects.filter(start__gt=F('end') - delta)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(start__gte=F('end') - delta)] self.assertEqual(test_set, self.expnames[:i + 1]) def test_exclude(self): for i, delta in enumerate(self.deltas): test_set = [e.name for e in Experiment.objects.exclude(end__lt=F('start') + delta)] self.assertEqual(test_set, self.expnames[i:]) test_set = [e.name for e in Experiment.objects.exclude(end__lte=F('start') + delta)] self.assertEqual(test_set, self.expnames[i + 1:]) def test_date_comparison(self): for i, days in enumerate(self.days_long): test_set = [e.name for e in Experiment.objects.filter(completed__lt=F('assigned') + days)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(completed__lte=F('assigned') + days)] self.assertEqual(test_set, self.expnames[:i + 1]) @skipUnlessDBFeature("supports_mixed_date_datetime_comparisons") def test_mixed_comparisons1(self): for i, delay in enumerate(self.delays): test_set = [e.name for e in Experiment.objects.filter(assigned__gt=F('start') - delay)] self.assertEqual(test_set, self.expnames[:i]) test_set = [e.name for e in Experiment.objects.filter(assigned__gte=F('start') - delay)] self.assertEqual(test_set, self.expnames[:i + 1]) def test_mixed_comparisons2(self): for i, delay in enumerate(self.delays): delay = datetime.timedelta(delay.days) test_set = [e.name for e in Experiment.objects.filter(start__lt=F('assigned') + delay)] self.assertEqual(test_set, self.expnames[:i]) test_set = [ e.name for e in Experiment.objects.filter(start__lte=F('assigned') + delay + datetime.timedelta(1)) ] self.assertEqual(test_set, self.expnames[:i + 1]) def test_delta_update(self): for delta in self.deltas: exps = Experiment.objects.all() expected_durations = [e.duration() for e in exps] expected_starts = [e.start + delta for e in exps] expected_ends = [e.end + delta for e in exps] Experiment.objects.update(start=F('start') + delta, end=F('end') + delta) exps = Experiment.objects.all() new_starts = [e.start for e in exps] new_ends = [e.end for e in exps] new_durations = [e.duration() for e in exps] self.assertEqual(expected_starts, new_starts) self.assertEqual(expected_ends, new_ends) self.assertEqual(expected_durations, new_durations) def test_invalid_operator(self): with self.assertRaises(DatabaseError): list(Experiment.objects.filter(start=F('start') * datetime.timedelta(0))) def test_durationfield_add(self): zeros = [e.name for e in Experiment.objects.filter(start=F('start') + F('estimated_time'))] self.assertEqual(zeros, ['e0']) end_less = [e.name for e in Experiment.objects.filter(end__lt=F('start') + F('estimated_time'))] self.assertEqual(end_less, ['e2']) delta_math = [ e.name for e in Experiment.objects.filter(end__gte=F('start') + 
F('estimated_time') + datetime.timedelta(hours=1)) ] self.assertEqual(delta_math, ['e4']) queryset = Experiment.objects.annotate(shifted=ExpressionWrapper( F('start') + Value(None, output_field=DurationField()), output_field=DateTimeField(), )) self.assertIsNone(queryset.first().shifted) def test_duration_expressions(self): for delta in self.deltas: qs = Experiment.objects.annotate(duration=F('estimated_time') + delta) for obj in qs: self.assertEqual(obj.duration, obj.estimated_time + delta) @skipUnlessDBFeature('supports_temporal_subtraction') def test_date_subtraction(self): queryset = Experiment.objects.annotate( completion_duration=F('completed') - F('assigned'), ) at_least_5_days = {e.name for e in queryset.filter(completion_duration__gte=datetime.timedelta(days=5))} self.assertEqual(at_least_5_days, {'e3', 'e4', 'e5'}) at_least_120_days = {e.name for e in queryset.filter(completion_duration__gte=datetime.timedelta(days=120))} self.assertEqual(at_least_120_days, {'e5'}) less_than_5_days = {e.name for e in queryset.filter(completion_duration__lt=datetime.timedelta(days=5))} self.assertEqual(less_than_5_days, {'e0', 'e1', 'e2'}) queryset = Experiment.objects.annotate( difference=F('completed') - Value(None, output_field=DateField()), ) self.assertIsNone(queryset.first().difference) queryset = Experiment.objects.annotate(shifted=ExpressionWrapper( F('completed') - Value(None, output_field=DurationField()), output_field=DateField(), )) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature('supports_temporal_subtraction') def test_date_subquery_subtraction(self): subquery = Experiment.objects.filter(pk=OuterRef('pk')).values('completed') queryset = Experiment.objects.annotate( difference=subquery - F('completed'), ).filter(difference=datetime.timedelta()) self.assertTrue(queryset.exists()) @skipUnlessDBFeature('supports_temporal_subtraction') def test_date_case_subtraction(self): queryset = Experiment.objects.annotate( date_case=Case( When(Q(name='e0'), then=F('completed')), output_field=DateField(), ), completed_value=Value( self.e0.completed, output_field=DateField(), ), difference=F('date_case') - F('completed_value'), ).filter(difference=datetime.timedelta()) self.assertEqual(queryset.get(), self.e0) @skipUnlessDBFeature('supports_temporal_subtraction') def test_time_subtraction(self): Time.objects.create(time=datetime.time(12, 30, 15, 2345)) queryset = Time.objects.annotate( difference=F('time') - Value(datetime.time(11, 15, 0)), ) self.assertEqual( queryset.get().difference, datetime.timedelta(hours=1, minutes=15, seconds=15, microseconds=2345) ) queryset = Time.objects.annotate( difference=F('time') - Value(None, output_field=TimeField()), ) self.assertIsNone(queryset.first().difference) queryset = Time.objects.annotate(shifted=ExpressionWrapper( F('time') - Value(None, output_field=DurationField()), output_field=TimeField(), )) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature('supports_temporal_subtraction') def test_time_subquery_subtraction(self): Time.objects.create(time=datetime.time(12, 30, 15, 2345)) subquery = Time.objects.filter(pk=OuterRef('pk')).values('time') queryset = Time.objects.annotate( difference=subquery - F('time'), ).filter(difference=datetime.timedelta()) self.assertTrue(queryset.exists()) @skipUnlessDBFeature('supports_temporal_subtraction') def test_datetime_subtraction(self): under_estimate = [ e.name for e in Experiment.objects.filter(estimated_time__gt=F('end') - F('start')) ] self.assertEqual(under_estimate, ['e2']) 
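        # F('end') - F('start') resolves to a DurationField, so it can be
        # compared directly against estimated_time; a minimal equivalent
        # annotation (names as in this test) would be:
        #
        #   Experiment.objects.annotate(
        #       actual=F('end') - F('start'),
        #   ).filter(estimated_time__gt=F('actual'))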
over_estimate = [ e.name for e in Experiment.objects.filter(estimated_time__lt=F('end') - F('start')) ] self.assertEqual(over_estimate, ['e4']) queryset = Experiment.objects.annotate( difference=F('start') - Value(None, output_field=DateTimeField()), ) self.assertIsNone(queryset.first().difference) queryset = Experiment.objects.annotate(shifted=ExpressionWrapper( F('start') - Value(None, output_field=DurationField()), output_field=DateTimeField(), )) self.assertIsNone(queryset.first().shifted) @skipUnlessDBFeature('supports_temporal_subtraction') def test_datetime_subquery_subtraction(self): subquery = Experiment.objects.filter(pk=OuterRef('pk')).values('start') queryset = Experiment.objects.annotate( difference=subquery - F('start'), ).filter(difference=datetime.timedelta()) self.assertTrue(queryset.exists()) @skipUnlessDBFeature('supports_temporal_subtraction') def test_datetime_subtraction_microseconds(self): delta = datetime.timedelta(microseconds=8999999999999999) Experiment.objects.update(end=F('start') + delta) qs = Experiment.objects.annotate(delta=F('end') - F('start')) for e in qs: self.assertEqual(e.delta, delta) def test_duration_with_datetime(self): # Exclude e1 which has very high precision so we can test this on all # backends regardless of whether or not it supports # microsecond_precision. over_estimate = Experiment.objects.exclude(name='e1').filter( completed__gt=self.stime + F('estimated_time'), ).order_by('name') self.assertQuerysetEqual(over_estimate, ['e3', 'e4', 'e5'], lambda e: e.name) def test_duration_with_datetime_microseconds(self): delta = datetime.timedelta(microseconds=8999999999999999) qs = Experiment.objects.annotate(dt=ExpressionWrapper( F('start') + delta, output_field=DateTimeField(), )) for e in qs: self.assertEqual(e.dt, e.start + delta) def test_date_minus_duration(self): more_than_4_days = Experiment.objects.filter( assigned__lt=F('completed') - Value(datetime.timedelta(days=4)) ) self.assertQuerysetEqual(more_than_4_days, ['e3', 'e4', 'e5'], lambda e: e.name) def test_negative_timedelta_update(self): # subtract 30 seconds, 30 minutes, 2 hours and 2 days experiments = Experiment.objects.filter(name='e0').annotate( start_sub_seconds=F('start') + datetime.timedelta(seconds=-30), ).annotate( start_sub_minutes=F('start_sub_seconds') + datetime.timedelta(minutes=-30), ).annotate( start_sub_hours=F('start_sub_minutes') + datetime.timedelta(hours=-2), ).annotate( new_start=F('start_sub_hours') + datetime.timedelta(days=-2), ) expected_start = datetime.datetime(2010, 6, 23, 9, 45, 0) # subtract 30 microseconds experiments = experiments.annotate(new_start=F('new_start') + datetime.timedelta(microseconds=-30)) expected_start += datetime.timedelta(microseconds=+746970) experiments.update(start=F('new_start')) e0 = Experiment.objects.get(name='e0') self.assertEqual(e0.start, expected_start) class ValueTests(TestCase): def test_update_TimeField_using_Value(self): Time.objects.create() Time.objects.update(time=Value(datetime.time(1), output_field=TimeField())) self.assertEqual(Time.objects.get().time, datetime.time(1)) def test_update_UUIDField_using_Value(self): UUID.objects.create() UUID.objects.update(uuid=Value(uuid.UUID('12345678901234567890123456789012'), output_field=UUIDField())) self.assertEqual(UUID.objects.get().uuid, uuid.UUID('12345678901234567890123456789012')) def test_deconstruct(self): value = Value('name') path, args, kwargs = value.deconstruct() self.assertEqual(path, 'django.db.models.expressions.Value') self.assertEqual(args, (value.value,)) 
self.assertEqual(kwargs, {}) def test_deconstruct_output_field(self): value = Value('name', output_field=CharField()) path, args, kwargs = value.deconstruct() self.assertEqual(path, 'django.db.models.expressions.Value') self.assertEqual(args, (value.value,)) self.assertEqual(len(kwargs), 1) self.assertEqual(kwargs['output_field'].deconstruct(), CharField().deconstruct()) def test_equal(self): value = Value('name') self.assertEqual(value, Value('name')) self.assertNotEqual(value, Value('username')) def test_hash(self): d = {Value('name'): 'Bob'} self.assertIn(Value('name'), d) self.assertEqual(d[Value('name')], 'Bob') def test_equal_output_field(self): value = Value('name', output_field=CharField()) same_value = Value('name', output_field=CharField()) other_value = Value('name', output_field=TimeField()) no_output_field = Value('name') self.assertEqual(value, same_value) self.assertNotEqual(value, other_value) self.assertNotEqual(value, no_output_field) def test_raise_empty_expressionlist(self): msg = 'ExpressionList requires at least one expression' with self.assertRaisesMessage(ValueError, msg): ExpressionList() def test_compile_unresolved(self): # This test might need to be revisited later on if #25425 is enforced. compiler = Time.objects.all().query.get_compiler(connection=connection) value = Value('foo') self.assertEqual(value.as_sql(compiler, connection), ('%s', ['foo'])) value = Value('foo', output_field=CharField()) self.assertEqual(value.as_sql(compiler, connection), ('%s', ['foo'])) def test_resolve_output_field(self): value_types = [ ('str', CharField), (True, BooleanField), (42, IntegerField), (3.14, FloatField), (datetime.date(2019, 5, 15), DateField), (datetime.datetime(2019, 5, 15), DateTimeField), (datetime.time(3, 16), TimeField), (datetime.timedelta(1), DurationField), (Decimal('3.14'), DecimalField), (b'', BinaryField), (uuid.uuid4(), UUIDField), ] for value, output_field_type in value_types: with self.subTest(type=type(value)): expr = Value(value) self.assertIsInstance(expr.output_field, output_field_type) def test_resolve_output_field_failure(self): msg = 'Cannot resolve expression type, unknown output_field' with self.assertRaisesMessage(FieldError, msg): Value(object()).output_field class FieldTransformTests(TestCase): @classmethod def setUpTestData(cls): cls.sday = sday = datetime.date(2010, 6, 25) cls.stime = stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000) cls.ex1 = Experiment.objects.create( name='Experiment 1', assigned=sday, completed=sday + datetime.timedelta(2), estimated_time=datetime.timedelta(2), start=stime, end=stime + datetime.timedelta(2), ) def test_month_aggregation(self): self.assertEqual( Experiment.objects.aggregate(month_count=Count('assigned__month')), {'month_count': 1} ) def test_transform_in_values(self): self.assertQuerysetEqual( Experiment.objects.values('assigned__month'), ["{'assigned__month': 6}"] ) def test_multiple_transforms_in_values(self): self.assertQuerysetEqual( Experiment.objects.values('end__date__month'), ["{'end__date__month': 6}"] ) class ReprTests(SimpleTestCase): def test_expressions(self): self.assertEqual( repr(Case(When(a=1))), "<Case: CASE WHEN <Q: (AND: ('a', 1))> THEN Value(None), ELSE Value(None)>" ) self.assertEqual( repr(When(Q(age__gte=18), then=Value('legal'))), "<When: WHEN <Q: (AND: ('age__gte', 18))> THEN Value(legal)>" ) self.assertEqual(repr(Col('alias', 'field')), "Col(alias, field)") self.assertEqual(repr(F('published')), "F(published)") self.assertEqual(repr(F('cost') + F('tax')),
"<CombinedExpression: F(cost) + F(tax)>") self.assertEqual( repr(ExpressionWrapper(F('cost') + F('tax'), IntegerField())), "ExpressionWrapper(F(cost) + F(tax))" ) self.assertEqual(repr(Func('published', function='TO_CHAR')), "Func(F(published), function=TO_CHAR)") self.assertEqual(repr(OrderBy(Value(1))), 'OrderBy(Value(1), descending=False)') self.assertEqual(repr(Random()), "Random()") self.assertEqual(repr(RawSQL('table.col', [])), "RawSQL(table.col, [])") self.assertEqual(repr(Ref('sum_cost', Sum('cost'))), "Ref(sum_cost, Sum(F(cost)))") self.assertEqual(repr(Value(1)), "Value(1)") self.assertEqual( repr(ExpressionList(F('col'), F('anothercol'))), 'ExpressionList(F(col), F(anothercol))' ) self.assertEqual( repr(ExpressionList(OrderBy(F('col'), descending=False))), 'ExpressionList(OrderBy(F(col), descending=False))' ) def test_functions(self): self.assertEqual(repr(Coalesce('a', 'b')), "Coalesce(F(a), F(b))") self.assertEqual(repr(Concat('a', 'b')), "Concat(ConcatPair(F(a), F(b)))") self.assertEqual(repr(Length('a')), "Length(F(a))") self.assertEqual(repr(Lower('a')), "Lower(F(a))") self.assertEqual(repr(Substr('a', 1, 3)), "Substr(F(a), Value(1), Value(3))") self.assertEqual(repr(Upper('a')), "Upper(F(a))") def test_aggregates(self): self.assertEqual(repr(Avg('a')), "Avg(F(a))") self.assertEqual(repr(Count('a')), "Count(F(a))") self.assertEqual(repr(Count('*')), "Count('*')") self.assertEqual(repr(Max('a')), "Max(F(a))") self.assertEqual(repr(Min('a')), "Min(F(a))") self.assertEqual(repr(StdDev('a')), "StdDev(F(a), sample=False)") self.assertEqual(repr(Sum('a')), "Sum(F(a))") self.assertEqual(repr(Variance('a', sample=True)), "Variance(F(a), sample=True)") def test_distinct_aggregates(self): self.assertEqual(repr(Count('a', distinct=True)), "Count(F(a), distinct=True)") self.assertEqual(repr(Count('*', distinct=True)), "Count('*', distinct=True)") def test_filtered_aggregates(self): filter = Q(a=1) self.assertEqual(repr(Avg('a', filter=filter)), "Avg(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(Count('a', filter=filter)), "Count(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(Max('a', filter=filter)), "Max(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(Min('a', filter=filter)), "Min(F(a), filter=(AND: ('a', 1)))") self.assertEqual(repr(StdDev('a', filter=filter)), "StdDev(F(a), filter=(AND: ('a', 1)), sample=False)") self.assertEqual(repr(Sum('a', filter=filter)), "Sum(F(a), filter=(AND: ('a', 1)))") self.assertEqual( repr(Variance('a', sample=True, filter=filter)), "Variance(F(a), filter=(AND: ('a', 1)), sample=True)" ) self.assertEqual( repr(Count('a', filter=filter, distinct=True)), "Count(F(a), distinct=True, filter=(AND: ('a', 1)))" ) class CombinableTests(SimpleTestCase): bitwise_msg = 'Use .bitand() and .bitor() for bitwise logical operations.' 
def test_negation(self): c = Combinable() self.assertEqual(-c, c * -1) def test_and(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): Combinable() & Combinable() def test_or(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): Combinable() | Combinable() def test_reversed_and(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): object() & Combinable() def test_reversed_or(self): with self.assertRaisesMessage(NotImplementedError, self.bitwise_msg): object() | Combinable() class ExpressionWrapperTests(SimpleTestCase): def test_empty_group_by(self): expr = ExpressionWrapper(Value(3), output_field=IntegerField()) self.assertEqual(expr.get_group_by_cols(alias=None), []) def test_non_empty_group_by(self): value = Value('f') value.output_field = None expr = ExpressionWrapper(Lower(value), output_field=IntegerField()) group_by_cols = expr.get_group_by_cols(alias=None) self.assertEqual(group_by_cols, [expr.expression]) self.assertEqual(group_by_cols[0].output_field, expr.output_field)
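# A short sketch of the group-by behavior exercised above (query shape
# illustrative, not asserted by this suite): a wrapped constant contributes
# no GROUP BY columns, so an aggregate alongside it groups only by real
# columns.
#
#   Experiment.objects.values('name').annotate(
#       constant=ExpressionWrapper(Value(3), output_field=IntegerField()),
#       n=Count('pk'),
#   )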
02384f1f6e82787d7aab2c8e49e0a5eb82a4f3f2d711fa76104b5ec9a2ddbaff
from datetime import date from decimal import Decimal from unittest import mock from django.db import connection, transaction from django.db.models import ( Case, Count, DecimalField, F, FilteredRelation, Q, Sum, When, ) from django.test import TestCase from django.test.testcases import skipUnlessDBFeature from .models import ( Author, Book, BookDailySales, Borrower, Currency, Editor, ExchangeRate, RentalSession, Reservation, Seller, ) class FilteredRelationTests(TestCase): @classmethod def setUpTestData(cls): cls.author1 = Author.objects.create(name='Alice') cls.author2 = Author.objects.create(name='Jane') cls.editor_a = Editor.objects.create(name='a') cls.editor_b = Editor.objects.create(name='b') cls.book1 = Book.objects.create( title='Poem by Alice', editor=cls.editor_a, author=cls.author1, ) cls.book1.generic_author.set([cls.author2]) cls.book2 = Book.objects.create( title='The book by Jane A', editor=cls.editor_b, author=cls.author2, ) cls.book3 = Book.objects.create( title='The book by Jane B', editor=cls.editor_b, author=cls.author2, ) cls.book4 = Book.objects.create( title='The book by Alice', editor=cls.editor_a, author=cls.author1, ) cls.author1.favorite_books.add(cls.book2) cls.author1.favorite_books.add(cls.book3) def test_select_related(self): qs = Author.objects.annotate( book_join=FilteredRelation('book'), ).select_related('book_join__editor').order_by('pk', 'book_join__pk') with self.assertNumQueries(1): self.assertQuerysetEqual(qs, [ (self.author1, self.book1, self.editor_a, self.author1), (self.author1, self.book4, self.editor_a, self.author1), (self.author2, self.book2, self.editor_b, self.author2), (self.author2, self.book3, self.editor_b, self.author2), ], lambda x: (x, x.book_join, x.book_join.editor, x.book_join.author)) def test_select_related_multiple(self): qs = Book.objects.annotate( author_join=FilteredRelation('author'), editor_join=FilteredRelation('editor'), ).select_related('author_join', 'editor_join').order_by('pk') self.assertQuerysetEqual(qs, [ (self.book1, self.author1, self.editor_a), (self.book2, self.author2, self.editor_b), (self.book3, self.author2, self.editor_b), (self.book4, self.author1, self.editor_a), ], lambda x: (x, x.author_join, x.editor_join)) def test_select_related_with_empty_relation(self): qs = Author.objects.annotate( book_join=FilteredRelation('book', condition=Q(pk=-1)), ).select_related('book_join').order_by('pk') self.assertSequenceEqual(qs, [self.author1, self.author2]) def test_select_related_foreign_key(self): qs = Book.objects.annotate( author_join=FilteredRelation('author'), ).select_related('author_join').order_by('pk') with self.assertNumQueries(1): self.assertQuerysetEqual(qs, [ (self.book1, self.author1), (self.book2, self.author2), (self.book3, self.author2), (self.book4, self.author1), ], lambda x: (x, x.author_join)) @skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of') def test_select_related_foreign_key_for_update_of(self): with transaction.atomic(): qs = Book.objects.annotate( author_join=FilteredRelation('author'), ).select_related('author_join').select_for_update(of=('self',)).order_by('pk') with self.assertNumQueries(1): self.assertQuerysetEqual(qs, [ (self.book1, self.author1), (self.book2, self.author2), (self.book3, self.author2), (self.book4, self.author1), ], lambda x: (x, x.author_join)) def test_without_join(self): self.assertSequenceEqual( Author.objects.annotate( book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')), ), [self.author1, self.author2] ) 
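    # FilteredRelation places its condition in the JOIN's ON clause rather
    # than in WHERE, roughly (SQL shape illustrative):
    #
    #   LEFT OUTER JOIN book book_alice
    #       ON book_alice.author_id = author.id
    #      AND UPPER(book_alice.title) = UPPER('poem by alice')
    #
    # so every author survives the join and book_alice is NULL when nothing
    # matches, which is why test_without_join returns both authors.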
def test_with_join(self): self.assertSequenceEqual( Author.objects.annotate( book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')), ).filter(book_alice__isnull=False), [self.author1] ) def test_with_exclude(self): self.assertSequenceEqual( Author.objects.annotate( book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')), ).exclude(book_alice__isnull=False), [self.author2], ) def test_with_join_and_complex_condition(self): self.assertSequenceEqual( Author.objects.annotate( book_alice=FilteredRelation( 'book', condition=Q( Q(book__title__iexact='poem by alice') | Q(book__state=Book.RENTED) ), ), ).filter(book_alice__isnull=False), [self.author1] ) def test_internal_queryset_alias_mapping(self): queryset = Author.objects.annotate( book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')), ).filter(book_alice__isnull=False) self.assertIn( 'INNER JOIN {} book_alice ON'.format(connection.ops.quote_name('filtered_relation_book')), str(queryset.query) ) def test_with_multiple_filter(self): self.assertSequenceEqual( Author.objects.annotate( book_editor_a=FilteredRelation( 'book', condition=Q(book__title__icontains='book', book__editor_id=self.editor_a.pk), ), ).filter(book_editor_a__isnull=False), [self.author1] ) def test_multiple_times(self): self.assertSequenceEqual( Author.objects.annotate( book_title_alice=FilteredRelation('book', condition=Q(book__title__icontains='alice')), ).filter(book_title_alice__isnull=False).filter(book_title_alice__isnull=False).distinct(), [self.author1] ) def test_exclude_relation_with_join(self): self.assertSequenceEqual( Author.objects.annotate( book_alice=FilteredRelation('book', condition=~Q(book__title__icontains='alice')), ).filter(book_alice__isnull=False).distinct(), [self.author2] ) def test_with_m2m(self): qs = Author.objects.annotate( favorite_books_written_by_jane=FilteredRelation( 'favorite_books', condition=Q(favorite_books__in=[self.book2]), ), ).filter(favorite_books_written_by_jane__isnull=False) self.assertSequenceEqual(qs, [self.author1]) def test_with_m2m_deep(self): qs = Author.objects.annotate( favorite_books_written_by_jane=FilteredRelation( 'favorite_books', condition=Q(favorite_books__author=self.author2), ), ).filter(favorite_books_written_by_jane__title='The book by Jane B') self.assertSequenceEqual(qs, [self.author1]) def test_with_m2m_multijoin(self): qs = Author.objects.annotate( favorite_books_written_by_jane=FilteredRelation( 'favorite_books', condition=Q(favorite_books__author=self.author2), ) ).filter(favorite_books_written_by_jane__editor__name='b').distinct() self.assertSequenceEqual(qs, [self.author1]) def test_values_list(self): self.assertSequenceEqual( Author.objects.annotate( book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')), ).filter(book_alice__isnull=False).values_list('book_alice__title', flat=True), ['Poem by Alice'] ) def test_values(self): self.assertSequenceEqual( Author.objects.annotate( book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')), ).filter(book_alice__isnull=False).values(), [{'id': self.author1.pk, 'name': 'Alice', 'content_type_id': None, 'object_id': None}] ) def test_extra(self): self.assertSequenceEqual( Author.objects.annotate( book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')), ).filter(book_alice__isnull=False).extra(where=['1 = 1']), [self.author1] ) @skipUnlessDBFeature('supports_select_union') def 
test_union(self): qs1 = Author.objects.annotate( book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')), ).filter(book_alice__isnull=False) qs2 = Author.objects.annotate( book_jane=FilteredRelation('book', condition=Q(book__title__iexact='the book by jane a')), ).filter(book_jane__isnull=False) self.assertSequenceEqual(qs1.union(qs2), [self.author1, self.author2]) @skipUnlessDBFeature('supports_select_intersection') def test_intersection(self): qs1 = Author.objects.annotate( book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')), ).filter(book_alice__isnull=False) qs2 = Author.objects.annotate( book_jane=FilteredRelation('book', condition=Q(book__title__iexact='the book by jane a')), ).filter(book_jane__isnull=False) self.assertSequenceEqual(qs1.intersection(qs2), []) @skipUnlessDBFeature('supports_select_difference') def test_difference(self): qs1 = Author.objects.annotate( book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')), ).filter(book_alice__isnull=False) qs2 = Author.objects.annotate( book_jane=FilteredRelation('book', condition=Q(book__title__iexact='the book by jane a')), ).filter(book_jane__isnull=False) self.assertSequenceEqual(qs1.difference(qs2), [self.author1]) def test_select_for_update(self): self.assertSequenceEqual( Author.objects.annotate( book_jane=FilteredRelation('book', condition=Q(book__title__iexact='the book by jane a')), ).filter(book_jane__isnull=False).select_for_update(), [self.author2] ) def test_defer(self): # One query for the list and one query for the deferred title. with self.assertNumQueries(2): self.assertQuerysetEqual( Author.objects.annotate( book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')), ).filter(book_alice__isnull=False).select_related('book_alice').defer('book_alice__title'), ['Poem by Alice'], lambda author: author.book_alice.title ) def test_only_not_supported(self): msg = 'only() is not supported with FilteredRelation.' 
with self.assertRaisesMessage(ValueError, msg): Author.objects.annotate( book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')), ).filter(book_alice__isnull=False).select_related('book_alice').only('book_alice__state') def test_as_subquery(self): inner_qs = Author.objects.annotate( book_alice=FilteredRelation('book', condition=Q(book__title__iexact='poem by alice')), ).filter(book_alice__isnull=False) qs = Author.objects.filter(id__in=inner_qs) self.assertSequenceEqual(qs, [self.author1]) def test_nested_foreign_key(self): qs = Author.objects.annotate( book_editor_worked_with=FilteredRelation( 'book__editor', condition=Q(book__title__icontains='book by'), ), ).filter( book_editor_worked_with__isnull=False, ).select_related( 'book_editor_worked_with', ).order_by('pk', 'book_editor_worked_with__pk') with self.assertNumQueries(1): self.assertQuerysetEqual(qs, [ (self.author1, self.editor_a), (self.author2, self.editor_b), (self.author2, self.editor_b), ], lambda x: (x, x.book_editor_worked_with)) def test_nested_foreign_key_nested_field(self): qs = Author.objects.annotate( book_editor_worked_with=FilteredRelation( 'book__editor', condition=Q(book__title__icontains='book by') ), ).filter( book_editor_worked_with__isnull=False, ).values( 'name', 'book_editor_worked_with__name', ).order_by('name', 'book_editor_worked_with__name').distinct() self.assertSequenceEqual(qs, [ {'name': self.author1.name, 'book_editor_worked_with__name': self.editor_a.name}, {'name': self.author2.name, 'book_editor_worked_with__name': self.editor_b.name}, ]) def test_nested_foreign_key_filtered_base_object(self): qs = Author.objects.annotate( alice_editors=FilteredRelation( 'book__editor', condition=Q(name='Alice'), ), ).values( 'name', 'alice_editors__pk', ).order_by('name', 'alice_editors__name').distinct() self.assertSequenceEqual(qs, [ {'name': self.author1.name, 'alice_editors__pk': self.editor_a.pk}, {'name': self.author2.name, 'alice_editors__pk': None}, ]) def test_nested_m2m_filtered(self): qs = Book.objects.annotate( favorite_book=FilteredRelation( 'author__favorite_books', condition=Q(author__favorite_books__title__icontains='book by') ), ).values( 'title', 'favorite_book__pk', ).order_by('title', 'favorite_book__title') self.assertSequenceEqual(qs, [ {'title': self.book1.title, 'favorite_book__pk': self.book2.pk}, {'title': self.book1.title, 'favorite_book__pk': self.book3.pk}, {'title': self.book4.title, 'favorite_book__pk': self.book2.pk}, {'title': self.book4.title, 'favorite_book__pk': self.book3.pk}, {'title': self.book2.title, 'favorite_book__pk': None}, {'title': self.book3.title, 'favorite_book__pk': None}, ]) def test_nested_chained_relations(self): qs = Author.objects.annotate( my_books=FilteredRelation( 'book', condition=Q(book__title__icontains='book by'), ), preferred_by_authors=FilteredRelation( 'my_books__preferred_by_authors', condition=Q(my_books__preferred_by_authors__name='Alice'), ), ).annotate( author=F('name'), book_title=F('my_books__title'), preferred_by_author_pk=F('preferred_by_authors'), ).order_by('author', 'book_title', 'preferred_by_author_pk') self.assertQuerysetEqual(qs, [ ('Alice', 'The book by Alice', None), ('Jane', 'The book by Jane A', self.author1.pk), ('Jane', 'The book by Jane B', self.author1.pk), ], lambda x: (x.author, x.book_title, x.preferred_by_author_pk)) def test_deep_nested_foreign_key(self): qs = Book.objects.annotate( author_favorite_book_editor=FilteredRelation( 'author__favorite_books__editor', 
condition=Q(author__favorite_books__title__icontains='Jane A'), ), ).filter( author_favorite_book_editor__isnull=False, ).select_related( 'author_favorite_book_editor', ).order_by('pk', 'author_favorite_book_editor__pk') with self.assertNumQueries(1): self.assertQuerysetEqual(qs, [ (self.book1, self.editor_b), (self.book4, self.editor_b), ], lambda x: (x, x.author_favorite_book_editor)) def test_relation_name_lookup(self): msg = ( "FilteredRelation's relation_name cannot contain lookups (got " "'book__title__icontains')." ) with self.assertRaisesMessage(ValueError, msg): Author.objects.annotate( book_title=FilteredRelation( 'book__title__icontains', condition=Q(book__title='Poem by Alice'), ), ) def test_condition_outside_relation_name(self): msg = ( "FilteredRelation's condition doesn't support relations outside " "the 'book__editor' (got 'book__author__name__icontains')." ) with self.assertRaisesMessage(ValueError, msg): Author.objects.annotate( book_editor=FilteredRelation( 'book__editor', condition=Q(book__author__name__icontains='book'), ), ) def test_condition_deeper_relation_name(self): msg = ( "FilteredRelation's condition doesn't support nested relations " "deeper than the relation_name (got " "'book__editor__name__icontains' for 'book')." ) with self.assertRaisesMessage(ValueError, msg): Author.objects.annotate( book_editor=FilteredRelation( 'book', condition=Q(book__editor__name__icontains='b'), ), ) def test_with_empty_relation_name_error(self): with self.assertRaisesMessage(ValueError, 'relation_name cannot be empty.'): FilteredRelation('', condition=Q(blank='')) def test_with_condition_as_expression_error(self): msg = 'condition argument must be a Q() instance.' expression = Case( When(book__title__iexact='poem by alice', then=True), default=False, ) with self.assertRaisesMessage(ValueError, msg): FilteredRelation('book', condition=expression) def test_with_prefetch_related(self): msg = 'prefetch_related() is not supported with FilteredRelation.' qs = Author.objects.annotate( book_title_contains_b=FilteredRelation('book', condition=Q(book__title__icontains='b')), ).filter( book_title_contains_b__isnull=False, ) with self.assertRaisesMessage(ValueError, msg): qs.prefetch_related('book_title_contains_b') with self.assertRaisesMessage(ValueError, msg): qs.prefetch_related('book_title_contains_b__editor') def test_with_generic_foreign_key(self): self.assertSequenceEqual( Book.objects.annotate( generic_authored_book=FilteredRelation( 'generic_author', condition=Q(generic_author__isnull=False) ), ).filter(generic_authored_book__isnull=False), [self.book1] ) def test_eq(self): self.assertEqual(FilteredRelation('book', condition=Q(book__title='b')), mock.ANY) class FilteredRelationAggregationTests(TestCase): @classmethod def setUpTestData(cls): cls.author1 = Author.objects.create(name='Alice') cls.editor_a = Editor.objects.create(name='a') cls.book1 = Book.objects.create( title='Poem by Alice', editor=cls.editor_a, author=cls.author1, ) cls.borrower1 = Borrower.objects.create(name='Jenny') cls.borrower2 = Borrower.objects.create(name='Kevin') # borrower 1 reserves, rents, and returns book1. Reservation.objects.create( borrower=cls.borrower1, book=cls.book1, state=Reservation.STOPPED, ) RentalSession.objects.create( borrower=cls.borrower1, book=cls.book1, state=RentalSession.STOPPED, ) # borrower2 reserves, rents, and returns book1. 
Reservation.objects.create( borrower=cls.borrower2, book=cls.book1, state=Reservation.STOPPED, ) RentalSession.objects.create( borrower=cls.borrower2, book=cls.book1, state=RentalSession.STOPPED, ) def test_aggregate(self): """ FilteredRelation() not only improves performance but also creates correct results when aggregating with multiple LEFT JOINs. Books can be reserved then rented by a borrower. Each reservation and rental session is recorded with the Reservation and RentalSession models. Every time a reservation or a rental session is over, its state is changed to 'stopped'. Goal: count the number of books that are either currently reserved or rented by borrower1, or available. """ qs = Book.objects.annotate( is_reserved_or_rented_by=Case( When(reservation__state=Reservation.NEW, then=F('reservation__borrower__pk')), When(rental_session__state=RentalSession.NEW, then=F('rental_session__borrower__pk')), default=None, ) ).filter( Q(is_reserved_or_rented_by=self.borrower1.pk) | Q(state=Book.AVAILABLE) ).distinct() self.assertEqual(qs.count(), 1) # Since the count is 1, the same aggregation should also return 1, but # it returns 4 due to row duplication from the multiple LEFT JOINs. self.assertSequenceEqual(qs.annotate(total=Count('pk')).values('total'), [{'total': 4}]) # With FilteredRelation, the result is as expected (1). qs = Book.objects.annotate( active_reservations=FilteredRelation( 'reservation', condition=Q( reservation__state=Reservation.NEW, reservation__borrower=self.borrower1, ) ), ).annotate( active_rental_sessions=FilteredRelation( 'rental_session', condition=Q( rental_session__state=RentalSession.NEW, rental_session__borrower=self.borrower1, ) ), ).filter( (Q(active_reservations__isnull=False) | Q(active_rental_sessions__isnull=False)) | Q(state=Book.AVAILABLE) ).distinct() self.assertEqual(qs.count(), 1) self.assertSequenceEqual(qs.annotate(total=Count('pk')).values('total'), [{'total': 1}]) class FilteredRelationAnalyticalAggregationTests(TestCase): @classmethod def setUpTestData(cls): author = Author.objects.create(name='Author') editor = Editor.objects.create(name='Editor') cls.book1 = Book.objects.create( title='Poem by Alice', editor=editor, author=author, ) cls.book2 = Book.objects.create( title='The book by Jane A', editor=editor, author=author, ) cls.book3 = Book.objects.create( title='The book by Jane B', editor=editor, author=author, ) cls.seller1 = Seller.objects.create(name='Seller 1') cls.seller2 = Seller.objects.create(name='Seller 2') cls.usd = Currency.objects.create(currency='USD') cls.eur = Currency.objects.create(currency='EUR') cls.sales_date1 = date(2020, 7, 6) cls.sales_date2 = date(2020, 7, 7) ExchangeRate.objects.bulk_create([ ExchangeRate( rate_date=cls.sales_date1, from_currency=cls.usd, to_currency=cls.eur, rate=0.40, ), ExchangeRate( rate_date=cls.sales_date1, from_currency=cls.eur, to_currency=cls.usd, rate=1.60, ), ExchangeRate( rate_date=cls.sales_date2, from_currency=cls.usd, to_currency=cls.eur, rate=0.50, ), ExchangeRate( rate_date=cls.sales_date2, from_currency=cls.eur, to_currency=cls.usd, rate=1.50, ), ExchangeRate( rate_date=cls.sales_date2, from_currency=cls.usd, to_currency=cls.usd, rate=1.00, ), ]) BookDailySales.objects.bulk_create([ BookDailySales( book=cls.book1, sale_date=cls.sales_date1, currency=cls.usd, sales=100.00, seller=cls.seller1, ), BookDailySales( book=cls.book2, sale_date=cls.sales_date1, currency=cls.eur, sales=200.00, seller=cls.seller1, ), BookDailySales( book=cls.book1, sale_date=cls.sales_date2, currency=cls.usd, sales=50.00, seller=cls.seller2, ),
BookDailySales( book=cls.book2, sale_date=cls.sales_date2, currency=cls.eur, sales=100.00, seller=cls.seller2, ), ]) def test_aggregate(self): tests = [ Q(daily_sales__sale_date__gte=self.sales_date2), ~Q(daily_sales__seller=self.seller1), ] for condition in tests: with self.subTest(condition=condition): qs = Book.objects.annotate( recent_sales=FilteredRelation('daily_sales', condition=condition), recent_sales_rates=FilteredRelation( 'recent_sales__currency__rates_from', condition=Q( recent_sales__currency__rates_from__rate_date=F('recent_sales__sale_date'), recent_sales__currency__rates_from__to_currency=self.usd, ), ), ).annotate( sales_sum=Sum( F('recent_sales__sales') * F('recent_sales_rates__rate'), output_field=DecimalField(), ), ).values('title', 'sales_sum').order_by( F('sales_sum').desc(nulls_last=True), ) self.assertSequenceEqual(qs, [ {'title': self.book2.title, 'sales_sum': Decimal(150.00)}, {'title': self.book1.title, 'sales_sum': Decimal(50.00)}, {'title': self.book3.title, 'sales_sum': None}, ])
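# Worked expectation for the first condition above (sales on or after
# sales_date2):
#   book1: 50.00 USD * 1.00 (USD->USD rate on 2020-07-07) = 50.00
#   book2: 100.00 EUR * 1.50 (EUR->USD rate on 2020-07-07) = 150.00
#   book3: no matching daily sales, so the LEFT JOIN yields NULL and Sum()
#   returns None.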
8499bf88db03742257e2198e5fd07344850055cf829bd23a3365b543d66c1e1a
from django.contrib.contenttypes.fields import ( GenericForeignKey, GenericRelation, ) from django.contrib.contenttypes.models import ContentType from django.db import models class Author(models.Model): name = models.CharField(max_length=50, unique=True) favorite_books = models.ManyToManyField( 'Book', related_name='preferred_by_authors', related_query_name='preferred_by_authors', ) content_type = models.ForeignKey(ContentType, models.CASCADE, null=True) object_id = models.PositiveIntegerField(null=True) content_object = GenericForeignKey() class Editor(models.Model): name = models.CharField(max_length=255) class Book(models.Model): AVAILABLE = 'available' RESERVED = 'reserved' RENTED = 'rented' STATES = ( (AVAILABLE, 'Available'), (RESERVED, 'reserved'), (RENTED, 'Rented'), ) title = models.CharField(max_length=255) author = models.ForeignKey( Author, models.CASCADE, related_name='books', related_query_name='book', ) editor = models.ForeignKey(Editor, models.CASCADE) generic_author = GenericRelation(Author) state = models.CharField(max_length=9, choices=STATES, default=AVAILABLE) class Borrower(models.Model): name = models.CharField(max_length=50, unique=True) class Reservation(models.Model): NEW = 'new' STOPPED = 'stopped' STATES = ( (NEW, 'New'), (STOPPED, 'Stopped'), ) borrower = models.ForeignKey( Borrower, models.CASCADE, related_name='reservations', related_query_name='reservation', ) book = models.ForeignKey( Book, models.CASCADE, related_name='reservations', related_query_name='reservation', ) state = models.CharField(max_length=7, choices=STATES, default=NEW) class RentalSession(models.Model): NEW = 'new' STOPPED = 'stopped' STATES = ( (NEW, 'New'), (STOPPED, 'Stopped'), ) borrower = models.ForeignKey( Borrower, models.CASCADE, related_name='rental_sessions', related_query_name='rental_session', ) book = models.ForeignKey( Book, models.CASCADE, related_name='rental_sessions', related_query_name='rental_session', ) state = models.CharField(max_length=7, choices=STATES, default=NEW) class Seller(models.Model): name = models.CharField(max_length=255) class Currency(models.Model): currency = models.CharField(max_length=3) class ExchangeRate(models.Model): rate_date = models.DateField() from_currency = models.ForeignKey( Currency, models.CASCADE, related_name='rates_from', ) to_currency = models.ForeignKey( Currency, models.CASCADE, related_name='rates_to', ) rate = models.DecimalField(max_digits=6, decimal_places=4) class BookDailySales(models.Model): book = models.ForeignKey(Book, models.CASCADE, related_name='daily_sales') sale_date = models.DateField() currency = models.ForeignKey(Currency, models.CASCADE) seller = models.ForeignKey(Seller, models.CASCADE) sales = models.DecimalField(max_digits=10, decimal_places=2)
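# A minimal usage sketch for these fixtures (values illustrative):
#
#   author = Author.objects.create(name='A')
#   editor = Editor.objects.create(name='E')
#   book = Book.objects.create(title='T', author=author, editor=editor)
#   borrower = Borrower.objects.create(name='B')
#   Reservation.objects.create(borrower=borrower, book=book)  # state: NEW
#   assert book.state == Book.AVAILABLE  # the default state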
de0adbff1fcb50b8c596efa00ad73c002eca30716f72acc6ed45bc9982432485
from datetime import date from django.forms import DateField, Form, SelectDateWidget from django.test import override_settings from django.utils import translation from django.utils.dates import MONTHS_AP from .base import WidgetTest class SelectDateWidgetTest(WidgetTest): maxDiff = None widget = SelectDateWidget( years=('2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016'), ) def test_render_empty(self): self.check_html(self.widget, 'mydate', '', html=( """ <select name="mydate_month" id="id_mydate_month"> <option selected value="">---</option> <option value="1">January</option> <option value="2">February</option> <option value="3">March</option> <option value="4">April</option> <option value="5">May</option> <option value="6">June</option> <option value="7">July</option> <option value="8">August</option> <option value="9">September</option> <option value="10">October</option> <option value="11">November</option> <option value="12">December</option> </select> <select name="mydate_day" id="id_mydate_day"> <option selected value="">---</option> <option value="1">1</option> <option value="2">2</option> <option value="3">3</option> <option value="4">4</option> <option value="5">5</option> <option value="6">6</option> <option value="7">7</option> <option value="8">8</option> <option value="9">9</option> <option value="10">10</option> <option value="11">11</option> <option value="12">12</option> <option value="13">13</option> <option value="14">14</option> <option value="15">15</option> <option value="16">16</option> <option value="17">17</option> <option value="18">18</option> <option value="19">19</option> <option value="20">20</option> <option value="21">21</option> <option value="22">22</option> <option value="23">23</option> <option value="24">24</option> <option value="25">25</option> <option value="26">26</option> <option value="27">27</option> <option value="28">28</option> <option value="29">29</option> <option value="30">30</option> <option value="31">31</option> </select> <select name="mydate_year" id="id_mydate_year"> <option selected value="">---</option> <option value="2007">2007</option> <option value="2008">2008</option> <option value="2009">2009</option> <option value="2010">2010</option> <option value="2011">2011</option> <option value="2012">2012</option> <option value="2013">2013</option> <option value="2014">2014</option> <option value="2015">2015</option> <option value="2016">2016</option> </select> """ )) def test_render_none(self): """ Rendering the None or '' values should yield the same output. 
""" self.assertHTMLEqual( self.widget.render('mydate', None), self.widget.render('mydate', ''), ) def test_render_string(self): self.check_html(self.widget, 'mydate', '2010-04-15', html=( """ <select name="mydate_month" id="id_mydate_month"> <option value="">---</option> <option value="1">January</option> <option value="2">February</option> <option value="3">March</option> <option value="4" selected>April</option> <option value="5">May</option> <option value="6">June</option> <option value="7">July</option> <option value="8">August</option> <option value="9">September</option> <option value="10">October</option> <option value="11">November</option> <option value="12">December</option> </select> <select name="mydate_day" id="id_mydate_day"> <option value="">---</option> <option value="1">1</option> <option value="2">2</option> <option value="3">3</option> <option value="4">4</option> <option value="5">5</option> <option value="6">6</option> <option value="7">7</option> <option value="8">8</option> <option value="9">9</option> <option value="10">10</option> <option value="11">11</option> <option value="12">12</option> <option value="13">13</option> <option value="14">14</option> <option value="15" selected>15</option> <option value="16">16</option> <option value="17">17</option> <option value="18">18</option> <option value="19">19</option> <option value="20">20</option> <option value="21">21</option> <option value="22">22</option> <option value="23">23</option> <option value="24">24</option> <option value="25">25</option> <option value="26">26</option> <option value="27">27</option> <option value="28">28</option> <option value="29">29</option> <option value="30">30</option> <option value="31">31</option> </select> <select name="mydate_year" id="id_mydate_year"> <option value="">---</option> <option value="2007">2007</option> <option value="2008">2008</option> <option value="2009">2009</option> <option value="2010" selected>2010</option> <option value="2011">2011</option> <option value="2012">2012</option> <option value="2013">2013</option> <option value="2014">2014</option> <option value="2015">2015</option> <option value="2016">2016</option> </select> """ )) def test_render_datetime(self): self.assertHTMLEqual( self.widget.render('mydate', date(2010, 4, 15)), self.widget.render('mydate', '2010-04-15'), ) def test_render_invalid_date(self): """ Invalid dates should still render the failed date. 
""" self.check_html(self.widget, 'mydate', '2010-02-31', html=( """ <select name="mydate_month" id="id_mydate_month"> <option value="">---</option> <option value="1">January</option> <option value="2" selected>February</option> <option value="3">March</option> <option value="4">April</option> <option value="5">May</option> <option value="6">June</option> <option value="7">July</option> <option value="8">August</option> <option value="9">September</option> <option value="10">October</option> <option value="11">November</option> <option value="12">December</option> </select> <select name="mydate_day" id="id_mydate_day"> <option value="">---</option> <option value="1">1</option> <option value="2">2</option> <option value="3">3</option> <option value="4">4</option> <option value="5">5</option> <option value="6">6</option> <option value="7">7</option> <option value="8">8</option> <option value="9">9</option> <option value="10">10</option> <option value="11">11</option> <option value="12">12</option> <option value="13">13</option> <option value="14">14</option> <option value="15">15</option> <option value="16">16</option> <option value="17">17</option> <option value="18">18</option> <option value="19">19</option> <option value="20">20</option> <option value="21">21</option> <option value="22">22</option> <option value="23">23</option> <option value="24">24</option> <option value="25">25</option> <option value="26">26</option> <option value="27">27</option> <option value="28">28</option> <option value="29">29</option> <option value="30">30</option> <option value="31" selected>31</option> </select> <select name="mydate_year" id="id_mydate_year"> <option value="">---</option> <option value="2007">2007</option> <option value="2008">2008</option> <option value="2009">2009</option> <option value="2010" selected>2010</option> <option value="2011">2011</option> <option value="2012">2012</option> <option value="2013">2013</option> <option value="2014">2014</option> <option value="2015">2015</option> <option value="2016">2016</option> </select> """ )) def test_custom_months(self): widget = SelectDateWidget(months=MONTHS_AP, years=('2013',)) self.check_html(widget, 'mydate', '', html=( """ <select name="mydate_month" id="id_mydate_month"> <option selected value="">---</option> <option value="1">Jan.</option> <option value="2">Feb.</option> <option value="3">March</option> <option value="4">April</option> <option value="5">May</option> <option value="6">June</option> <option value="7">July</option> <option value="8">Aug.</option> <option value="9">Sept.</option> <option value="10">Oct.</option> <option value="11">Nov.</option> <option value="12">Dec.</option> </select> <select name="mydate_day" id="id_mydate_day"> <option selected value="">---</option> <option value="1">1</option> <option value="2">2</option> <option value="3">3</option> <option value="4">4</option> <option value="5">5</option> <option value="6">6</option> <option value="7">7</option> <option value="8">8</option> <option value="9">9</option> <option value="10">10</option> <option value="11">11</option> <option value="12">12</option> <option value="13">13</option> <option value="14">14</option> <option value="15">15</option> <option value="16">16</option> <option value="17">17</option> <option value="18">18</option> <option value="19">19</option> <option value="20">20</option> <option value="21">21</option> <option value="22">22</option> <option value="23">23</option> <option value="24">24</option> <option value="25">25</option> <option 
value="26">26</option> <option value="27">27</option> <option value="28">28</option> <option value="29">29</option> <option value="30">30</option> <option value="31">31</option> </select> <select name="mydate_year" id="id_mydate_year"> <option selected value="">---</option> <option value="2013">2013</option> </select> """ )) def test_selectdate_required(self): class GetNotRequiredDate(Form): mydate = DateField(widget=SelectDateWidget, required=False) class GetRequiredDate(Form): mydate = DateField(widget=SelectDateWidget, required=True) self.assertFalse(GetNotRequiredDate().fields['mydate'].widget.is_required) self.assertTrue(GetRequiredDate().fields['mydate'].widget.is_required) def test_selectdate_empty_label(self): w = SelectDateWidget(years=('2014',), empty_label='empty_label') # Rendering the default state with empty_label set as string. self.assertInHTML('<option selected value="">empty_label</option>', w.render('mydate', ''), count=3) w = SelectDateWidget(years=('2014',), empty_label=('empty_year', 'empty_month', 'empty_day')) # Rendering the default state with empty_label tuple. self.assertHTMLEqual( w.render('mydate', ''), """ <select name="mydate_month" id="id_mydate_month"> <option selected value="">empty_month</option> <option value="1">January</option> <option value="2">February</option> <option value="3">March</option> <option value="4">April</option> <option value="5">May</option> <option value="6">June</option> <option value="7">July</option> <option value="8">August</option> <option value="9">September</option> <option value="10">October</option> <option value="11">November</option> <option value="12">December</option> </select> <select name="mydate_day" id="id_mydate_day"> <option selected value="">empty_day</option> <option value="1">1</option> <option value="2">2</option> <option value="3">3</option> <option value="4">4</option> <option value="5">5</option> <option value="6">6</option> <option value="7">7</option> <option value="8">8</option> <option value="9">9</option> <option value="10">10</option> <option value="11">11</option> <option value="12">12</option> <option value="13">13</option> <option value="14">14</option> <option value="15">15</option> <option value="16">16</option> <option value="17">17</option> <option value="18">18</option> <option value="19">19</option> <option value="20">20</option> <option value="21">21</option> <option value="22">22</option> <option value="23">23</option> <option value="24">24</option> <option value="25">25</option> <option value="26">26</option> <option value="27">27</option> <option value="28">28</option> <option value="29">29</option> <option value="30">30</option> <option value="31">31</option> </select> <select name="mydate_year" id="id_mydate_year"> <option selected value="">empty_year</option> <option value="2014">2014</option> </select> """, ) with self.assertRaisesMessage(ValueError, 'empty_label list/tuple must have 3 elements.'): SelectDateWidget(years=('2014',), empty_label=('not enough', 'values')) @override_settings(USE_L10N=True) @translation.override('nl') def test_l10n(self): w = SelectDateWidget( years=('2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016') ) self.assertEqual( w.value_from_datadict({'date_year': '2010', 'date_month': '8', 'date_day': '13'}, {}, 'date'), '13-08-2010', ) self.assertHTMLEqual( w.render('date', '13-08-2010'), """ <select name="date_day" id="id_date_day"> <option value="">---</option> <option value="1">1</option> <option value="2">2</option> <option 
value="3">3</option> <option value="4">4</option> <option value="5">5</option> <option value="6">6</option> <option value="7">7</option> <option value="8">8</option> <option value="9">9</option> <option value="10">10</option> <option value="11">11</option> <option value="12">12</option> <option value="13" selected>13</option> <option value="14">14</option> <option value="15">15</option> <option value="16">16</option> <option value="17">17</option> <option value="18">18</option> <option value="19">19</option> <option value="20">20</option> <option value="21">21</option> <option value="22">22</option> <option value="23">23</option> <option value="24">24</option> <option value="25">25</option> <option value="26">26</option> <option value="27">27</option> <option value="28">28</option> <option value="29">29</option> <option value="30">30</option> <option value="31">31</option> </select> <select name="date_month" id="id_date_month"> <option value="">---</option> <option value="1">januari</option> <option value="2">februari</option> <option value="3">maart</option> <option value="4">april</option> <option value="5">mei</option> <option value="6">juni</option> <option value="7">juli</option> <option value="8" selected>augustus</option> <option value="9">september</option> <option value="10">oktober</option> <option value="11">november</option> <option value="12">december</option> </select> <select name="date_year" id="id_date_year"> <option value="">---</option> <option value="2007">2007</option> <option value="2008">2008</option> <option value="2009">2009</option> <option value="2010" selected>2010</option> <option value="2011">2011</option> <option value="2012">2012</option> <option value="2013">2013</option> <option value="2014">2014</option> <option value="2015">2015</option> <option value="2016">2016</option> </select> """, ) # Even with an invalid date, the widget should reflect the entered value (#17401). self.assertEqual(w.render('mydate', '2010-02-30').count('selected'), 3) # Years before 1900 should work. w = SelectDateWidget(years=('1899',)) self.assertEqual( w.value_from_datadict({'date_year': '1899', 'date_month': '8', 'date_day': '13'}, {}, 'date'), '13-08-1899', ) # And years before 1000 (demonstrating the need for datetime_safe). 
w = SelectDateWidget(years=('0001',)) self.assertEqual( w.value_from_datadict({'date_year': '0001', 'date_month': '8', 'date_day': '13'}, {}, 'date'), '13-08-0001', ) @override_settings(USE_L10N=False, DATE_INPUT_FORMATS=['%d.%m.%Y']) def test_custom_input_format(self): w = SelectDateWidget(years=('0001', '1899', '2009', '2010')) for values, expected_value in ( (('0001', '8', '13'), '13.08.0001'), (('1899', '7', '11'), '11.07.1899'), (('2009', '3', '7'), '07.03.2009'), ): with self.subTest(values=values): data = { 'field_%s' % field: value for field, value in zip(('year', 'month', 'day'), values) } self.assertEqual(w.value_from_datadict(data, {}, 'field'), expected_value) expected_dict = { field: int(value) for field, value in zip(('year', 'month', 'day'), values) } self.assertEqual(w.format_value(expected_value), expected_dict) def test_format_value(self): valid_formats = [ '2000-1-1', '2000-10-15', '2000-01-01', '2000-01-0', '2000-0-01', '2000-0-0', '0-01-01', '0-01-0', '0-0-01', '0-0-0', ] for value in valid_formats: year, month, day = (int(x) or '' for x in value.split('-')) with self.subTest(value=value): self.assertEqual(self.widget.format_value(value), {'day': day, 'month': month, 'year': year}) invalid_formats = [ '2000-01-001', '2000-001-01', '2-01-01', '20-01-01', '200-01-01', '20000-01-01', ] for value in invalid_formats: with self.subTest(value=value): self.assertEqual(self.widget.format_value(value), {'day': None, 'month': None, 'year': None}) def test_value_from_datadict(self): tests = [ (('2000', '12', '1'), '2000-12-01'), (('', '12', '1'), '0-12-1'), (('2000', '', '1'), '2000-0-1'), (('2000', '12', ''), '2000-12-0'), (('', '', '', ''), None), ((None, '12', '1'), None), (('2000', None, '1'), None), (('2000', '12', None), None), ] for values, expected in tests: with self.subTest(values=values): data = {} for field_name, value in zip(('year', 'month', 'day'), values): if value is not None: data['field_%s' % field_name] = value self.assertEqual(self.widget.value_from_datadict(data, {}, 'field'), expected) def test_value_omitted_from_data(self): self.assertIs(self.widget.value_omitted_from_data({}, {}, 'field'), True) self.assertIs(self.widget.value_omitted_from_data({'field_month': '12'}, {}, 'field'), False) self.assertIs(self.widget.value_omitted_from_data({'field_year': '2000'}, {}, 'field'), False) self.assertIs(self.widget.value_omitted_from_data({'field_day': '1'}, {}, 'field'), False) data = {'field_day': '1', 'field_month': '12', 'field_year': '2000'} self.assertIs(self.widget.value_omitted_from_data(data, {}, 'field'), False) @override_settings(USE_THOUSAND_SEPARATOR=True, USE_L10N=True) def test_years_rendered_without_separator(self): widget = SelectDateWidget(years=(2007,)) self.check_html(widget, 'mydate', '', html=( """ <select name="mydate_month" id="id_mydate_month"> <option selected value="">---</option> <option value="1">January</option> <option value="2">February</option> <option value="3">March</option> <option value="4">April</option> <option value="5">May</option> <option value="6">June</option> <option value="7">July</option> <option value="8">August</option> <option value="9">September</option> <option value="10">October</option> <option value="11">November</option> <option value="12">December</option> </select> <select name="mydate_day" id="id_mydate_day"> <option selected value="">---</option> <option value="1">1</option> <option value="2">2</option> <option value="3">3</option> <option value="4">4</option> <option value="5">5</option> <option 
value="6">6</option> <option value="7">7</option> <option value="8">8</option> <option value="9">9</option> <option value="10">10</option> <option value="11">11</option> <option value="12">12</option> <option value="13">13</option> <option value="14">14</option> <option value="15">15</option> <option value="16">16</option> <option value="17">17</option> <option value="18">18</option> <option value="19">19</option> <option value="20">20</option> <option value="21">21</option> <option value="22">22</option> <option value="23">23</option> <option value="24">24</option> <option value="25">25</option> <option value="26">26</option> <option value="27">27</option> <option value="28">28</option> <option value="29">29</option> <option value="30">30</option> <option value="31">31</option> </select> <select name="mydate_year" id="id_mydate_year"> <option selected value="">---</option> <option value="2007">2007</option> </select> """ ))
import importlib import inspect import os import re import sys import tempfile import threading from io import StringIO from pathlib import Path from unittest import mock from django.core import mail from django.core.files.uploadedfile import SimpleUploadedFile from django.db import DatabaseError, connection from django.http import Http404 from django.shortcuts import render from django.template import TemplateDoesNotExist from django.test import RequestFactory, SimpleTestCase, override_settings from django.test.utils import LoggingCaptureMixin from django.urls import path, reverse from django.urls.converters import IntConverter from django.utils.functional import SimpleLazyObject from django.utils.regex_helper import _lazy_re_compile from django.utils.safestring import mark_safe from django.views.debug import ( CallableSettingWrapper, ExceptionCycleWarning, ExceptionReporter, Path as DebugPath, SafeExceptionReporterFilter, default_urlconf, get_default_exception_reporter_filter, technical_404_response, technical_500_response, ) from django.views.decorators.debug import ( sensitive_post_parameters, sensitive_variables, ) from ..views import ( custom_exception_reporter_filter_view, index_page, multivalue_dict_key_error, non_sensitive_view, paranoid_view, sensitive_args_function_caller, sensitive_kwargs_function_caller, sensitive_method_view, sensitive_view, ) class User: def __str__(self): return 'jacob' class WithoutEmptyPathUrls: urlpatterns = [path('url/', index_page, name='url')] class CallableSettingWrapperTests(SimpleTestCase): """ Unittests for CallableSettingWrapper """ def test_repr(self): class WrappedCallable: def __repr__(self): return "repr from the wrapped callable" def __call__(self): pass actual = repr(CallableSettingWrapper(WrappedCallable())) self.assertEqual(actual, "repr from the wrapped callable") @override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls') class DebugViewTests(SimpleTestCase): def test_files(self): with self.assertLogs('django.request', 'ERROR'): response = self.client.get('/raises/') self.assertEqual(response.status_code, 500) data = { 'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'), } with self.assertLogs('django.request', 'ERROR'): response = self.client.post('/raises/', data) self.assertContains(response, 'file_data.txt', status_code=500) self.assertNotContains(response, 'haha', status_code=500) def test_400(self): # When DEBUG=True, technical_500_template() is called. with self.assertLogs('django.security', 'WARNING'): response = self.client.get('/raises400/') self.assertContains(response, '<div class="context" id="', status_code=400) # Ensure no 403.html template exists to test the default case. @override_settings(TEMPLATES=[{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', }]) def test_403(self): response = self.client.get('/raises403/') self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403) # Set up a test 403.html template. 
@override_settings(TEMPLATES=[{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'OPTIONS': { 'loaders': [ ('django.template.loaders.locmem.Loader', { '403.html': 'This is a test template for a 403 error ({{ exception }}).', }), ], }, }]) def test_403_template(self): response = self.client.get('/raises403/') self.assertContains(response, 'test template', status_code=403) self.assertContains(response, '(Insufficient Permissions).', status_code=403) def test_404(self): response = self.client.get('/raises404/') self.assertEqual(response.status_code, 404) self.assertContains(response, '<code>not-in-urls</code>, didn’t match', status_code=404) def test_404_not_in_urls(self): response = self.client.get('/not-in-urls') self.assertNotContains(response, "Raised by:", status_code=404) self.assertContains(response, "Django tried these URL patterns", status_code=404) self.assertContains(response, '<code>not-in-urls</code>, didn’t match', status_code=404) # Pattern and view name of a RegexURLPattern appear. self.assertContains(response, r"^regex-post/(?P&lt;pk&gt;[0-9]+)/$", status_code=404) self.assertContains(response, "[name='regex-post']", status_code=404) # Pattern and view name of a RoutePattern appear. self.assertContains(response, r"path-post/&lt;int:pk&gt;/", status_code=404) self.assertContains(response, "[name='path-post']", status_code=404) @override_settings(ROOT_URLCONF=WithoutEmptyPathUrls) def test_404_empty_path_not_in_urls(self): response = self.client.get('/') self.assertContains(response, 'The empty path didn’t match any of these.', status_code=404) def test_technical_404(self): response = self.client.get('/technical404/') self.assertContains(response, "Raised by:", status_code=404) self.assertContains(response, "view_tests.views.technical404", status_code=404) def test_classbased_technical_404(self): response = self.client.get('/classbased404/') self.assertContains(response, "Raised by:", status_code=404) self.assertContains(response, "view_tests.views.Http404View", status_code=404) def test_non_l10ned_numeric_ids(self): """ Numeric IDs and the line numbers in fancy traceback context blocks shouldn't be localized. """ with self.settings(DEBUG=True, USE_L10N=True): with self.assertLogs('django.request', 'ERROR'): response = self.client.get('/raises500/') # We look for an HTML fragment of the form # '<div class="context" id="c38123208">', not '<div class="context" id="c38,123,208"' self.assertContains(response, '<div class="context" id="', status_code=500) match = re.search(b'<div class="context" id="(?P<id>[^"]+)">', response.content) self.assertIsNotNone(match) id_repr = match['id'] self.assertFalse( re.search(b'[^c0-9]', id_repr), "Numeric IDs in debug response HTML page shouldn't be localized (value: %s)."
% id_repr.decode() ) def test_template_exceptions(self): with self.assertLogs('django.request', 'ERROR'): try: self.client.get(reverse('template_exception')) except Exception: raising_loc = inspect.trace()[-1][-2][0].strip() self.assertNotEqual( raising_loc.find("raise Exception('boom')"), -1, "Failed to find 'raise Exception' in last frame of " "traceback, instead found: %s" % raising_loc ) def test_template_loader_postmortem(self): """Tests for a nonexistent template file""" template_name = "notfound.html" with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile: tempdir = os.path.dirname(tmpfile.name) template_path = os.path.join(tempdir, template_name) with override_settings(TEMPLATES=[{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [tempdir], }]), self.assertLogs('django.request', 'ERROR'): response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name})) self.assertContains(response, "%s (Source does not exist)" % template_path, status_code=500, count=2) # Assert as HTML. self.assertContains( response, '<li><code>django.template.loaders.filesystem.Loader</code>: ' '%s (Source does not exist)</li>' % os.path.join(tempdir, 'notfound.html'), status_code=500, html=True, ) def test_no_template_source_loaders(self): """ Make sure if you don't specify a template, the debug view doesn't blow up. """ with self.assertLogs('django.request', 'ERROR'): with self.assertRaises(TemplateDoesNotExist): self.client.get('/render_no_template/') @override_settings(ROOT_URLCONF='view_tests.default_urls') def test_default_urlconf_template(self): """ Make sure that the default URLconf template is shown instead of the technical 404 page, if the user has not altered their URLconf yet. """ response = self.client.get('/') self.assertContains( response, "<h2>The install worked successfully! Congratulations!</h2>" ) @override_settings(ROOT_URLCONF='view_tests.regression_21530_urls') def test_regression_21530(self): """ Regression test for bug #21530. If the admin app include is replaced with exactly one url pattern, then the technical 404 template should be displayed. The bug here was that an AttributeError caused a 500 response. """ response = self.client.get('/') self.assertContains( response, "Page not found <span>(404)</span>", status_code=404 ) def test_template_encoding(self): """ The templates are loaded directly, not via a template loader, and should be opened as utf-8 charset as is the default specified on template engines.
""" with mock.patch.object(DebugPath, 'open') as m: default_urlconf(None) m.assert_called_once_with(encoding='utf-8') m.reset_mock() technical_404_response(mock.MagicMock(), mock.Mock()) m.assert_called_once_with(encoding='utf-8') def test_technical_404_converter_raise_404(self): with mock.patch.object(IntConverter, 'to_python', side_effect=Http404): response = self.client.get('/path-post/1/') self.assertContains(response, 'Page not found', status_code=404) def test_exception_reporter_from_request(self): with self.assertLogs('django.request', 'ERROR'): response = self.client.get('/custom_reporter_class_view/') self.assertContains(response, 'custom traceback text', status_code=500) @override_settings(DEFAULT_EXCEPTION_REPORTER='view_tests.views.CustomExceptionReporter') def test_exception_reporter_from_settings(self): with self.assertLogs('django.request', 'ERROR'): response = self.client.get('/raises500/') self.assertContains(response, 'custom traceback text', status_code=500) class DebugViewQueriesAllowedTests(SimpleTestCase): # May need a query to initialize MySQL connection databases = {'default'} def test_handle_db_exception(self): """ Ensure the debug view works when a database exception is raised by performing an invalid query and passing the exception to the debug view. """ with connection.cursor() as cursor: try: cursor.execute('INVALID SQL') except DatabaseError: exc_info = sys.exc_info() rf = RequestFactory() response = technical_500_response(rf.get('/'), *exc_info) self.assertContains(response, 'OperationalError at /', status_code=500) @override_settings( DEBUG=True, ROOT_URLCONF='view_tests.urls', # No template directories are configured, so no templates will be found. TEMPLATES=[{ 'BACKEND': 'django.template.backends.dummy.TemplateStrings', }], ) class NonDjangoTemplatesDebugViewTests(SimpleTestCase): def test_400(self): # When DEBUG=True, technical_500_template() is called. with self.assertLogs('django.security', 'WARNING'): response = self.client.get('/raises400/') self.assertContains(response, '<div class="context" id="', status_code=400) def test_403(self): response = self.client.get('/raises403/') self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403) def test_404(self): response = self.client.get('/raises404/') self.assertEqual(response.status_code, 404) def test_template_not_found_error(self): # Raises a TemplateDoesNotExist exception and shows the debug view. 
url = reverse('raises_template_does_not_exist', kwargs={"path": "notfound.html"}) with self.assertLogs('django.request', 'ERROR'): response = self.client.get(url) self.assertContains(response, '<div class="context" id="', status_code=500) class ExceptionReporterTests(SimpleTestCase): rf = RequestFactory() def test_request_and_exception(self): "A simple exception report can be generated" try: request = self.rf.get('/test_view/') request.user = User() raise ValueError("Can't find my keys") except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(request, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertInHTML('<h1>ValueError at /test_view/</h1>', html) self.assertIn('<pre class="exception_value">Can&#x27;t find my keys</pre>', html) self.assertIn('<th>Request Method:</th>', html) self.assertIn('<th>Request URL:</th>', html) self.assertIn('<h3 id="user-info">USER</h3>', html) self.assertIn('<p>jacob</p>', html) self.assertIn('<th>Exception Type:</th>', html) self.assertIn('<th>Exception Value:</th>', html) self.assertIn('<h2>Traceback ', html) self.assertIn('<h2>Request information</h2>', html) self.assertNotIn('<p>Request data not supplied</p>', html) self.assertIn('<p>No POST data</p>', html) def test_no_request(self): "An exception report can be generated without request" try: raise ValueError("Can't find my keys") except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertInHTML('<h1>ValueError</h1>', html) self.assertIn('<pre class="exception_value">Can&#x27;t find my keys</pre>', html) self.assertNotIn('<th>Request Method:</th>', html) self.assertNotIn('<th>Request URL:</th>', html) self.assertNotIn('<h3 id="user-info">USER</h3>', html) self.assertIn('<th>Exception Type:</th>', html) self.assertIn('<th>Exception Value:</th>', html) self.assertIn('<h2>Traceback ', html) self.assertIn('<h2>Request information</h2>', html) self.assertIn('<p>Request data not supplied</p>', html) def test_eol_support(self): """The ExceptionReporter supports Unix, Windows and Macintosh EOL markers""" LINES = ['print %d' % i for i in range(1, 6)] reporter = ExceptionReporter(None, None, None, None) for newline in ['\n', '\r\n', '\r']: fd, filename = tempfile.mkstemp(text=False) os.write(fd, (newline.join(LINES) + newline).encode()) os.close(fd) try: self.assertEqual( reporter._get_lines_from_file(filename, 3, 2), (1, LINES[1:3], LINES[3], LINES[4:]) ) finally: os.unlink(filename) def test_no_exception(self): "An exception report can be generated for just a request" request = self.rf.get('/test_view/') reporter = ExceptionReporter(request, None, None, None) html = reporter.get_traceback_html() self.assertInHTML('<h1>Report at /test_view/</h1>', html) self.assertIn('<pre class="exception_value">No exception message supplied</pre>', html) self.assertIn('<th>Request Method:</th>', html) self.assertIn('<th>Request URL:</th>', html) self.assertNotIn('<th>Exception Type:</th>', html) self.assertNotIn('<th>Exception Value:</th>', html) self.assertNotIn('<h2>Traceback ', html) self.assertIn('<h2>Request information</h2>', html) self.assertNotIn('<p>Request data not supplied</p>', html) def test_suppressed_context(self): try: try: raise RuntimeError("Can't find my keys") except RuntimeError: raise ValueError("Can't find my keys") from None except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) 
html = reporter.get_traceback_html() self.assertInHTML('<h1>ValueError</h1>', html) self.assertIn('<pre class="exception_value">Can&#x27;t find my keys</pre>', html) self.assertIn('<th>Exception Type:</th>', html) self.assertIn('<th>Exception Value:</th>', html) self.assertIn('<h2>Traceback ', html) self.assertIn('<h2>Request information</h2>', html) self.assertIn('<p>Request data not supplied</p>', html) self.assertNotIn('During handling of the above exception', html) def test_reporting_of_nested_exceptions(self): request = self.rf.get('/test_view/') try: try: raise AttributeError(mark_safe('<p>Top level</p>')) except AttributeError as explicit: try: raise ValueError(mark_safe('<p>Second exception</p>')) from explicit except ValueError: raise IndexError(mark_safe('<p>Final exception</p>')) except Exception: # Custom exception handler, just pass it into ExceptionReporter exc_type, exc_value, tb = sys.exc_info() explicit_exc = 'The above exception ({0}) was the direct cause of the following exception:' implicit_exc = 'During handling of the above exception ({0}), another exception occurred:' reporter = ExceptionReporter(request, exc_type, exc_value, tb) html = reporter.get_traceback_html() # Both messages are twice on page -- one rendered as html, # one as plain text (for pastebin) self.assertEqual(2, html.count(explicit_exc.format('&lt;p&gt;Top level&lt;/p&gt;'))) self.assertEqual(2, html.count(implicit_exc.format('&lt;p&gt;Second exception&lt;/p&gt;'))) self.assertEqual(10, html.count('&lt;p&gt;Final exception&lt;/p&gt;')) text = reporter.get_traceback_text() self.assertIn(explicit_exc.format('<p>Top level</p>'), text) self.assertIn(implicit_exc.format('<p>Second exception</p>'), text) self.assertEqual(3, text.count('<p>Final exception</p>')) def test_reporting_frames_without_source(self): try: source = "def funcName():\n raise Error('Whoops')\nfuncName()" namespace = {} code = compile(source, 'generated', 'exec') exec(code, namespace) except Exception: exc_type, exc_value, tb = sys.exc_info() request = self.rf.get('/test_view/') reporter = ExceptionReporter(request, exc_type, exc_value, tb) frames = reporter.get_traceback_frames() last_frame = frames[-1] self.assertEqual(last_frame['context_line'], '<source code not available>') self.assertEqual(last_frame['filename'], 'generated') self.assertEqual(last_frame['function'], 'funcName') self.assertEqual(last_frame['lineno'], 2) html = reporter.get_traceback_html() self.assertIn( '<span class="fname">generated</span>, line 2, in funcName', html, ) self.assertIn( '<code class="fname">generated</code>, line 2, in funcName', html, ) self.assertIn( '"generated", line 2, in funcName\n' ' &lt;source code not available&gt;', html, ) text = reporter.get_traceback_text() self.assertIn( '"generated", line 2, in funcName\n' ' <source code not available>', text, ) def test_reporting_frames_source_not_match(self): try: source = "def funcName():\n raise Error('Whoops')\nfuncName()" namespace = {} code = compile(source, 'generated', 'exec') exec(code, namespace) except Exception: exc_type, exc_value, tb = sys.exc_info() with mock.patch( 'django.views.debug.ExceptionReporter._get_source', return_value=['wrong source'], ): request = self.rf.get('/test_view/') reporter = ExceptionReporter(request, exc_type, exc_value, tb) frames = reporter.get_traceback_frames() last_frame = frames[-1] self.assertEqual(last_frame['context_line'], '<source code not available>') self.assertEqual(last_frame['filename'], 'generated') self.assertEqual(last_frame['function'], 
'funcName') self.assertEqual(last_frame['lineno'], 2) html = reporter.get_traceback_html() self.assertIn( '<span class="fname">generated</span>, line 2, in funcName', html, ) self.assertIn( '<code class="fname">generated</code>, line 2, in funcName', html, ) self.assertIn( '"generated", line 2, in funcName\n' ' &lt;source code not available&gt;', html, ) text = reporter.get_traceback_text() self.assertIn( '"generated", line 2, in funcName\n' ' <source code not available>', text, ) def test_reporting_frames_for_cyclic_reference(self): try: def test_func(): try: raise RuntimeError('outer') from RuntimeError('inner') except RuntimeError as exc: raise exc.__cause__ test_func() except Exception: exc_type, exc_value, tb = sys.exc_info() request = self.rf.get('/test_view/') reporter = ExceptionReporter(request, exc_type, exc_value, tb) def generate_traceback_frames(*args, **kwargs): nonlocal tb_frames tb_frames = reporter.get_traceback_frames() tb_frames = None tb_generator = threading.Thread(target=generate_traceback_frames, daemon=True) msg = ( "Cycle in the exception chain detected: exception 'inner' " "encountered again." ) with self.assertWarnsMessage(ExceptionCycleWarning, msg): tb_generator.start() tb_generator.join(timeout=5) if tb_generator.is_alive(): # tb_generator is a daemon that runs until the main thread/process # exits. This is resource heavy when running the full test suite. # Setting the following values to None makes # reporter.get_traceback_frames() exit early. exc_value.__traceback__ = exc_value.__context__ = exc_value.__cause__ = None tb_generator.join() self.fail('Cyclic reference in Exception Reporter.get_traceback_frames()') if tb_frames is None: # can happen if the thread generating traceback got killed # or exception while generating the traceback self.fail('Traceback generation failed') last_frame = tb_frames[-1] self.assertIn('raise exc.__cause__', last_frame['context_line']) self.assertEqual(last_frame['filename'], __file__) self.assertEqual(last_frame['function'], 'test_func') def test_request_and_message(self): "A message can be provided in addition to a request" request = self.rf.get('/test_view/') reporter = ExceptionReporter(request, None, "I'm a little teapot", None) html = reporter.get_traceback_html() self.assertInHTML('<h1>Report at /test_view/</h1>', html) self.assertIn('<pre class="exception_value">I&#x27;m a little teapot</pre>', html) self.assertIn('<th>Request Method:</th>', html) self.assertIn('<th>Request URL:</th>', html) self.assertNotIn('<th>Exception Type:</th>', html) self.assertNotIn('<th>Exception Value:</th>', html) self.assertNotIn('<h2>Traceback ', html) self.assertIn('<h2>Request information</h2>', html) self.assertNotIn('<p>Request data not supplied</p>', html) def test_message_only(self): reporter = ExceptionReporter(None, None, "I'm a little teapot", None) html = reporter.get_traceback_html() self.assertInHTML('<h1>Report</h1>', html) self.assertIn('<pre class="exception_value">I&#x27;m a little teapot</pre>', html) self.assertNotIn('<th>Request Method:</th>', html) self.assertNotIn('<th>Request URL:</th>', html) self.assertNotIn('<th>Exception Type:</th>', html) self.assertNotIn('<th>Exception Value:</th>', html) self.assertNotIn('<h2>Traceback ', html) self.assertIn('<h2>Request information</h2>', html) self.assertIn('<p>Request data not supplied</p>', html) def test_non_utf8_values_handling(self): "Non-UTF-8 exceptions/values should not make the output generation choke." 
try: class NonUtf8Output(Exception): def __repr__(self): return b'EXC\xe9EXC' somevar = b'VAL\xe9VAL' # NOQA raise NonUtf8Output() except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertIn('VAL\\xe9VAL', html) self.assertIn('EXC\\xe9EXC', html) def test_local_variable_escaping(self): """Safe strings in local variables are escaped.""" try: local = mark_safe('<p>Local variable</p>') raise ValueError(local) except Exception: exc_type, exc_value, tb = sys.exc_info() html = ExceptionReporter(None, exc_type, exc_value, tb).get_traceback_html() self.assertIn('<td class="code"><pre>&#x27;&lt;p&gt;Local variable&lt;/p&gt;&#x27;</pre></td>', html) def test_unprintable_values_handling(self): "Unprintable values should not make the output generation choke." try: class OomOutput: def __repr__(self): raise MemoryError('OOM') oomvalue = OomOutput() # NOQA raise ValueError() except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertIn('<td class="code"><pre>Error in formatting', html) def test_too_large_values_handling(self): "Large values should not create a large HTML." large = 256 * 1024 repr_of_str_adds = len(repr('')) try: class LargeOutput: def __repr__(self): return repr('A' * large) largevalue = LargeOutput() # NOQA raise ValueError() except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertEqual(len(html) // 1024 // 128, 0) # still fit in 128Kb self.assertIn('&lt;trimmed %d bytes string&gt;' % (large + repr_of_str_adds,), html) def test_encoding_error(self): """ A UnicodeError displays a portion of the problematic string. HTML in safe strings is escaped. """ try: mark_safe('abcdefghijkl<p>mnὀp</p>qrstuwxyz').encode('ascii') except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertIn('<h2>Unicode error hint</h2>', html) self.assertIn('The string that could not be encoded/decoded was: ', html) self.assertIn('<strong>&lt;p&gt;mnὀp&lt;/p&gt;</strong>', html) def test_unfrozen_importlib(self): """ importlib is not a frozen app, but its loader thinks it's frozen which results in an ImportError. Refs #21443. """ try: request = self.rf.get('/test_view/') importlib.import_module('abc.def.invalid.name') except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(request, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertInHTML('<h1>ModuleNotFoundError at /test_view/</h1>', html) def test_ignore_traceback_evaluation_exceptions(self): """ Don't trip over exceptions generated by crafted objects when evaluating them while cleansing (#24455). """ class BrokenEvaluation(Exception): pass def broken_setup(): raise BrokenEvaluation request = self.rf.get('/test_view/') broken_lazy = SimpleLazyObject(broken_setup) try: bool(broken_lazy) except BrokenEvaluation: exc_type, exc_value, tb = sys.exc_info() self.assertIn( "BrokenEvaluation", ExceptionReporter(request, exc_type, exc_value, tb).get_traceback_html(), "Evaluation exception reason not mentioned in traceback" ) @override_settings(ALLOWED_HOSTS='example.com') def test_disallowed_host(self): "An exception report can be generated even for a disallowed host." 
request = self.rf.get('/', HTTP_HOST='evil.com') reporter = ExceptionReporter(request, None, None, None) html = reporter.get_traceback_html() self.assertIn("http://evil.com/", html) def test_request_with_items_key(self): """ An exception report can be generated for requests with 'items' in request GET, POST, FILES, or COOKIES QueryDicts. """ value = '<td>items</td><td class="code"><pre>&#x27;Oops&#x27;</pre></td>' # GET request = self.rf.get('/test_view/?items=Oops') reporter = ExceptionReporter(request, None, None, None) html = reporter.get_traceback_html() self.assertInHTML(value, html) # POST request = self.rf.post('/test_view/', data={'items': 'Oops'}) reporter = ExceptionReporter(request, None, None, None) html = reporter.get_traceback_html() self.assertInHTML(value, html) # FILES fp = StringIO('filecontent') request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp}) reporter = ExceptionReporter(request, None, None, None) html = reporter.get_traceback_html() self.assertInHTML( '<td>items</td><td class="code"><pre>&lt;InMemoryUploadedFile: ' 'items (application/octet-stream)&gt;</pre></td>', html ) # COOKIES rf = RequestFactory() rf.cookies['items'] = 'Oops' request = rf.get('/test_view/') reporter = ExceptionReporter(request, None, None, None) html = reporter.get_traceback_html() self.assertInHTML('<td>items</td><td class="code"><pre>&#x27;Oops&#x27;</pre></td>', html) def test_exception_fetching_user(self): """ The error page can be rendered if the current user can't be retrieved (such as when the database is unavailable). """ class ExceptionUser: def __str__(self): raise Exception() request = self.rf.get('/test_view/') request.user = ExceptionUser() try: raise ValueError('Oops') except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(request, exc_type, exc_value, tb) html = reporter.get_traceback_html() self.assertInHTML('<h1>ValueError at /test_view/</h1>', html) self.assertIn('<pre class="exception_value">Oops</pre>', html) self.assertIn('<h3 id="user-info">USER</h3>', html) self.assertIn('<p>[unable to retrieve the current user]</p>', html) text = reporter.get_traceback_text() self.assertIn('USER: [unable to retrieve the current user]', text) def test_template_encoding(self): """ The templates are loaded directly, not via a template loader, and should be opened as utf-8 charset as is the default specified on template engines. 
""" reporter = ExceptionReporter(None, None, None, None) with mock.patch.object(DebugPath, 'open') as m: reporter.get_traceback_html() m.assert_called_once_with(encoding='utf-8') m.reset_mock() reporter.get_traceback_text() m.assert_called_once_with(encoding='utf-8') class PlainTextReportTests(SimpleTestCase): rf = RequestFactory() def test_request_and_exception(self): "A simple exception report can be generated" try: request = self.rf.get('/test_view/') request.user = User() raise ValueError("Can't find my keys") except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(request, exc_type, exc_value, tb) text = reporter.get_traceback_text() self.assertIn('ValueError at /test_view/', text) self.assertIn("Can't find my keys", text) self.assertIn('Request Method:', text) self.assertIn('Request URL:', text) self.assertIn('USER: jacob', text) self.assertIn('Exception Type:', text) self.assertIn('Exception Value:', text) self.assertIn('Traceback (most recent call last):', text) self.assertIn('Request information:', text) self.assertNotIn('Request data not supplied', text) def test_no_request(self): "An exception report can be generated without request" try: raise ValueError("Can't find my keys") except ValueError: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(None, exc_type, exc_value, tb) text = reporter.get_traceback_text() self.assertIn('ValueError', text) self.assertIn("Can't find my keys", text) self.assertNotIn('Request Method:', text) self.assertNotIn('Request URL:', text) self.assertNotIn('USER:', text) self.assertIn('Exception Type:', text) self.assertIn('Exception Value:', text) self.assertIn('Traceback (most recent call last):', text) self.assertIn('Request data not supplied', text) def test_no_exception(self): "An exception report can be generated for just a request" request = self.rf.get('/test_view/') reporter = ExceptionReporter(request, None, None, None) reporter.get_traceback_text() def test_request_and_message(self): "A message can be provided in addition to a request" request = self.rf.get('/test_view/') reporter = ExceptionReporter(request, None, "I'm a little teapot", None) reporter.get_traceback_text() @override_settings(DEBUG=True) def test_template_exception(self): request = self.rf.get('/test_view/') try: render(request, 'debug/template_error.html') except Exception: exc_type, exc_value, tb = sys.exc_info() reporter = ExceptionReporter(request, exc_type, exc_value, tb) text = reporter.get_traceback_text() templ_path = Path(Path(__file__).parents[1], 'templates', 'debug', 'template_error.html') self.assertIn( 'Template error:\n' 'In template %(path)s, error at line 2\n' ' \'cycle\' tag requires at least two arguments\n' ' 1 : Template with error:\n' ' 2 : {%% cycle %%} \n' ' 3 : ' % {'path': templ_path}, text ) def test_request_with_items_key(self): """ An exception report can be generated for requests with 'items' in request GET, POST, FILES, or COOKIES QueryDicts. 
""" # GET request = self.rf.get('/test_view/?items=Oops') reporter = ExceptionReporter(request, None, None, None) text = reporter.get_traceback_text() self.assertIn("items = 'Oops'", text) # POST request = self.rf.post('/test_view/', data={'items': 'Oops'}) reporter = ExceptionReporter(request, None, None, None) text = reporter.get_traceback_text() self.assertIn("items = 'Oops'", text) # FILES fp = StringIO('filecontent') request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp}) reporter = ExceptionReporter(request, None, None, None) text = reporter.get_traceback_text() self.assertIn('items = <InMemoryUploadedFile:', text) # COOKIES rf = RequestFactory() rf.cookies['items'] = 'Oops' request = rf.get('/test_view/') reporter = ExceptionReporter(request, None, None, None) text = reporter.get_traceback_text() self.assertIn("items = 'Oops'", text) def test_message_only(self): reporter = ExceptionReporter(None, None, "I'm a little teapot", None) reporter.get_traceback_text() @override_settings(ALLOWED_HOSTS='example.com') def test_disallowed_host(self): "An exception report can be generated even for a disallowed host." request = self.rf.get('/', HTTP_HOST='evil.com') reporter = ExceptionReporter(request, None, None, None) text = reporter.get_traceback_text() self.assertIn("http://evil.com/", text) class ExceptionReportTestMixin: # Mixin used in the ExceptionReporterFilterTests and # AjaxResponseExceptionReporterFilter tests below breakfast_data = { 'sausage-key': 'sausage-value', 'baked-beans-key': 'baked-beans-value', 'hash-brown-key': 'hash-brown-value', 'bacon-key': 'bacon-value', } def verify_unsafe_response(self, view, check_for_vars=True, check_for_POST_params=True): """ Asserts that potentially sensitive info are displayed in the response. """ request = self.rf.post('/some_url/', self.breakfast_data) response = view(request) if check_for_vars: # All variables are shown. self.assertContains(response, 'cooked_eggs', status_code=500) self.assertContains(response, 'scrambled', status_code=500) self.assertContains(response, 'sauce', status_code=500) self.assertContains(response, 'worcestershire', status_code=500) if check_for_POST_params: for k, v in self.breakfast_data.items(): # All POST parameters are shown. self.assertContains(response, k, status_code=500) self.assertContains(response, v, status_code=500) def verify_safe_response(self, view, check_for_vars=True, check_for_POST_params=True): """ Asserts that certain sensitive info are not displayed in the response. """ request = self.rf.post('/some_url/', self.breakfast_data) response = view(request) if check_for_vars: # Non-sensitive variable's name and value are shown. self.assertContains(response, 'cooked_eggs', status_code=500) self.assertContains(response, 'scrambled', status_code=500) # Sensitive variable's name is shown but not its value. self.assertContains(response, 'sauce', status_code=500) self.assertNotContains(response, 'worcestershire', status_code=500) if check_for_POST_params: for k in self.breakfast_data: # All POST parameters' names are shown. self.assertContains(response, k, status_code=500) # Non-sensitive POST parameters' values are shown. self.assertContains(response, 'baked-beans-value', status_code=500) self.assertContains(response, 'hash-brown-value', status_code=500) # Sensitive POST parameters' values are not shown. 
self.assertNotContains(response, 'sausage-value', status_code=500) self.assertNotContains(response, 'bacon-value', status_code=500) def verify_paranoid_response(self, view, check_for_vars=True, check_for_POST_params=True): """ Asserts that no variables or POST parameters are displayed in the response. """ request = self.rf.post('/some_url/', self.breakfast_data) response = view(request) if check_for_vars: # Show variable names but not their values. self.assertContains(response, 'cooked_eggs', status_code=500) self.assertNotContains(response, 'scrambled', status_code=500) self.assertContains(response, 'sauce', status_code=500) self.assertNotContains(response, 'worcestershire', status_code=500) if check_for_POST_params: for k, v in self.breakfast_data.items(): # All POST parameters' names are shown. self.assertContains(response, k, status_code=500) # No POST parameters' values are shown. self.assertNotContains(response, v, status_code=500) def verify_unsafe_email(self, view, check_for_POST_params=True): """ Asserts that potentially sensitive info is displayed in the email report. """ with self.settings(ADMINS=[('Admin', '[email protected]')]): mail.outbox = [] # Empty outbox request = self.rf.post('/some_url/', self.breakfast_data) view(request) self.assertEqual(len(mail.outbox), 1) email = mail.outbox[0] # Frames vars are never shown in plain text email reports. body_plain = str(email.body) self.assertNotIn('cooked_eggs', body_plain) self.assertNotIn('scrambled', body_plain) self.assertNotIn('sauce', body_plain) self.assertNotIn('worcestershire', body_plain) # Frames vars are shown in html email reports. body_html = str(email.alternatives[0][0]) self.assertIn('cooked_eggs', body_html) self.assertIn('scrambled', body_html) self.assertIn('sauce', body_html) self.assertIn('worcestershire', body_html) if check_for_POST_params: for k, v in self.breakfast_data.items(): # All POST parameters are shown. self.assertIn(k, body_plain) self.assertIn(v, body_plain) self.assertIn(k, body_html) self.assertIn(v, body_html) def verify_safe_email(self, view, check_for_POST_params=True): """ Asserts that certain sensitive info is not displayed in the email report. """ with self.settings(ADMINS=[('Admin', '[email protected]')]): mail.outbox = [] # Empty outbox request = self.rf.post('/some_url/', self.breakfast_data) view(request) self.assertEqual(len(mail.outbox), 1) email = mail.outbox[0] # Frames vars are never shown in plain text email reports. body_plain = str(email.body) self.assertNotIn('cooked_eggs', body_plain) self.assertNotIn('scrambled', body_plain) self.assertNotIn('sauce', body_plain) self.assertNotIn('worcestershire', body_plain) # Frames vars are shown in html email reports. body_html = str(email.alternatives[0][0]) self.assertIn('cooked_eggs', body_html) self.assertIn('scrambled', body_html) self.assertIn('sauce', body_html) self.assertNotIn('worcestershire', body_html) if check_for_POST_params: for k in self.breakfast_data: # All POST parameters' names are shown. self.assertIn(k, body_plain) # Non-sensitive POST parameters' values are shown. self.assertIn('baked-beans-value', body_plain) self.assertIn('hash-brown-value', body_plain) self.assertIn('baked-beans-value', body_html) self.assertIn('hash-brown-value', body_html) # Sensitive POST parameters' values are not shown.
self.assertNotIn('sausage-value', body_plain) self.assertNotIn('bacon-value', body_plain) self.assertNotIn('sausage-value', body_html) self.assertNotIn('bacon-value', body_html) def verify_paranoid_email(self, view): """ Asserts that no variables or POST parameters are displayed in the email report. """ with self.settings(ADMINS=[('Admin', '[email protected]')]): mail.outbox = [] # Empty outbox request = self.rf.post('/some_url/', self.breakfast_data) view(request) self.assertEqual(len(mail.outbox), 1) email = mail.outbox[0] # Frames vars are never shown in plain text email reports. body = str(email.body) self.assertNotIn('cooked_eggs', body) self.assertNotIn('scrambled', body) self.assertNotIn('sauce', body) self.assertNotIn('worcestershire', body) for k, v in self.breakfast_data.items(): # All POST parameters' names are shown. self.assertIn(k, body) # No POST parameters' values are shown. self.assertNotIn(v, body) @override_settings(ROOT_URLCONF='view_tests.urls') class ExceptionReporterFilterTests(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase): """ Sensitive information can be filtered out of error reports (#14614). """ rf = RequestFactory() def test_non_sensitive_request(self): """ Everything (request info and frame variables) can be seen in the default error reports for non-sensitive requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(non_sensitive_view) self.verify_unsafe_email(non_sensitive_view) with self.settings(DEBUG=False): self.verify_unsafe_response(non_sensitive_view) self.verify_unsafe_email(non_sensitive_view) def test_sensitive_request(self): """ Sensitive POST parameters and frame variables cannot be seen in the default error reports for sensitive requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_view) self.verify_unsafe_email(sensitive_view) with self.settings(DEBUG=False): self.verify_safe_response(sensitive_view) self.verify_safe_email(sensitive_view) def test_paranoid_request(self): """ No POST parameters and frame variables can be seen in the default error reports for "paranoid" requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(paranoid_view) self.verify_unsafe_email(paranoid_view) with self.settings(DEBUG=False): self.verify_paranoid_response(paranoid_view) self.verify_paranoid_email(paranoid_view) def test_multivalue_dict_key_error(self): """ #21098 -- Sensitive POST parameters cannot be seen in the error reports if request.POST['nonexistent_key'] throws an error. """ with self.settings(DEBUG=True): self.verify_unsafe_response(multivalue_dict_key_error) self.verify_unsafe_email(multivalue_dict_key_error) with self.settings(DEBUG=False): self.verify_safe_response(multivalue_dict_key_error) self.verify_safe_email(multivalue_dict_key_error) def test_custom_exception_reporter_filter(self): """ It's possible to assign an exception reporter filter to the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER. """ with self.settings(DEBUG=True): self.verify_unsafe_response(custom_exception_reporter_filter_view) self.verify_unsafe_email(custom_exception_reporter_filter_view) with self.settings(DEBUG=False): self.verify_unsafe_response(custom_exception_reporter_filter_view) self.verify_unsafe_email(custom_exception_reporter_filter_view) def test_sensitive_method(self): """ The sensitive_variables decorator works with object methods.
""" with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_method_view, check_for_POST_params=False) self.verify_unsafe_email(sensitive_method_view, check_for_POST_params=False) with self.settings(DEBUG=False): self.verify_safe_response(sensitive_method_view, check_for_POST_params=False) self.verify_safe_email(sensitive_method_view, check_for_POST_params=False) def test_sensitive_function_arguments(self): """ Sensitive variables don't leak in the sensitive_variables decorator's frame, when those variables are passed as arguments to the decorated function. """ with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_args_function_caller) self.verify_unsafe_email(sensitive_args_function_caller) with self.settings(DEBUG=False): self.verify_safe_response(sensitive_args_function_caller, check_for_POST_params=False) self.verify_safe_email(sensitive_args_function_caller, check_for_POST_params=False) def test_sensitive_function_keyword_arguments(self): """ Sensitive variables don't leak in the sensitive_variables decorator's frame, when those variables are passed as keyword arguments to the decorated function. """ with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_kwargs_function_caller) self.verify_unsafe_email(sensitive_kwargs_function_caller) with self.settings(DEBUG=False): self.verify_safe_response(sensitive_kwargs_function_caller, check_for_POST_params=False) self.verify_safe_email(sensitive_kwargs_function_caller, check_for_POST_params=False) def test_callable_settings(self): """ Callable settings should not be evaluated in the debug page (#21345). """ def callable_setting(): return "This should not be displayed" with self.settings(DEBUG=True, FOOBAR=callable_setting): response = self.client.get('/raises500/') self.assertNotContains(response, "This should not be displayed", status_code=500) def test_callable_settings_forbidding_to_set_attributes(self): """ Callable settings which forbid to set attributes should not break the debug page (#23070). """ class CallableSettingWithSlots: __slots__ = [] def __call__(self): return "This should not be displayed" with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()): response = self.client.get('/raises500/') self.assertNotContains(response, "This should not be displayed", status_code=500) def test_dict_setting_with_non_str_key(self): """ A dict setting containing a non-string key should not break the debug page (#12744). """ with self.settings(DEBUG=True, FOOBAR={42: None}): response = self.client.get('/raises500/') self.assertContains(response, 'FOOBAR', status_code=500) def test_sensitive_settings(self): """ The debug page should not show some sensitive settings (password, secret key, ...). """ sensitive_settings = [ 'SECRET_KEY', 'PASSWORD', 'API_KEY', 'AUTH_TOKEN', ] for setting in sensitive_settings: with self.settings(DEBUG=True, **{setting: "should not be displayed"}): response = self.client.get('/raises500/') self.assertNotContains(response, 'should not be displayed', status_code=500) def test_settings_with_sensitive_keys(self): """ The debug page should filter out some sensitive information found in dict settings. 
""" sensitive_settings = [ 'SECRET_KEY', 'PASSWORD', 'API_KEY', 'AUTH_TOKEN', ] for setting in sensitive_settings: FOOBAR = { setting: "should not be displayed", 'recursive': {setting: "should not be displayed"}, } with self.settings(DEBUG=True, FOOBAR=FOOBAR): response = self.client.get('/raises500/') self.assertNotContains(response, 'should not be displayed', status_code=500) def test_cleanse_setting_basic(self): reporter_filter = SafeExceptionReporterFilter() self.assertEqual(reporter_filter.cleanse_setting('TEST', 'TEST'), 'TEST') self.assertEqual( reporter_filter.cleanse_setting('PASSWORD', 'super_secret'), reporter_filter.cleansed_substitute, ) def test_cleanse_setting_ignore_case(self): reporter_filter = SafeExceptionReporterFilter() self.assertEqual( reporter_filter.cleanse_setting('password', 'super_secret'), reporter_filter.cleansed_substitute, ) def test_cleanse_setting_recurses_in_dictionary(self): reporter_filter = SafeExceptionReporterFilter() initial = {'login': 'cooper', 'password': 'secret'} self.assertEqual( reporter_filter.cleanse_setting('SETTING_NAME', initial), {'login': 'cooper', 'password': reporter_filter.cleansed_substitute}, ) def test_cleanse_setting_recurses_in_list_tuples(self): reporter_filter = SafeExceptionReporterFilter() initial = [ { 'login': 'cooper', 'password': 'secret', 'apps': ( {'name': 'app1', 'api_key': 'a06b-c462cffae87a'}, {'name': 'app2', 'api_key': 'a9f4-f152e97ad808'}, ), 'tokens': ['98b37c57-ec62-4e39', '8690ef7d-8004-4916'], }, {'SECRET_KEY': 'c4d77c62-6196-4f17-a06b-c462cffae87a'}, ] cleansed = [ { 'login': 'cooper', 'password': reporter_filter.cleansed_substitute, 'apps': ( {'name': 'app1', 'api_key': reporter_filter.cleansed_substitute}, {'name': 'app2', 'api_key': reporter_filter.cleansed_substitute}, ), 'tokens': reporter_filter.cleansed_substitute, }, {'SECRET_KEY': reporter_filter.cleansed_substitute}, ] self.assertEqual( reporter_filter.cleanse_setting('SETTING_NAME', initial), cleansed, ) self.assertEqual( reporter_filter.cleanse_setting('SETTING_NAME', tuple(initial)), tuple(cleansed), ) def test_request_meta_filtering(self): request = self.rf.get('/', HTTP_SECRET_HEADER='super_secret') reporter_filter = SafeExceptionReporterFilter() self.assertEqual( reporter_filter.get_safe_request_meta(request)['HTTP_SECRET_HEADER'], reporter_filter.cleansed_substitute, ) def test_exception_report_uses_meta_filtering(self): response = self.client.get('/raises500/', HTTP_SECRET_HEADER='super_secret') self.assertNotIn(b'super_secret', response.content) response = self.client.get( '/raises500/', HTTP_SECRET_HEADER='super_secret', HTTP_ACCEPT='application/json', ) self.assertNotIn(b'super_secret', response.content) class CustomExceptionReporterFilter(SafeExceptionReporterFilter): cleansed_substitute = 'XXXXXXXXXXXXXXXXXXXX' hidden_settings = _lazy_re_compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE|DATABASE_URL', flags=re.I) @override_settings( ROOT_URLCONF='view_tests.urls', DEFAULT_EXCEPTION_REPORTER_FILTER='%s.CustomExceptionReporterFilter' % __name__, ) class CustomExceptionReporterFilterTests(SimpleTestCase): def setUp(self): get_default_exception_reporter_filter.cache_clear() def tearDown(self): get_default_exception_reporter_filter.cache_clear() def test_setting_allows_custom_subclass(self): self.assertIsInstance( get_default_exception_reporter_filter(), CustomExceptionReporterFilter, ) def test_cleansed_substitute_override(self): reporter_filter = get_default_exception_reporter_filter() self.assertEqual( 
reporter_filter.cleanse_setting('password', 'super_secret'), reporter_filter.cleansed_substitute, ) def test_hidden_settings_override(self): reporter_filter = get_default_exception_reporter_filter() self.assertEqual( reporter_filter.cleanse_setting('database_url', 'super_secret'), reporter_filter.cleansed_substitute, ) class NonHTMLResponseExceptionReporterFilter(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase): """ Sensitive information can be filtered out of error reports. The plain text 500 debug-only error page is served when it has been detected that the request doesn't accept HTML content. Don't check for (non)existence of frames vars in the traceback information section of the response content because they're not included in these error pages. Refs #14614. """ rf = RequestFactory(HTTP_ACCEPT='application/json') def test_non_sensitive_request(self): """ Request info can be seen in the default error reports for non-sensitive requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(non_sensitive_view, check_for_vars=False) with self.settings(DEBUG=False): self.verify_unsafe_response(non_sensitive_view, check_for_vars=False) def test_sensitive_request(self): """ Sensitive POST parameters cannot be seen in the default error reports for sensitive requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(sensitive_view, check_for_vars=False) with self.settings(DEBUG=False): self.verify_safe_response(sensitive_view, check_for_vars=False) def test_paranoid_request(self): """ No POST parameters can be seen in the default error reports for "paranoid" requests. """ with self.settings(DEBUG=True): self.verify_unsafe_response(paranoid_view, check_for_vars=False) with self.settings(DEBUG=False): self.verify_paranoid_response(paranoid_view, check_for_vars=False) def test_custom_exception_reporter_filter(self): """ It's possible to assign an exception reporter filter to the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER. """ with self.settings(DEBUG=True): self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False) with self.settings(DEBUG=False): self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False) @override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls') def test_non_html_response_encoding(self): response = self.client.get('/raises500/', HTTP_ACCEPT='application/json') self.assertEqual(response['Content-Type'], 'text/plain; charset=utf-8') class DecoratorsTests(SimpleTestCase): def test_sensitive_variables_not_called(self): msg = ( 'sensitive_variables() must be called to use it as a decorator, ' 'e.g., use @sensitive_variables(), not @sensitive_variables.' ) with self.assertRaisesMessage(TypeError, msg): @sensitive_variables def test_func(password): pass def test_sensitive_post_parameters_not_called(self): msg = ( 'sensitive_post_parameters() must be called to use it as a ' 'decorator, e.g., use @sensitive_post_parameters(), not ' '@sensitive_post_parameters.' ) with self.assertRaisesMessage(TypeError, msg): @sensitive_post_parameters def test_func(request): return index_page(request)
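# DecoratorsTests above verifies that sensitive_variables() and
# sensitive_post_parameters() must be called when applied as decorators. A
# short sketch of their intended use in a view (the view itself, its 'token'
# local, and the deliberate failure are illustrative assumptions; only the
# two decorators are Django API):

from django.views.decorators.debug import (
    sensitive_post_parameters, sensitive_variables,
)

@sensitive_variables('password', 'token')     # cleanse these frame locals
@sensitive_post_parameters('password')        # cleanse this POST parameter
def hypothetical_payment_view(request):
    password = request.POST['password']
    token = 'derived-from-' + password  # appears in the traceback frame
    # With DEBUG=False and ADMINS configured, the report produced for this
    # exception shows the names 'password' and 'token' but substitutes their
    # values, matching what verify_safe_response()/verify_safe_email() assert.
    raise ValueError('payment failed')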
import json import math import re from decimal import Decimal from django.contrib.gis.db.models import GeometryField, PolygonField, functions from django.contrib.gis.geos import ( GEOSGeometry, LineString, Point, Polygon, fromstr, ) from django.contrib.gis.measure import Area from django.db import NotSupportedError, connection from django.db.models import IntegerField, Sum, Value from django.test import TestCase, skipUnlessDBFeature from ..utils import FuncTestMixin, mariadb, mysql, oracle, postgis, spatialite from .models import City, Country, CountryWebMercator, State, Track class GISFunctionsTests(FuncTestMixin, TestCase): """ Testing functions from django/contrib/gis/db/models/functions.py. Area/Distance/Length/Perimeter are tested in distapp/tests. Please keep the tests in function's alphabetic order. """ fixtures = ['initial'] def test_asgeojson(self): if not connection.features.has_AsGeoJSON_function: with self.assertRaises(NotSupportedError): list(Country.objects.annotate(json=functions.AsGeoJSON('mpoly'))) return pueblo_json = '{"type":"Point","coordinates":[-104.609252,38.255001]}' houston_json = json.loads( '{"type":"Point","crs":{"type":"name","properties":' '{"name":"EPSG:4326"}},"coordinates":[-95.363151,29.763374]}' ) victoria_json = json.loads( '{"type":"Point","bbox":[-123.30519600,48.46261100,-123.30519600,48.46261100],' '"coordinates":[-123.305196,48.462611]}' ) chicago_json = json.loads( '{"type":"Point","crs":{"type":"name","properties":{"name":"EPSG:4326"}},' '"bbox":[-87.65018,41.85039,-87.65018,41.85039],"coordinates":[-87.65018,41.85039]}' ) # MySQL and Oracle ignore the crs option. if mysql or oracle: del houston_json['crs'] del chicago_json['crs'] # Oracle ignores also the bbox and precision options. if oracle: del chicago_json['bbox'] del victoria_json['bbox'] chicago_json['coordinates'] = [-87.650175, 41.850385] # Precision argument should only be an integer with self.assertRaises(TypeError): City.objects.annotate(geojson=functions.AsGeoJSON('point', precision='foo')) # Reference queries and values. # SELECT ST_AsGeoJson("geoapp_city"."point", 8, 0) # FROM "geoapp_city" WHERE "geoapp_city"."name" = 'Pueblo'; self.assertJSONEqual( pueblo_json, City.objects.annotate(geojson=functions.AsGeoJSON('point')).get(name='Pueblo').geojson ) # SELECT ST_AsGeoJson("geoapp_city"."point", 8, 2) FROM "geoapp_city" # WHERE "geoapp_city"."name" = 'Houston'; # This time we want to include the CRS by using the `crs` keyword. self.assertJSONEqual( City.objects.annotate(json=functions.AsGeoJSON('point', crs=True)).get(name='Houston').json, houston_json, ) # SELECT ST_AsGeoJson("geoapp_city"."point", 8, 1) FROM "geoapp_city" # WHERE "geoapp_city"."name" = 'Houston'; # This time we include the bounding box by using the `bbox` keyword. self.assertJSONEqual( City.objects.annotate( geojson=functions.AsGeoJSON('point', bbox=True) ).get(name='Victoria').geojson, victoria_json, ) # SELECT ST_AsGeoJson("geoapp_city"."point", 5, 3) FROM "geoapp_city" # WHERE "geoapp_city"."name" = 'Chicago'; # Finally, we set every available keyword. # MariaDB doesn't limit the number of decimals in bbox. if mariadb: chicago_json['bbox'] = [-87.650175, 41.850385, -87.650175, 41.850385] self.assertJSONEqual( City.objects.annotate( geojson=functions.AsGeoJSON('point', bbox=True, crs=True, precision=5) ).get(name='Chicago').geojson, chicago_json, ) @skipUnlessDBFeature("has_AsGML_function") def test_asgml(self): # Should throw a TypeError when trying to obtain GML from a # non-geometry field. 
qs = City.objects.all() with self.assertRaises(TypeError): qs.annotate(gml=functions.AsGML('name')) ptown = City.objects.annotate(gml=functions.AsGML('point', precision=9)).get(name='Pueblo') if oracle: # No precision parameter for Oracle :-/ gml_regex = re.compile( r'^<gml:Point srsName="EPSG:4326" xmlns:gml="http://www.opengis.net/gml">' r'<gml:coordinates decimal="\." cs="," ts=" ">-104.60925\d+,38.25500\d+ ' r'</gml:coordinates></gml:Point>' ) else: gml_regex = re.compile( r'^<gml:Point srsName="EPSG:4326"><gml:coordinates>' r'-104\.60925\d+,38\.255001</gml:coordinates></gml:Point>' ) self.assertTrue(gml_regex.match(ptown.gml)) self.assertIn( '<gml:pos srsDimension="2">', City.objects.annotate(gml=functions.AsGML('point', version=3)).get(name='Pueblo').gml ) @skipUnlessDBFeature("has_AsKML_function") def test_askml(self): # Should throw a TypeError when trying to obtain KML from a # non-geometry field. with self.assertRaises(TypeError): City.objects.annotate(kml=functions.AsKML('name')) # Ensuring the KML is as expected. ptown = City.objects.annotate(kml=functions.AsKML('point', precision=9)).get(name='Pueblo') self.assertEqual('<Point><coordinates>-104.609252,38.255001</coordinates></Point>', ptown.kml) @skipUnlessDBFeature("has_AsSVG_function") def test_assvg(self): with self.assertRaises(TypeError): City.objects.annotate(svg=functions.AsSVG('point', precision='foo')) # SELECT AsSVG(geoapp_city.point, 0, 8) FROM geoapp_city WHERE name = 'Pueblo'; svg1 = 'cx="-104.609252" cy="-38.255001"' # Even though relative, only one point so it's practically the same except for # the 'c' letter prefix on the x,y values. svg2 = svg1.replace('c', '') self.assertEqual(svg1, City.objects.annotate(svg=functions.AsSVG('point')).get(name='Pueblo').svg) self.assertEqual(svg2, City.objects.annotate(svg=functions.AsSVG('point', relative=5)).get(name='Pueblo').svg) @skipUnlessDBFeature('has_AsWKB_function') def test_aswkb(self): wkb = City.objects.annotate( wkb=functions.AsWKB(Point(1, 2, srid=4326)), ).first().wkb # WKB is either XDR or NDR encoded. self.assertIn( bytes(wkb), ( b'\x00\x00\x00\x00\x01?\xf0\x00\x00\x00\x00\x00\x00@\x00\x00' b'\x00\x00\x00\x00\x00', b'\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf0?\x00\x00' b'\x00\x00\x00\x00\x00@', ), ) @skipUnlessDBFeature('has_AsWKT_function') def test_aswkt(self): wkt = City.objects.annotate( wkt=functions.AsWKT(Point(1, 2, srid=4326)), ).first().wkt self.assertEqual(wkt, 'POINT (1.0 2.0)' if oracle else 'POINT(1 2)') @skipUnlessDBFeature("has_Azimuth_function") def test_azimuth(self): # Returns the azimuth in radians. azimuth_expr = functions.Azimuth(Point(0, 0, srid=4326), Point(1, 1, srid=4326)) self.assertAlmostEqual(City.objects.annotate(azimuth=azimuth_expr).first().azimuth, math.pi / 4) # Returns None if the two points are coincident. azimuth_expr = functions.Azimuth(Point(0, 0, srid=4326), Point(0, 0, srid=4326)) self.assertIsNone(City.objects.annotate(azimuth=azimuth_expr).first().azimuth) @skipUnlessDBFeature("has_BoundingCircle_function") def test_bounding_circle(self): def circle_num_points(num_seg): # num_seg is the number of segments per quarter circle. return (4 * num_seg) + 1 expected_areas = (169, 136) if postgis else (171, 126) qs = Country.objects.annotate(circle=functions.BoundingCircle('mpoly')).order_by('name') self.assertAlmostEqual(qs[0].circle.area, expected_areas[0], 0) self.assertAlmostEqual(qs[1].circle.area, expected_areas[1], 0) if postgis: # By default num_seg=48. 
            self.assertEqual(qs[0].circle.num_points, circle_num_points(48))
            self.assertEqual(qs[1].circle.num_points, circle_num_points(48))

        tests = [12, Value(12, output_field=IntegerField())]
        for num_seg in tests:
            with self.subTest(num_seg=num_seg):
                qs = Country.objects.annotate(
                    circle=functions.BoundingCircle('mpoly', num_seg=num_seg),
                ).order_by('name')
                if postgis:
                    self.assertGreater(qs[0].circle.area, 168.4)
                    self.assertLess(qs[0].circle.area, 169.5)
                    self.assertAlmostEqual(qs[1].circle.area, 136, 0)
                    self.assertEqual(qs[0].circle.num_points, circle_num_points(12))
                    self.assertEqual(qs[1].circle.num_points, circle_num_points(12))
                else:
                    self.assertAlmostEqual(qs[0].circle.area, expected_areas[0], 0)
                    self.assertAlmostEqual(qs[1].circle.area, expected_areas[1], 0)

    @skipUnlessDBFeature("has_Centroid_function")
    def test_centroid(self):
        qs = State.objects.exclude(poly__isnull=True).annotate(centroid=functions.Centroid('poly'))
        tol = 1.8 if mysql else (0.1 if oracle else 0.00001)
        for state in qs:
            self.assertTrue(state.poly.centroid.equals_exact(state.centroid, tol))

        with self.assertRaisesMessage(TypeError, "'Centroid' takes exactly 1 argument (2 given)"):
            State.objects.annotate(centroid=functions.Centroid('poly', 'poly'))

    @skipUnlessDBFeature("has_Difference_function")
    def test_difference(self):
        geom = Point(5, 23, srid=4326)
        qs = Country.objects.annotate(diff=functions.Difference('mpoly', geom))
        # Oracle does something screwy with the Texas geometry.
        if oracle:
            qs = qs.exclude(name='Texas')

        for c in qs:
            self.assertTrue(c.mpoly.difference(geom).equals(c.diff))

    @skipUnlessDBFeature("has_Difference_function", "has_Transform_function")
    def test_difference_mixed_srid(self):
        """Testing with mixed SRID (Country has default 4326)."""
        geom = Point(556597.4, 2632018.6, srid=3857)  # Spherical Mercator
        qs = Country.objects.annotate(difference=functions.Difference('mpoly', geom))
        # Oracle does something screwy with the Texas geometry.
if oracle: qs = qs.exclude(name='Texas') for c in qs: self.assertTrue(c.mpoly.difference(geom).equals(c.difference)) @skipUnlessDBFeature("has_Envelope_function") def test_envelope(self): countries = Country.objects.annotate(envelope=functions.Envelope('mpoly')) for country in countries: self.assertTrue(country.envelope.equals(country.mpoly.envelope)) @skipUnlessDBFeature("has_ForcePolygonCW_function") def test_force_polygon_cw(self): rings = ( ((0, 0), (5, 0), (0, 5), (0, 0)), ((1, 1), (1, 3), (3, 1), (1, 1)), ) rhr_rings = ( ((0, 0), (0, 5), (5, 0), (0, 0)), ((1, 1), (3, 1), (1, 3), (1, 1)), ) State.objects.create(name='Foo', poly=Polygon(*rings)) st = State.objects.annotate(force_polygon_cw=functions.ForcePolygonCW('poly')).get(name='Foo') self.assertEqual(rhr_rings, st.force_polygon_cw.coords) @skipUnlessDBFeature("has_GeoHash_function") def test_geohash(self): # Reference query: # SELECT ST_GeoHash(point) FROM geoapp_city WHERE name='Houston'; # SELECT ST_GeoHash(point, 5) FROM geoapp_city WHERE name='Houston'; ref_hash = '9vk1mfq8jx0c8e0386z6' h1 = City.objects.annotate(geohash=functions.GeoHash('point')).get(name='Houston') h2 = City.objects.annotate(geohash=functions.GeoHash('point', precision=5)).get(name='Houston') self.assertEqual(ref_hash, h1.geohash[:len(ref_hash)]) self.assertEqual(ref_hash[:5], h2.geohash) @skipUnlessDBFeature('has_GeometryDistance_function') def test_geometry_distance(self): point = Point(-90, 40, srid=4326) qs = City.objects.annotate(distance=functions.GeometryDistance('point', point)).order_by('distance') distances = ( 2.99091995527296, 5.33507274054713, 9.33852187483721, 9.91769193646233, 11.556465744884, 14.713098433352, 34.3635252198568, 276.987855073372, ) for city, expected_distance in zip(qs, distances): with self.subTest(city=city): self.assertAlmostEqual(city.distance, expected_distance) @skipUnlessDBFeature("has_Intersection_function") def test_intersection(self): geom = Point(5, 23, srid=4326) qs = Country.objects.annotate(inter=functions.Intersection('mpoly', geom)) for c in qs: if spatialite or (mysql and not connection.features.supports_empty_geometry_collection) or oracle: # When the intersection is empty, some databases return None. expected = None else: expected = c.mpoly.intersection(geom) self.assertEqual(c.inter, expected) @skipUnlessDBFeature("has_IsValid_function") def test_isvalid(self): valid_geom = fromstr('POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))') invalid_geom = fromstr('POLYGON((0 0, 0 1, 1 1, 1 0, 1 1, 1 0, 0 0))') State.objects.create(name='valid', poly=valid_geom) State.objects.create(name='invalid', poly=invalid_geom) valid = State.objects.filter(name='valid').annotate(isvalid=functions.IsValid('poly')).first() invalid = State.objects.filter(name='invalid').annotate(isvalid=functions.IsValid('poly')).first() self.assertIs(valid.isvalid, True) self.assertIs(invalid.isvalid, False) @skipUnlessDBFeature("has_Area_function") def test_area_with_regular_aggregate(self): # Create projected country objects, for this test to work on all backends. for c in Country.objects.all(): CountryWebMercator.objects.create(name=c.name, mpoly=c.mpoly.transform(3857, clone=True)) # Test in projected coordinate system qs = CountryWebMercator.objects.annotate(area_sum=Sum(functions.Area('mpoly'))) # Some backends (e.g. Oracle) cannot group by multipolygon values, so # defer such fields in the aggregation query. for c in qs.defer('mpoly'): result = c.area_sum # If the result is a measure object, get value. 
if isinstance(result, Area): result = result.sq_m self.assertAlmostEqual((result - c.mpoly.area) / c.mpoly.area, 0) @skipUnlessDBFeature("has_Area_function") def test_area_lookups(self): # Create projected countries so the test works on all backends. CountryWebMercator.objects.bulk_create( CountryWebMercator(name=c.name, mpoly=c.mpoly.transform(3857, clone=True)) for c in Country.objects.all() ) qs = CountryWebMercator.objects.annotate(area=functions.Area('mpoly')) self.assertEqual(qs.get(area__lt=Area(sq_km=500000)), CountryWebMercator.objects.get(name='New Zealand')) with self.assertRaisesMessage(ValueError, 'AreaField only accepts Area measurement objects.'): qs.get(area__lt=500000) @skipUnlessDBFeature("has_LineLocatePoint_function") def test_line_locate_point(self): pos_expr = functions.LineLocatePoint(LineString((0, 0), (0, 3), srid=4326), Point(0, 1, srid=4326)) self.assertAlmostEqual(State.objects.annotate(pos=pos_expr).first().pos, 0.3333333) @skipUnlessDBFeature("has_MakeValid_function") def test_make_valid(self): invalid_geom = fromstr('POLYGON((0 0, 0 1, 1 1, 1 0, 1 1, 1 0, 0 0))') State.objects.create(name='invalid', poly=invalid_geom) invalid = State.objects.filter(name='invalid').annotate(repaired=functions.MakeValid('poly')).first() self.assertIs(invalid.repaired.valid, True) self.assertEqual(invalid.repaired, fromstr('POLYGON((0 0, 0 1, 1 1, 1 0, 0 0))', srid=invalid.poly.srid)) @skipUnlessDBFeature('has_MakeValid_function') def test_make_valid_multipolygon(self): invalid_geom = fromstr( 'POLYGON((0 0, 0 1 , 1 1 , 1 0, 0 0), ' '(10 0, 10 1, 11 1, 11 0, 10 0))' ) State.objects.create(name='invalid', poly=invalid_geom) invalid = State.objects.filter(name='invalid').annotate( repaired=functions.MakeValid('poly'), ).get() self.assertIs(invalid.repaired.valid, True) self.assertEqual(invalid.repaired, fromstr( 'MULTIPOLYGON (((0 0, 0 1, 1 1, 1 0, 0 0)), ' '((10 0, 10 1, 11 1, 11 0, 10 0)))', srid=invalid.poly.srid, )) self.assertEqual(len(invalid.repaired), 2) @skipUnlessDBFeature('has_MakeValid_function') def test_make_valid_output_field(self): # output_field is GeometryField instance because different geometry # types can be returned. output_field = functions.MakeValid( Value(Polygon(), PolygonField(srid=42)), ).output_field self.assertIs(output_field.__class__, GeometryField) self.assertEqual(output_field.srid, 42) @skipUnlessDBFeature("has_MemSize_function") def test_memsize(self): ptown = City.objects.annotate(size=functions.MemSize('point')).get(name='Pueblo') self.assertTrue(20 <= ptown.size <= 40) # Exact value may depend on PostGIS version @skipUnlessDBFeature("has_NumGeom_function") def test_num_geom(self): # Both 'countries' only have two geometries. for c in Country.objects.annotate(num_geom=functions.NumGeometries('mpoly')): self.assertEqual(2, c.num_geom) qs = City.objects.filter(point__isnull=False).annotate(num_geom=functions.NumGeometries('point')) for city in qs: # Oracle and PostGIS return 1 for the number of geometries on # non-collections, whereas MySQL returns None. 
if mysql: self.assertIsNone(city.num_geom) else: self.assertEqual(1, city.num_geom) @skipUnlessDBFeature("has_NumPoint_function") def test_num_points(self): coords = [(-95.363151, 29.763374), (-95.448601, 29.713803)] Track.objects.create(name='Foo', line=LineString(coords)) qs = Track.objects.annotate(num_points=functions.NumPoints('line')) self.assertEqual(qs.first().num_points, 2) mpoly_qs = Country.objects.annotate(num_points=functions.NumPoints('mpoly')) if not connection.features.supports_num_points_poly: for c in mpoly_qs: self.assertIsNone(c.num_points) return for c in mpoly_qs: self.assertEqual(c.mpoly.num_points, c.num_points) for c in City.objects.annotate(num_points=functions.NumPoints('point')): self.assertEqual(c.num_points, 1) @skipUnlessDBFeature("has_PointOnSurface_function") def test_point_on_surface(self): qs = Country.objects.annotate(point_on_surface=functions.PointOnSurface('mpoly')) for country in qs: self.assertTrue(country.mpoly.intersection(country.point_on_surface)) @skipUnlessDBFeature("has_Reverse_function") def test_reverse_geom(self): coords = [(-95.363151, 29.763374), (-95.448601, 29.713803)] Track.objects.create(name='Foo', line=LineString(coords)) track = Track.objects.annotate(reverse_geom=functions.Reverse('line')).get(name='Foo') coords.reverse() self.assertEqual(tuple(coords), track.reverse_geom.coords) @skipUnlessDBFeature("has_Scale_function") def test_scale(self): xfac, yfac = 2, 3 tol = 5 # The low precision tolerance is for SpatiaLite qs = Country.objects.annotate(scaled=functions.Scale('mpoly', xfac, yfac)) for country in qs: for p1, p2 in zip(country.mpoly, country.scaled): for r1, r2 in zip(p1, p2): for c1, c2 in zip(r1.coords, r2.coords): self.assertAlmostEqual(c1[0] * xfac, c2[0], tol) self.assertAlmostEqual(c1[1] * yfac, c2[1], tol) # Test float/Decimal values qs = Country.objects.annotate(scaled=functions.Scale('mpoly', 1.5, Decimal('2.5'))) self.assertGreater(qs[0].scaled.area, qs[0].mpoly.area) @skipUnlessDBFeature("has_SnapToGrid_function") def test_snap_to_grid(self): # Let's try and break snap_to_grid() with bad combinations of arguments. for bad_args in ((), range(3), range(5)): with self.assertRaises(ValueError): Country.objects.annotate(snap=functions.SnapToGrid('mpoly', *bad_args)) for bad_args in (('1.0',), (1.0, None), tuple(map(str, range(4)))): with self.assertRaises(TypeError): Country.objects.annotate(snap=functions.SnapToGrid('mpoly', *bad_args)) # Boundary for San Marino, courtesy of Bjorn Sandvik of thematicmapping.org # from the world borders dataset he provides. wkt = ('MULTIPOLYGON(((12.41580 43.95795,12.45055 43.97972,12.45389 43.98167,' '12.46250 43.98472,12.47167 43.98694,12.49278 43.98917,' '12.50555 43.98861,12.51000 43.98694,12.51028 43.98277,' '12.51167 43.94333,12.51056 43.93916,12.49639 43.92333,' '12.49500 43.91472,12.48778 43.90583,12.47444 43.89722,' '12.46472 43.89555,12.45917 43.89611,12.41639 43.90472,' '12.41222 43.90610,12.40782 43.91366,12.40389 43.92667,' '12.40500 43.94833,12.40889 43.95499,12.41580 43.95795)))') Country.objects.create(name='San Marino', mpoly=fromstr(wkt)) # Because floating-point arithmetic isn't exact, we set a tolerance # to pass into GEOS `equals_exact`. 
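        # Note that equals_exact() compares the geometries coordinate by
        # coordinate within the tolerance, rather than testing topological
        # equality as equals() does.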
tol = 0.000000001 # SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.1)) FROM "geoapp_country" # WHERE "geoapp_country"."name" = 'San Marino'; ref = fromstr('MULTIPOLYGON(((12.4 44,12.5 44,12.5 43.9,12.4 43.9,12.4 44)))') self.assertTrue( ref.equals_exact( Country.objects.annotate( snap=functions.SnapToGrid('mpoly', 0.1) ).get(name='San Marino').snap, tol ) ) # SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.05, 0.23)) FROM "geoapp_country" # WHERE "geoapp_country"."name" = 'San Marino'; ref = fromstr('MULTIPOLYGON(((12.4 43.93,12.45 43.93,12.5 43.93,12.45 43.93,12.4 43.93)))') self.assertTrue( ref.equals_exact( Country.objects.annotate( snap=functions.SnapToGrid('mpoly', 0.05, 0.23) ).get(name='San Marino').snap, tol ) ) # SELECT AsText(ST_SnapToGrid("geoapp_country"."mpoly", 0.5, 0.17, 0.05, 0.23)) FROM "geoapp_country" # WHERE "geoapp_country"."name" = 'San Marino'; ref = fromstr( 'MULTIPOLYGON(((12.4 43.87,12.45 43.87,12.45 44.1,12.5 44.1,12.5 43.87,12.45 43.87,12.4 43.87)))' ) self.assertTrue( ref.equals_exact( Country.objects.annotate( snap=functions.SnapToGrid('mpoly', 0.05, 0.23, 0.5, 0.17) ).get(name='San Marino').snap, tol ) ) @skipUnlessDBFeature("has_SymDifference_function") def test_sym_difference(self): geom = Point(5, 23, srid=4326) qs = Country.objects.annotate(sym_difference=functions.SymDifference('mpoly', geom)) # Oracle does something screwy with the Texas geometry. if oracle: qs = qs.exclude(name='Texas') for country in qs: self.assertTrue(country.mpoly.sym_difference(geom).equals(country.sym_difference)) @skipUnlessDBFeature("has_Transform_function") def test_transform(self): # Pre-transformed points for Houston and Pueblo. ptown = fromstr('POINT(992363.390841912 481455.395105533)', srid=2774) prec = 3 # Precision is low due to version variations in PROJ and GDAL. # Asserting the result of the transform operation with the values in # the pre-transformed points. h = City.objects.annotate(pt=functions.Transform('point', ptown.srid)).get(name='Pueblo') self.assertEqual(2774, h.pt.srid) self.assertAlmostEqual(ptown.x, h.pt.x, prec) self.assertAlmostEqual(ptown.y, h.pt.y, prec) @skipUnlessDBFeature("has_Translate_function") def test_translate(self): xfac, yfac = 5, -23 qs = Country.objects.annotate(translated=functions.Translate('mpoly', xfac, yfac)) for c in qs: for p1, p2 in zip(c.mpoly, c.translated): for r1, r2 in zip(p1, p2): for c1, c2 in zip(r1.coords, r2.coords): # The low precision is for SpatiaLite self.assertAlmostEqual(c1[0] + xfac, c2[0], 5) self.assertAlmostEqual(c1[1] + yfac, c2[1], 5) # Some combined function tests @skipUnlessDBFeature( "has_Difference_function", "has_Intersection_function", "has_SymDifference_function", "has_Union_function") def test_diff_intersection_union(self): geom = Point(5, 23, srid=4326) qs = Country.objects.all().annotate( difference=functions.Difference('mpoly', geom), sym_difference=functions.SymDifference('mpoly', geom), union=functions.Union('mpoly', geom), intersection=functions.Intersection('mpoly', geom), ) if oracle: # Should be able to execute the queries; however, they won't be the same # as GEOS (because Oracle doesn't use GEOS internally like PostGIS or # SpatiaLite). 
return for c in qs: self.assertTrue(c.mpoly.difference(geom).equals(c.difference)) if not (spatialite or mysql): self.assertEqual(c.mpoly.intersection(geom), c.intersection) self.assertTrue(c.mpoly.sym_difference(geom).equals(c.sym_difference)) self.assertTrue(c.mpoly.union(geom).equals(c.union)) @skipUnlessDBFeature("has_Union_function") def test_union(self): """Union with all combinations of geometries/geometry fields.""" geom = Point(-95.363151, 29.763374, srid=4326) union = City.objects.annotate(union=functions.Union('point', geom)).get(name='Dallas').union expected = fromstr('MULTIPOINT(-96.801611 32.782057,-95.363151 29.763374)', srid=4326) self.assertTrue(expected.equals(union)) union = City.objects.annotate(union=functions.Union(geom, 'point')).get(name='Dallas').union self.assertTrue(expected.equals(union)) union = City.objects.annotate(union=functions.Union('point', 'point')).get(name='Dallas').union expected = GEOSGeometry('POINT(-96.801611 32.782057)', srid=4326) self.assertTrue(expected.equals(union)) union = City.objects.annotate(union=functions.Union(geom, geom)).get(name='Dallas').union self.assertTrue(geom.equals(union)) @skipUnlessDBFeature("has_Union_function", "has_Transform_function") def test_union_mixed_srid(self): """The result SRID depends on the order of parameters.""" geom = Point(61.42915, 55.15402, srid=4326) geom_3857 = geom.transform(3857, clone=True) tol = 0.001 for city in City.objects.annotate(union=functions.Union('point', geom_3857)): expected = city.point | geom self.assertTrue(city.union.equals_exact(expected, tol)) self.assertEqual(city.union.srid, 4326) for city in City.objects.annotate(union=functions.Union(geom_3857, 'point')): expected = geom_3857 | city.point.transform(3857, clone=True) self.assertTrue(expected.equals_exact(city.union, tol)) self.assertEqual(city.union.srid, 3857) def test_argument_validation(self): with self.assertRaisesMessage(ValueError, 'SRID is required for all geometries.'): City.objects.annotate(geo=functions.GeoFunc(Point(1, 1))) msg = 'GeoFunc function requires a GeometryField in position 1, got CharField.' with self.assertRaisesMessage(TypeError, msg): City.objects.annotate(geo=functions.GeoFunc('name')) msg = 'GeoFunc function requires a geometric argument in position 1.' with self.assertRaisesMessage(TypeError, msg): City.objects.annotate(union=functions.GeoFunc(1, 'point')).get(name='Dallas')
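
# A minimal usage sketch, not part of the test suite, showing how the
# annotation functions exercised above combine in an ordinary query. It
# assumes the same City model and `functions` import as the tests; the
# helper name is illustrative, and Distance itself is covered in
# distapp/tests.
from django.contrib.gis.db.models import functions
from django.contrib.gis.geos import Point


def nearest_city_as_geojson(lon, lat):
    # Annotate each city with its distance to the given location, take the
    # closest one, and serialize its point as GeoJSON.
    here = Point(lon, lat, srid=4326)
    return (
        City.objects
        .annotate(
            distance=functions.Distance('point', here),
            geojson=functions.AsGeoJSON('point', precision=5),
        )
        .order_by('distance')
        .values_list('geojson', flat=True)
        .first()
    )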
85fbd6da91a9859f8672e78ce14ea390ef30755f0ec4bfc9a7dcd35004cff484
import os import shutil import struct import tempfile from django.contrib.gis.gdal import GDAL_VERSION, GDALRaster, SpatialReference from django.contrib.gis.gdal.error import GDALException from django.contrib.gis.gdal.raster.band import GDALBand from django.contrib.gis.shortcuts import numpy from django.test import SimpleTestCase from ..data.rasters.textrasters import JSON_RASTER class GDALRasterTests(SimpleTestCase): """ Test a GDALRaster instance created from a file (GeoTiff). """ def setUp(self): self.rs_path = os.path.join(os.path.dirname(__file__), '../data/rasters/raster.tif') self.rs = GDALRaster(self.rs_path) def test_rs_name_repr(self): self.assertEqual(self.rs_path, self.rs.name) self.assertRegex(repr(self.rs), r"<Raster object at 0x\w+>") def test_rs_driver(self): self.assertEqual(self.rs.driver.name, 'GTiff') def test_rs_size(self): self.assertEqual(self.rs.width, 163) self.assertEqual(self.rs.height, 174) def test_rs_srs(self): self.assertEqual(self.rs.srs.srid, 3086) self.assertEqual(self.rs.srs.units, (1.0, 'metre')) def test_rs_srid(self): rast = GDALRaster({ 'width': 16, 'height': 16, 'srid': 4326, }) self.assertEqual(rast.srid, 4326) rast.srid = 3086 self.assertEqual(rast.srid, 3086) def test_geotransform_and_friends(self): # Assert correct values for file based raster self.assertEqual( self.rs.geotransform, [511700.4680706557, 100.0, 0.0, 435103.3771231986, 0.0, -100.0] ) self.assertEqual(self.rs.origin, [511700.4680706557, 435103.3771231986]) self.assertEqual(self.rs.origin.x, 511700.4680706557) self.assertEqual(self.rs.origin.y, 435103.3771231986) self.assertEqual(self.rs.scale, [100.0, -100.0]) self.assertEqual(self.rs.scale.x, 100.0) self.assertEqual(self.rs.scale.y, -100.0) self.assertEqual(self.rs.skew, [0, 0]) self.assertEqual(self.rs.skew.x, 0) self.assertEqual(self.rs.skew.y, 0) # Create in-memory rasters and change gtvalues rsmem = GDALRaster(JSON_RASTER) # geotransform accepts both floats and ints rsmem.geotransform = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0] self.assertEqual(rsmem.geotransform, [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]) rsmem.geotransform = range(6) self.assertEqual(rsmem.geotransform, [float(x) for x in range(6)]) self.assertEqual(rsmem.origin, [0, 3]) self.assertEqual(rsmem.origin.x, 0) self.assertEqual(rsmem.origin.y, 3) self.assertEqual(rsmem.scale, [1, 5]) self.assertEqual(rsmem.scale.x, 1) self.assertEqual(rsmem.scale.y, 5) self.assertEqual(rsmem.skew, [2, 4]) self.assertEqual(rsmem.skew.x, 2) self.assertEqual(rsmem.skew.y, 4) self.assertEqual(rsmem.width, 5) self.assertEqual(rsmem.height, 5) def test_geotransform_bad_inputs(self): rsmem = GDALRaster(JSON_RASTER) error_geotransforms = [ [1, 2], [1, 2, 3, 4, 5, 'foo'], [1, 2, 3, 4, 5, 6, 'foo'], ] msg = 'Geotransform must consist of 6 numeric values.' 
for geotransform in error_geotransforms: with self.subTest(i=geotransform), self.assertRaisesMessage(ValueError, msg): rsmem.geotransform = geotransform def test_rs_extent(self): self.assertEqual( self.rs.extent, (511700.4680706557, 417703.3771231986, 528000.4680706557, 435103.3771231986) ) def test_rs_bands(self): self.assertEqual(len(self.rs.bands), 1) self.assertIsInstance(self.rs.bands[0], GDALBand) def test_memory_based_raster_creation(self): # Create uint8 raster with full pixel data range (0-255) rast = GDALRaster({ 'datatype': 1, 'width': 16, 'height': 16, 'srid': 4326, 'bands': [{ 'data': range(256), 'nodata_value': 255, }], }) # Get array from raster result = rast.bands[0].data() if numpy: result = result.flatten().tolist() # Assert data is same as original input self.assertEqual(result, list(range(256))) def test_file_based_raster_creation(self): # Prepare tempfile rstfile = tempfile.NamedTemporaryFile(suffix='.tif') # Create file-based raster from scratch GDALRaster({ 'datatype': self.rs.bands[0].datatype(), 'driver': 'tif', 'name': rstfile.name, 'width': 163, 'height': 174, 'nr_of_bands': 1, 'srid': self.rs.srs.wkt, 'origin': (self.rs.origin.x, self.rs.origin.y), 'scale': (self.rs.scale.x, self.rs.scale.y), 'skew': (self.rs.skew.x, self.rs.skew.y), 'bands': [{ 'data': self.rs.bands[0].data(), 'nodata_value': self.rs.bands[0].nodata_value, }], }) # Reload newly created raster from file restored_raster = GDALRaster(rstfile.name) # Presence of TOWGS84 depend on GDAL/Proj versions. self.assertEqual( restored_raster.srs.wkt.replace('TOWGS84[0,0,0,0,0,0,0],', ''), self.rs.srs.wkt.replace('TOWGS84[0,0,0,0,0,0,0],', '') ) self.assertEqual(restored_raster.geotransform, self.rs.geotransform) if numpy: numpy.testing.assert_equal( restored_raster.bands[0].data(), self.rs.bands[0].data() ) else: self.assertEqual(restored_raster.bands[0].data(), self.rs.bands[0].data()) def test_nonexistent_file(self): msg = 'Unable to read raster source input "nonexistent.tif".' with self.assertRaisesMessage(GDALException, msg): GDALRaster('nonexistent.tif') def test_vsi_raster_creation(self): # Open a raster as a file object. with open(self.rs_path, 'rb') as dat: # Instantiate a raster from the file binary buffer. vsimem = GDALRaster(dat.read()) # The data of the in-memory file is equal to the source file. result = vsimem.bands[0].data() target = self.rs.bands[0].data() if numpy: result = result.flatten().tolist() target = target.flatten().tolist() self.assertEqual(result, target) def test_vsi_raster_deletion(self): path = '/vsimem/raster.tif' # Create a vsi-based raster from scratch. vsimem = GDALRaster({ 'name': path, 'driver': 'tif', 'width': 4, 'height': 4, 'srid': 4326, 'bands': [{ 'data': range(16), }], }) # The virtual file exists. rst = GDALRaster(path) self.assertEqual(rst.width, 4) # Delete GDALRaster. del vsimem del rst # The virtual file has been removed. msg = 'Could not open the datasource at "/vsimem/raster.tif"' with self.assertRaisesMessage(GDALException, msg): GDALRaster(path) def test_vsi_invalid_buffer_error(self): msg = 'Failed creating VSI raster from the input buffer.' with self.assertRaisesMessage(GDALException, msg): GDALRaster(b'not-a-raster-buffer') def test_vsi_buffer_property(self): # Create a vsi-based raster from scratch. rast = GDALRaster({ 'name': '/vsimem/raster.tif', 'driver': 'tif', 'width': 4, 'height': 4, 'srid': 4326, 'bands': [{ 'data': range(16), }], }) # Do a round trip from raster to buffer to raster. 
result = GDALRaster(rast.vsi_buffer).bands[0].data() if numpy: result = result.flatten().tolist() # Band data is equal to nodata value except on input block of ones. self.assertEqual(result, list(range(16))) # The vsi buffer is None for rasters that are not vsi based. self.assertIsNone(self.rs.vsi_buffer) def test_offset_size_and_shape_on_raster_creation(self): rast = GDALRaster({ 'datatype': 1, 'width': 4, 'height': 4, 'srid': 4326, 'bands': [{ 'data': (1,), 'offset': (1, 1), 'size': (2, 2), 'shape': (1, 1), 'nodata_value': 2, }], }) # Get array from raster. result = rast.bands[0].data() if numpy: result = result.flatten().tolist() # Band data is equal to nodata value except on input block of ones. self.assertEqual( result, [2, 2, 2, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, 2, 2, 2] ) def test_set_nodata_value_on_raster_creation(self): # Create raster filled with nodata values. rast = GDALRaster({ 'datatype': 1, 'width': 2, 'height': 2, 'srid': 4326, 'bands': [{'nodata_value': 23}], }) # Get array from raster. result = rast.bands[0].data() if numpy: result = result.flatten().tolist() # All band data is equal to nodata value. self.assertEqual(result, [23] * 4) def test_set_nodata_none_on_raster_creation(self): if GDAL_VERSION < (2, 1): self.skipTest("GDAL >= 2.1 is required for this test.") # Create raster without data and without nodata value. rast = GDALRaster({ 'datatype': 1, 'width': 2, 'height': 2, 'srid': 4326, 'bands': [{'nodata_value': None}], }) # Get array from raster. result = rast.bands[0].data() if numpy: result = result.flatten().tolist() # Band data is equal to zero because no nodata value has been specified. self.assertEqual(result, [0] * 4) def test_raster_metadata_property(self): data = self.rs.metadata self.assertEqual(data['DEFAULT'], {'AREA_OR_POINT': 'Area'}) self.assertEqual(data['IMAGE_STRUCTURE'], {'INTERLEAVE': 'BAND'}) # Create file-based raster from scratch source = GDALRaster({ 'datatype': 1, 'width': 2, 'height': 2, 'srid': 4326, 'bands': [{'data': range(4), 'nodata_value': 99}], }) # Set metadata on raster and on a band. metadata = { 'DEFAULT': {'OWNER': 'Django', 'VERSION': '1.0', 'AREA_OR_POINT': 'Point'}, } source.metadata = metadata source.bands[0].metadata = metadata self.assertEqual(source.metadata['DEFAULT'], metadata['DEFAULT']) self.assertEqual(source.bands[0].metadata['DEFAULT'], metadata['DEFAULT']) # Update metadata on raster. metadata = { 'DEFAULT': {'VERSION': '2.0'}, } source.metadata = metadata self.assertEqual(source.metadata['DEFAULT']['VERSION'], '2.0') # Remove metadata on raster. metadata = { 'DEFAULT': {'OWNER': None}, } source.metadata = metadata self.assertNotIn('OWNER', source.metadata['DEFAULT']) def test_raster_info_accessor(self): if GDAL_VERSION < (2, 1): msg = 'GDAL ≥ 2.1 is required for using the info property.' 
with self.assertRaisesMessage(ValueError, msg): self.rs.info return infos = self.rs.info # Data info_lines = [line.strip() for line in infos.split('\n') if line.strip() != ''] for line in [ 'Driver: GTiff/GeoTIFF', 'Files: {}'.format(self.rs_path), 'Size is 163, 174', 'Origin = (511700.468070655711927,435103.377123198588379)', 'Pixel Size = (100.000000000000000,-100.000000000000000)', 'Metadata:', 'AREA_OR_POINT=Area', 'Image Structure Metadata:', 'INTERLEAVE=BAND', 'Band 1 Block=163x50 Type=Byte, ColorInterp=Gray', 'NoData Value=15' ]: self.assertIn(line, info_lines) for line in [ r'Upper Left \( 511700.468, 435103.377\) \( 82d51\'46.1\d"W, 27d55\' 1.5\d"N\)', r'Lower Left \( 511700.468, 417703.377\) \( 82d51\'52.0\d"W, 27d45\'37.5\d"N\)', r'Upper Right \( 528000.468, 435103.377\) \( 82d41\'48.8\d"W, 27d54\'56.3\d"N\)', r'Lower Right \( 528000.468, 417703.377\) \( 82d41\'55.5\d"W, 27d45\'32.2\d"N\)', r'Center \( 519850.468, 426403.377\) \( 82d46\'50.6\d"W, 27d50\'16.9\d"N\)', ]: self.assertRegex(infos, line) # CRS (skip the name because string depends on the GDAL/Proj versions). self.assertIn("NAD83 / Florida GDL Albers", infos) def test_compressed_file_based_raster_creation(self): rstfile = tempfile.NamedTemporaryFile(suffix='.tif') # Make a compressed copy of an existing raster. compressed = self.rs.warp({'papsz_options': {'compress': 'packbits'}, 'name': rstfile.name}) # Check physically if compression worked. self.assertLess(os.path.getsize(compressed.name), os.path.getsize(self.rs.name)) # Create file-based raster with options from scratch. compressed = GDALRaster({ 'datatype': 1, 'driver': 'tif', 'name': rstfile.name, 'width': 40, 'height': 40, 'srid': 3086, 'origin': (500000, 400000), 'scale': (100, -100), 'skew': (0, 0), 'bands': [{ 'data': range(40 ^ 2), 'nodata_value': 255, }], 'papsz_options': { 'compress': 'packbits', 'pixeltype': 'signedbyte', 'blockxsize': 23, 'blockysize': 23, } }) # Check if options used on creation are stored in metadata. # Reopening the raster ensures that all metadata has been written # to the file. 
compressed = GDALRaster(compressed.name) self.assertEqual(compressed.metadata['IMAGE_STRUCTURE']['COMPRESSION'], 'PACKBITS',) self.assertEqual(compressed.bands[0].metadata['IMAGE_STRUCTURE']['PIXELTYPE'], 'SIGNEDBYTE') if GDAL_VERSION >= (2, 1): self.assertIn('Block=40x23', compressed.info) def test_raster_warp(self): # Create in memory raster source = GDALRaster({ 'datatype': 1, 'driver': 'MEM', 'name': 'sourceraster', 'width': 4, 'height': 4, 'nr_of_bands': 1, 'srid': 3086, 'origin': (500000, 400000), 'scale': (100, -100), 'skew': (0, 0), 'bands': [{ 'data': range(16), 'nodata_value': 255, }], }) # Test altering the scale, width, and height of a raster data = { 'scale': [200, -200], 'width': 2, 'height': 2, } target = source.warp(data) self.assertEqual(target.width, data['width']) self.assertEqual(target.height, data['height']) self.assertEqual(target.scale, data['scale']) self.assertEqual(target.bands[0].datatype(), source.bands[0].datatype()) self.assertEqual(target.name, 'sourceraster_copy.MEM') result = target.bands[0].data() if numpy: result = result.flatten().tolist() self.assertEqual(result, [5, 7, 13, 15]) # Test altering the name and datatype (to float) data = { 'name': '/path/to/targetraster.tif', 'datatype': 6, } target = source.warp(data) self.assertEqual(target.bands[0].datatype(), 6) self.assertEqual(target.name, '/path/to/targetraster.tif') self.assertEqual(target.driver.name, 'MEM') result = target.bands[0].data() if numpy: result = result.flatten().tolist() self.assertEqual( result, [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0] ) def test_raster_warp_nodata_zone(self): # Create in memory raster. source = GDALRaster({ 'datatype': 1, 'driver': 'MEM', 'width': 4, 'height': 4, 'srid': 3086, 'origin': (500000, 400000), 'scale': (100, -100), 'skew': (0, 0), 'bands': [{ 'data': range(16), 'nodata_value': 23, }], }) # Warp raster onto a location that does not cover any pixels of the original. result = source.warp({'origin': (200000, 200000)}).bands[0].data() if numpy: result = result.flatten().tolist() # The result is an empty raster filled with the correct nodata value. self.assertEqual(result, [23] * 16) def test_raster_transform(self): tests = [ 3086, '3086', SpatialReference(3086), ] for srs in tests: with self.subTest(srs=srs): # Prepare tempfile and nodata value. rstfile = tempfile.NamedTemporaryFile(suffix='.tif') ndv = 99 # Create in file based raster. source = GDALRaster({ 'datatype': 1, 'driver': 'tif', 'name': rstfile.name, 'width': 5, 'height': 5, 'nr_of_bands': 1, 'srid': 4326, 'origin': (-5, 5), 'scale': (2, -2), 'skew': (0, 0), 'bands': [{ 'data': range(25), 'nodata_value': ndv, }], }) target = source.transform(srs) # Reload data from disk. target = GDALRaster(target.name) self.assertEqual(target.srs.srid, 3086) self.assertEqual(target.width, 7) self.assertEqual(target.height, 7) self.assertEqual(target.bands[0].datatype(), source.bands[0].datatype()) self.assertAlmostEqual(target.origin[0], 9124842.791079799, 3) self.assertAlmostEqual(target.origin[1], 1589911.6476407414, 3) self.assertAlmostEqual(target.scale[0], 223824.82664250192, 3) self.assertAlmostEqual(target.scale[1], -223824.82664250192, 3) self.assertEqual(target.skew, [0, 0]) result = target.bands[0].data() if numpy: result = result.flatten().tolist() # The reprojection of a raster that spans over a large area # skews the data matrix and might introduce nodata values. 
self.assertEqual( result, [ ndv, ndv, ndv, ndv, 4, ndv, ndv, ndv, ndv, 2, 3, 9, ndv, ndv, ndv, 1, 2, 8, 13, 19, ndv, 0, 6, 6, 12, 18, 18, 24, ndv, 10, 11, 16, 22, 23, ndv, ndv, ndv, 15, 21, 22, ndv, ndv, ndv, ndv, 20, ndv, ndv, ndv, ndv, ], ) class GDALBandTests(SimpleTestCase): rs_path = os.path.join(os.path.dirname(__file__), '../data/rasters/raster.tif') def test_band_data(self): rs = GDALRaster(self.rs_path) band = rs.bands[0] self.assertEqual(band.width, 163) self.assertEqual(band.height, 174) self.assertEqual(band.description, '') self.assertEqual(band.datatype(), 1) self.assertEqual(band.datatype(as_string=True), 'GDT_Byte') self.assertEqual(band.color_interp(), 1) self.assertEqual(band.color_interp(as_string=True), 'GCI_GrayIndex') self.assertEqual(band.nodata_value, 15) if numpy: data = band.data() assert_array = numpy.loadtxt( os.path.join(os.path.dirname(__file__), '../data/rasters/raster.numpy.txt') ) numpy.testing.assert_equal(data, assert_array) self.assertEqual(data.shape, (band.height, band.width)) def test_band_statistics(self): with tempfile.TemporaryDirectory() as tmp_dir: rs_path = os.path.join(tmp_dir, 'raster.tif') shutil.copyfile(self.rs_path, rs_path) rs = GDALRaster(rs_path) band = rs.bands[0] pam_file = rs_path + '.aux.xml' smin, smax, smean, sstd = band.statistics(approximate=True) self.assertEqual(smin, 0) self.assertEqual(smax, 9) self.assertAlmostEqual(smean, 2.842331288343558) self.assertAlmostEqual(sstd, 2.3965567248965356) smin, smax, smean, sstd = band.statistics(approximate=False, refresh=True) self.assertEqual(smin, 0) self.assertEqual(smax, 9) self.assertAlmostEqual(smean, 2.828326634228898) self.assertAlmostEqual(sstd, 2.4260526986669095) self.assertEqual(band.min, 0) self.assertEqual(band.max, 9) self.assertAlmostEqual(band.mean, 2.828326634228898) self.assertAlmostEqual(band.std, 2.4260526986669095) # Statistics are persisted into PAM file on band close rs = band = None self.assertTrue(os.path.isfile(pam_file)) def test_read_mode_error(self): # Open raster in read mode rs = GDALRaster(self.rs_path, write=False) band = rs.bands[0] # Setting attributes in write mode raises exception in the _flush method with self.assertRaises(GDALException): setattr(band, 'nodata_value', 10) def test_band_data_setters(self): # Create in-memory raster and get band rsmem = GDALRaster({ 'datatype': 1, 'driver': 'MEM', 'name': 'mem_rst', 'width': 10, 'height': 10, 'nr_of_bands': 1, 'srid': 4326, }) bandmem = rsmem.bands[0] # Set nodata value bandmem.nodata_value = 99 self.assertEqual(bandmem.nodata_value, 99) # Set data for entire dataset bandmem.data(range(100)) if numpy: numpy.testing.assert_equal(bandmem.data(), numpy.arange(100).reshape(10, 10)) else: self.assertEqual(bandmem.data(), list(range(100))) # Prepare data for setting values in subsequent tests block = list(range(100, 104)) packed_block = struct.pack('<' + 'B B B B', *block) # Set data from list bandmem.data(block, (1, 1), (2, 2)) result = bandmem.data(offset=(1, 1), size=(2, 2)) if numpy: numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2)) else: self.assertEqual(result, block) # Set data from packed block bandmem.data(packed_block, (1, 1), (2, 2)) result = bandmem.data(offset=(1, 1), size=(2, 2)) if numpy: numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2)) else: self.assertEqual(result, block) # Set data from bytes bandmem.data(bytes(packed_block), (1, 1), (2, 2)) result = bandmem.data(offset=(1, 1), size=(2, 2)) if numpy: numpy.testing.assert_equal(result, 
numpy.array(block).reshape(2, 2)) else: self.assertEqual(result, block) # Set data from bytearray bandmem.data(bytearray(packed_block), (1, 1), (2, 2)) result = bandmem.data(offset=(1, 1), size=(2, 2)) if numpy: numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2)) else: self.assertEqual(result, block) # Set data from memoryview bandmem.data(memoryview(packed_block), (1, 1), (2, 2)) result = bandmem.data(offset=(1, 1), size=(2, 2)) if numpy: numpy.testing.assert_equal(result, numpy.array(block).reshape(2, 2)) else: self.assertEqual(result, block) # Set data from numpy array if numpy: bandmem.data(numpy.array(block, dtype='int8').reshape(2, 2), (1, 1), (2, 2)) numpy.testing.assert_equal( bandmem.data(offset=(1, 1), size=(2, 2)), numpy.array(block).reshape(2, 2) ) # Test json input data rsmemjson = GDALRaster(JSON_RASTER) bandmemjson = rsmemjson.bands[0] if numpy: numpy.testing.assert_equal( bandmemjson.data(), numpy.array(range(25)).reshape(5, 5) ) else: self.assertEqual(bandmemjson.data(), list(range(25))) def test_band_statistics_automatic_refresh(self): rsmem = GDALRaster({ 'srid': 4326, 'width': 2, 'height': 2, 'bands': [{'data': [0] * 4, 'nodata_value': 99}], }) band = rsmem.bands[0] # Populate statistics cache self.assertEqual(band.statistics(), (0, 0, 0, 0)) # Change data band.data([1, 1, 0, 0]) # Statistics are properly updated self.assertEqual(band.statistics(), (0.0, 1.0, 0.5, 0.5)) # Change nodata_value band.nodata_value = 0 # Statistics are properly updated self.assertEqual(band.statistics(), (1.0, 1.0, 1.0, 0.0)) def test_band_statistics_empty_band(self): rsmem = GDALRaster({ 'srid': 4326, 'width': 1, 'height': 1, 'bands': [{'data': [0], 'nodata_value': 0}], }) self.assertEqual(rsmem.bands[0].statistics(), (None, None, None, None)) def test_band_delete_nodata(self): rsmem = GDALRaster({ 'srid': 4326, 'width': 1, 'height': 1, 'bands': [{'data': [0], 'nodata_value': 1}], }) if GDAL_VERSION < (2, 1): msg = 'GDAL >= 2.1 required to delete nodata values.' with self.assertRaisesMessage(ValueError, msg): rsmem.bands[0].nodata_value = None else: rsmem.bands[0].nodata_value = None self.assertIsNone(rsmem.bands[0].nodata_value) def test_band_data_replication(self): band = GDALRaster({ 'srid': 4326, 'width': 3, 'height': 3, 'bands': [{'data': range(10, 19), 'nodata_value': 0}], }).bands[0] # Variations for input (data, shape, expected result). combos = ( ([1], (1, 1), [1] * 9), (range(3), (1, 3), [0, 0, 0, 1, 1, 1, 2, 2, 2]), (range(3), (3, 1), [0, 1, 2, 0, 1, 2, 0, 1, 2]), ) for combo in combos: band.data(combo[0], shape=combo[1]) if numpy: numpy.testing.assert_equal(band.data(), numpy.array(combo[2]).reshape(3, 3)) else: self.assertEqual(band.data(), list(combo[2]))
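
# For orientation, a minimal sketch of the dict-based constructor and band
# access that the tests above exercise. It assumes a GDAL build with raster
# support; the values are illustrative.
from django.contrib.gis.gdal import GDALRaster
from django.contrib.gis.shortcuts import numpy

rst = GDALRaster({
    'srid': 4326,
    'width': 3,
    'height': 3,
    'datatype': 1,       # GDT_Byte
    'origin': (-5, 5),
    'scale': (2, -2),    # negative y scale: north-up raster
    'bands': [{'data': range(9), 'nodata_value': 255}],
})
band = rst.bands[0]
result = band.data()
# As in the tests above, data() returns a (height, width) numpy array when
# numpy is installed, and a flat list otherwise.
if numpy:
    result = result.flatten().tolist()
assert result == list(range(9))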
3e613bdbfa71ad7d1649c3fc70860a107ae51330c8a4ae79324a0951505b5c8e
import inspect import os import warnings from importlib import import_module from django.core.exceptions import ImproperlyConfigured from django.utils.deprecation import RemovedInDjango41Warning from django.utils.module_loading import import_string, module_has_submodule APPS_MODULE_NAME = 'apps' MODELS_MODULE_NAME = 'models' class AppConfig: """Class representing a Django application and its configuration.""" def __init__(self, app_name, app_module): # Full Python path to the application e.g. 'django.contrib.admin'. self.name = app_name # Root module for the application e.g. <module 'django.contrib.admin' # from 'django/contrib/admin/__init__.py'>. self.module = app_module # Reference to the Apps registry that holds this AppConfig. Set by the # registry when it registers the AppConfig instance. self.apps = None # The following attributes could be defined at the class level in a # subclass, hence the test-and-set pattern. # Last component of the Python path to the application e.g. 'admin'. # This value must be unique across a Django project. if not hasattr(self, 'label'): self.label = app_name.rpartition(".")[2] # Human-readable name for the application e.g. "Admin". if not hasattr(self, 'verbose_name'): self.verbose_name = self.label.title() # Filesystem path to the application directory e.g. # '/path/to/django/contrib/admin'. if not hasattr(self, 'path'): self.path = self._path_from_module(app_module) # Module containing models e.g. <module 'django.contrib.admin.models' # from 'django/contrib/admin/models.py'>. Set by import_models(). # None if the application doesn't have a models module. self.models_module = None # Mapping of lowercase model names to model classes. Initially set to # None to prevent accidental access before import_models() runs. self.models = None def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self.label) def _path_from_module(self, module): """Attempt to determine app's filesystem path from its module.""" # See #21874 for extended discussion of the behavior of this method in # various cases. # Convert paths to list because Python's _NamespacePath doesn't support # indexing. paths = list(getattr(module, '__path__', [])) if len(paths) != 1: filename = getattr(module, '__file__', None) if filename is not None: paths = [os.path.dirname(filename)] else: # For unknown reasons, sometimes the list returned by __path__ # contains duplicates that must be removed (#25246). paths = list(set(paths)) if len(paths) > 1: raise ImproperlyConfigured( "The app module %r has multiple filesystem locations (%r); " "you must configure this app with an AppConfig subclass " "with a 'path' class attribute." % (module, paths)) elif not paths: raise ImproperlyConfigured( "The app module %r has no filesystem location, " "you must configure this app with an AppConfig subclass " "with a 'path' class attribute." % module) return paths[0] @classmethod def create(cls, entry): """ Factory that creates an app config from an entry in INSTALLED_APPS. """ # create() eventually returns app_config_class(app_name, app_module). app_config_class = None app_name = None app_module = None # If import_module succeeds, entry points to the app module. try: app_module = import_module(entry) except Exception: pass else: # If app_module has an apps submodule that defines a single # AppConfig subclass, use it automatically. # To prevent this, an AppConfig subclass can declare a class # variable default = False. 
# If the apps module defines more than one AppConfig subclass, # the default one can declare default = True. if module_has_submodule(app_module, APPS_MODULE_NAME): mod_path = '%s.%s' % (entry, APPS_MODULE_NAME) mod = import_module(mod_path) # Check if there's exactly one AppConfig candidate, # excluding those that explicitly define default = False. app_configs = [ (name, candidate) for name, candidate in inspect.getmembers(mod, inspect.isclass) if ( issubclass(candidate, cls) and candidate is not cls and getattr(candidate, 'default', True) ) ] if len(app_configs) == 1: app_config_class = app_configs[0][1] app_config_name = '%s.%s' % (mod_path, app_configs[0][0]) else: # Check if there's exactly one AppConfig subclass, # among those that explicitly define default = True. app_configs = [ (name, candidate) for name, candidate in app_configs if getattr(candidate, 'default', False) ] if len(app_configs) > 1: candidates = [repr(name) for name, _ in app_configs] raise RuntimeError( '%r declares more than one default AppConfig: ' '%s.' % (mod_path, ', '.join(candidates)) ) elif len(app_configs) == 1: app_config_class = app_configs[0][1] app_config_name = '%s.%s' % (mod_path, app_configs[0][0]) # If app_module specifies a default_app_config, follow the link. # default_app_config is deprecated, but still takes over the # automatic detection for backwards compatibility during the # deprecation period. try: new_entry = app_module.default_app_config except AttributeError: # Use the default app config class if we didn't find anything. if app_config_class is None: app_config_class = cls app_name = entry else: message = ( '%r defines default_app_config = %r. ' % (entry, new_entry) ) if new_entry == app_config_name: message += ( 'Django now detects this configuration automatically. ' 'You can remove default_app_config.' ) else: message += ( "However, Django's automatic detection picked another " "configuration, %r. You should move the default " "config class to the apps submodule of your " "application and, if this module defines several " "config classes, mark the default one with default = " "True." % app_config_name ) warnings.warn(message, RemovedInDjango41Warning, stacklevel=2) entry = new_entry app_config_class = None # If import_string succeeds, entry is an app config class. if app_config_class is None: try: app_config_class = import_string(entry) except Exception: pass # If both import_module and import_string failed, it means that entry # doesn't have a valid value. if app_module is None and app_config_class is None: # If the last component of entry starts with an uppercase letter, # then it was likely intended to be an app config class; if not, # an app module. Provide a nice error message in both cases. mod_path, _, cls_name = entry.rpartition('.') if mod_path and cls_name[0].isupper(): # We could simply re-trigger the string import exception, but # we're going the extra mile and providing a better error # message for typos in INSTALLED_APPS. # This may raise ImportError, which is the best exception # possible if the module at mod_path cannot be imported. mod = import_module(mod_path) candidates = [ repr(name) for name, candidate in inspect.getmembers(mod, inspect.isclass) if issubclass(candidate, cls) and candidate is not cls ] msg = "Module '%s' does not contain a '%s' class." % (mod_path, cls_name) if candidates: msg += ' Choices are: %s.' % ', '.join(candidates) raise ImportError(msg) else: # Re-trigger the module import exception. import_module(entry) # Check for obvious errors. 
(This check prevents duck typing, but # it could be removed if it became a problem in practice.) if not issubclass(app_config_class, AppConfig): raise ImproperlyConfigured( "'%s' isn't a subclass of AppConfig." % entry) # Obtain app name here rather than in AppClass.__init__ to keep # all error checking for entries in INSTALLED_APPS in one place. if app_name is None: try: app_name = app_config_class.name except AttributeError: raise ImproperlyConfigured( "'%s' must supply a name attribute." % entry ) # Ensure app_name points to a valid module. try: app_module = import_module(app_name) except ImportError: raise ImproperlyConfigured( "Cannot import '%s'. Check that '%s.%s.name' is correct." % ( app_name, app_config_class.__module__, app_config_class.__qualname__, ) ) # Entry is a path to an app config class. return app_config_class(app_name, app_module) def get_model(self, model_name, require_ready=True): """ Return the model with the given case-insensitive model_name. Raise LookupError if no model exists with this name. """ if require_ready: self.apps.check_models_ready() else: self.apps.check_apps_ready() try: return self.models[model_name.lower()] except KeyError: raise LookupError( "App '%s' doesn't have a '%s' model." % (self.label, model_name)) def get_models(self, include_auto_created=False, include_swapped=False): """ Return an iterable of models. By default, the following models aren't included: - auto-created models for many-to-many relations without an explicit intermediate table, - models that have been swapped out. Set the corresponding keyword argument to True to include such models. Keyword arguments aren't documented; they're a private API. """ self.apps.check_models_ready() for model in self.models.values(): if model._meta.auto_created and not include_auto_created: continue if model._meta.swapped and not include_swapped: continue yield model def import_models(self): # Dictionary of models for this app, primarily maintained in the # 'all_models' attribute of the Apps this AppConfig is attached to. self.models = self.apps.all_models[self.label] if module_has_submodule(self.module, MODELS_MODULE_NAME): models_module_name = '%s.%s' % (self.name, MODELS_MODULE_NAME) self.models_module = import_module(models_module_name) def ready(self): """ Override this method in subclasses to run code when Django starts. """
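
# To make the discovery rules in create() concrete, a hypothetical
# myapp/apps.py sketch; 'myapp' and MyAppConfig are illustrative names, not
# part of the module above.
from django.apps import AppConfig


class MyAppConfig(AppConfig):
    # create() reads `name` when instantiating the config, so it's required.
    name = 'myapp'
    verbose_name = 'My App'
    # If this apps module defined several AppConfig subclasses, setting
    # default = True here would mark this one as the automatic choice, while
    # default = False would exclude a class from detection entirely.

    def ready(self):
        # Called once the app registry is fully populated; a typical spot
        # for connecting signal receivers.
        pass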
936f9f44908a9f8e66c2a13897cae04a36d7e9ad8559253d90f5d75acb63d9ee
import calendar
import datetime

from django.utils.html import avoid_wrapping
from django.utils.timezone import is_aware, utc
from django.utils.translation import gettext, ngettext_lazy

TIME_STRINGS = {
    'year': ngettext_lazy('%d year', '%d years'),
    'month': ngettext_lazy('%d month', '%d months'),
    'week': ngettext_lazy('%d week', '%d weeks'),
    'day': ngettext_lazy('%d day', '%d days'),
    'hour': ngettext_lazy('%d hour', '%d hours'),
    'minute': ngettext_lazy('%d minute', '%d minutes'),
}

TIMESINCE_CHUNKS = (
    (60 * 60 * 24 * 365, 'year'),
    (60 * 60 * 24 * 30, 'month'),
    (60 * 60 * 24 * 7, 'week'),
    (60 * 60 * 24, 'day'),
    (60 * 60, 'hour'),
    (60, 'minute'),
)


def timesince(d, now=None, reversed=False, time_strings=None, depth=2):
    """
    Take two datetime objects and return the time between d and now as a
    nicely formatted string, e.g. "10 minutes". If d occurs after now, return
    "0 minutes".

    Units used are years, months, weeks, days, hours, and minutes.
    Seconds and microseconds are ignored. Up to `depth` adjacent units will be
    displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
    possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.

    `time_strings` is an optional dict of strings to replace the default
    TIME_STRINGS dict.

    `depth` is an optional integer to control the number of adjacent time
    units returned.

    Adapted from
    https://web.archive.org/web/20060617175230/http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
    """
    if time_strings is None:
        time_strings = TIME_STRINGS
    if depth <= 0:
        raise ValueError('depth must be greater than 0.')
    # Convert datetime.date to datetime.datetime for comparison.
    if not isinstance(d, datetime.datetime):
        d = datetime.datetime(d.year, d.month, d.day)
    if now and not isinstance(now, datetime.datetime):
        now = datetime.datetime(now.year, now.month, now.day)

    now = now or datetime.datetime.now(utc if is_aware(d) else None)

    if reversed:
        d, now = now, d
    delta = now - d

    # Deal with leap years by subtracting the number of leap days.
    leapdays = calendar.leapdays(d.year, now.year)
    if leapdays != 0:
        if calendar.isleap(d.year):
            leapdays -= 1
        elif calendar.isleap(now.year):
            leapdays += 1
    delta -= datetime.timedelta(leapdays)

    # Ignore microseconds.
    since = delta.days * 24 * 60 * 60 + delta.seconds
    if since <= 0:
        # d is in the future compared to now, stop processing.
        return avoid_wrapping(time_strings['minute'] % 0)
    for i, (seconds, name) in enumerate(TIMESINCE_CHUNKS):
        count = since // seconds
        if count != 0:
            break
    else:
        return avoid_wrapping(time_strings['minute'] % 0)
    result = []
    current_depth = 0
    while i < len(TIMESINCE_CHUNKS) and current_depth < depth:
        seconds, name = TIMESINCE_CHUNKS[i]
        count = since // seconds
        if count == 0:
            break
        result.append(avoid_wrapping(time_strings[name] % count))
        since -= seconds * count
        current_depth += 1
        i += 1
    return gettext(', ').join(result)


def timeuntil(d, now=None, time_strings=None, depth=2):
    """
    Like timesince, but return a string measuring the time until the given
    time.
    """
    return timesince(d, now, reversed=True, time_strings=time_strings, depth=depth)
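
# A brief usage sketch of the two helpers above (this module ships as
# django.utils.timesince); the expected strings assume the default English
# TIME_STRINGS.
import datetime
from django.utils.timesince import timesince, timeuntil

now = datetime.datetime(2021, 1, 1)
then = datetime.datetime(2019, 10, 29)
timesince(then, now)            # '1 year, 2 months' (default depth=2)
timesince(then, now, depth=1)   # '1 year'
timeuntil(now, then)            # same span measured forward: '1 year, 2 months'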
9dcebffbe517b5060cf834fef6cc26409c5e552bcceabf23c079071f205948de
import datetime import json import mimetypes import os import re import sys import time from email.header import Header from http.client import responses from urllib.parse import quote, urlparse from django.conf import settings from django.core import signals, signing from django.core.exceptions import DisallowedRedirect from django.core.serializers.json import DjangoJSONEncoder from django.http.cookie import SimpleCookie from django.utils import timezone from django.utils.encoding import iri_to_uri from django.utils.http import http_date from django.utils.regex_helper import _lazy_re_compile _charset_from_content_type_re = _lazy_re_compile(r';\s*charset=(?P<charset>[^\s;]+)', re.I) class BadHeaderError(ValueError): pass class HttpResponseBase: """ An HTTP response base class with dictionary-accessed headers. This class doesn't handle content. It should not be used directly. Use the HttpResponse and StreamingHttpResponse subclasses instead. """ status_code = 200 def __init__(self, content_type=None, status=None, reason=None, charset=None): # _headers is a mapping of the lowercase name to the original case of # the header (required for working with legacy systems) and the header # value. Both the name of the header and its value are ASCII strings. self._headers = {} self._resource_closers = [] # This parameter is set by the handler. It's necessary to preserve the # historical behavior of request_finished. self._handler_class = None self.cookies = SimpleCookie() self.closed = False if status is not None: try: self.status_code = int(status) except (ValueError, TypeError): raise TypeError('HTTP status code must be an integer.') if not 100 <= self.status_code <= 599: raise ValueError('HTTP status code must be an integer from 100 to 599.') self._reason_phrase = reason self._charset = charset if content_type is None: content_type = 'text/html; charset=%s' % self.charset self['Content-Type'] = content_type @property def reason_phrase(self): if self._reason_phrase is not None: return self._reason_phrase # Leave self._reason_phrase unset in order to use the default # reason phrase for status code. return responses.get(self.status_code, 'Unknown Status Code') @reason_phrase.setter def reason_phrase(self, value): self._reason_phrase = value @property def charset(self): if self._charset is not None: return self._charset content_type = self.get('Content-Type', '') matched = _charset_from_content_type_re.search(content_type) if matched: # Extract the charset and strip its double quotes return matched['charset'].replace('"', '') return settings.DEFAULT_CHARSET @charset.setter def charset(self, value): self._charset = value def serialize_headers(self): """HTTP headers as a bytestring.""" def to_bytes(val, encoding): return val if isinstance(val, bytes) else val.encode(encoding) headers = [ (to_bytes(key, 'ascii') + b': ' + to_bytes(value, 'latin-1')) for key, value in self._headers.values() ] return b'\r\n'.join(headers) __bytes__ = serialize_headers @property def _content_type_for_repr(self): return ', "%s"' % self['Content-Type'] if 'Content-Type' in self else '' def _convert_to_charset(self, value, charset, mime_encode=False): """ Convert headers key/value to ascii/latin-1 native strings. `charset` must be 'ascii' or 'latin-1'. If `mime_encode` is True and `value` can't be represented in the given charset, apply MIME-encoding. 
""" if not isinstance(value, (bytes, str)): value = str(value) if ((isinstance(value, bytes) and (b'\n' in value or b'\r' in value)) or isinstance(value, str) and ('\n' in value or '\r' in value)): raise BadHeaderError("Header values can't contain newlines (got %r)" % value) try: if isinstance(value, str): # Ensure string is valid in given charset value.encode(charset) else: # Convert bytestring using given charset value = value.decode(charset) except UnicodeError as e: if mime_encode: value = Header(value, 'utf-8', maxlinelen=sys.maxsize).encode() else: e.reason += ', HTTP response headers must be in %s format' % charset raise return value def __setitem__(self, header, value): header = self._convert_to_charset(header, 'ascii') value = self._convert_to_charset(value, 'latin-1', mime_encode=True) self._headers[header.lower()] = (header, value) def __delitem__(self, header): self._headers.pop(header.lower(), False) def __getitem__(self, header): return self._headers[header.lower()][1] def has_header(self, header): """Case-insensitive check for a header.""" return header.lower() in self._headers __contains__ = has_header def items(self): return self._headers.values() def get(self, header, alternate=None): return self._headers.get(header.lower(), (None, alternate))[1] def set_cookie(self, key, value='', max_age=None, expires=None, path='/', domain=None, secure=False, httponly=False, samesite=None): """ Set a cookie. ``expires`` can be: - a string in the correct format, - a naive ``datetime.datetime`` object in UTC, - an aware ``datetime.datetime`` object in any time zone. If it is a ``datetime.datetime`` object then calculate ``max_age``. """ self.cookies[key] = value if expires is not None: if isinstance(expires, datetime.datetime): if timezone.is_aware(expires): expires = timezone.make_naive(expires, timezone.utc) delta = expires - expires.utcnow() # Add one second so the date matches exactly (a fraction of # time gets lost between converting to a timedelta and # then the date string). delta = delta + datetime.timedelta(seconds=1) # Just set max_age - the max_age logic will set expires. expires = None max_age = max(0, delta.days * 86400 + delta.seconds) else: self.cookies[key]['expires'] = expires else: self.cookies[key]['expires'] = '' if max_age is not None: self.cookies[key]['max-age'] = max_age # IE requires expires, so set it if hasn't been already. if not expires: self.cookies[key]['expires'] = http_date(time.time() + max_age) if path is not None: self.cookies[key]['path'] = path if domain is not None: self.cookies[key]['domain'] = domain if secure: self.cookies[key]['secure'] = True if httponly: self.cookies[key]['httponly'] = True if samesite: if samesite.lower() not in ('lax', 'none', 'strict'): raise ValueError('samesite must be "lax", "none", or "strict".') self.cookies[key]['samesite'] = samesite def setdefault(self, key, value): """Set a header unless it has already been set.""" if key not in self: self[key] = value def set_signed_cookie(self, key, value, salt='', **kwargs): value = signing.get_cookie_signer(salt=key + salt).sign(value) return self.set_cookie(key, value, **kwargs) def delete_cookie(self, key, path='/', domain=None, samesite=None): # Browsers can ignore the Set-Cookie header if the cookie doesn't use # the secure flag and: # - the cookie name starts with "__Host-" or "__Secure-", or # - the samesite is "none". 
        secure = (
            key.startswith(('__Secure-', '__Host-')) or
            (samesite and samesite.lower() == 'none')
        )
        self.set_cookie(
            key, max_age=0, path=path, domain=domain, secure=secure,
            expires='Thu, 01 Jan 1970 00:00:00 GMT', samesite=samesite,
        )

    # Common methods used by subclasses

    def make_bytes(self, value):
        """Turn a value into a bytestring encoded in the output charset."""
        # Per PEP 3333, this response body must be bytes. To avoid returning
        # an instance of a subclass, this function returns `bytes(value)`.
        # This doesn't make a copy when `value` already contains bytes.

        # Handle string types -- we can't rely on force_bytes here because:
        # - Python attempts str conversion first
        # - when self._charset != 'utf-8' it re-encodes the content
        if isinstance(value, (bytes, memoryview)):
            return bytes(value)
        if isinstance(value, str):
            return bytes(value.encode(self.charset))
        # Handle non-string types.
        return str(value).encode(self.charset)

    # These methods partially implement the file-like object interface.
    # See https://docs.python.org/library/io.html#io.IOBase

    # The WSGI server must call this method upon completion of the request.
    # See http://blog.dscpl.com.au/2012/10/obligations-for-calling-close-on.html
    def close(self):
        for closer in self._resource_closers:
            try:
                closer()
            except Exception:
                pass
        # Free resources that were still referenced.
        self._resource_closers.clear()
        self.closed = True
        signals.request_finished.send(sender=self._handler_class)

    def write(self, content):
        raise OSError('This %s instance is not writable' % self.__class__.__name__)

    def flush(self):
        pass

    def tell(self):
        raise OSError('This %s instance cannot tell its position' % self.__class__.__name__)

    # These methods partially implement a stream-like object interface.
    # See https://docs.python.org/library/io.html#io.IOBase

    def readable(self):
        return False

    def seekable(self):
        return False

    def writable(self):
        return False

    def writelines(self, lines):
        raise OSError('This %s instance is not writable' % self.__class__.__name__)


class HttpResponse(HttpResponseBase):
    """
    An HTTP response class with a string as content.

    This content can be read, appended to, or replaced.
    """

    streaming = False

    def __init__(self, content=b'', *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Content is a bytestring. See the `content` property methods.
        self.content = content

    def __repr__(self):
        return '<%(cls)s status_code=%(status_code)d%(content_type)s>' % {
            'cls': self.__class__.__name__,
            'status_code': self.status_code,
            'content_type': self._content_type_for_repr,
        }

    def serialize(self):
        """Full HTTP message, including headers, as a bytestring."""
        return self.serialize_headers() + b'\r\n\r\n' + self.content

    __bytes__ = serialize

    @property
    def content(self):
        return b''.join(self._container)

    @content.setter
    def content(self, value):
        # Consume iterators upon assignment to allow repeated iteration.
        if hasattr(value, '__iter__') and not isinstance(value, (bytes, str)):
            content = b''.join(self.make_bytes(chunk) for chunk in value)
            if hasattr(value, 'close'):
                try:
                    value.close()
                except Exception:
                    pass
        else:
            content = self.make_bytes(value)
        # Create a list of properly encoded bytestrings to support write().
self._container = [content] def __iter__(self): return iter(self._container) def write(self, content): self._container.append(self.make_bytes(content)) def tell(self): return len(self.content) def getvalue(self): return self.content def writable(self): return True def writelines(self, lines): for line in lines: self.write(line) class StreamingHttpResponse(HttpResponseBase): """ A streaming HTTP response class with an iterator as content. This should only be iterated once, when the response is streamed to the client. However, it can be appended to or replaced with a new iterator that wraps the original content (or yields entirely new content). """ streaming = True def __init__(self, streaming_content=(), *args, **kwargs): super().__init__(*args, **kwargs) # `streaming_content` should be an iterable of bytestrings. # See the `streaming_content` property methods. self.streaming_content = streaming_content @property def content(self): raise AttributeError( "This %s instance has no `content` attribute. Use " "`streaming_content` instead." % self.__class__.__name__ ) @property def streaming_content(self): return map(self.make_bytes, self._iterator) @streaming_content.setter def streaming_content(self, value): self._set_streaming_content(value) def _set_streaming_content(self, value): # Ensure we can never iterate on "value" more than once. self._iterator = iter(value) if hasattr(value, 'close'): self._resource_closers.append(value.close) def __iter__(self): return self.streaming_content def getvalue(self): return b''.join(self.streaming_content) class FileResponse(StreamingHttpResponse): """ A streaming HTTP response class optimized for files. """ block_size = 4096 def __init__(self, *args, as_attachment=False, filename='', **kwargs): self.as_attachment = as_attachment self.filename = filename super().__init__(*args, **kwargs) def _set_streaming_content(self, value): if not hasattr(value, 'read'): self.file_to_stream = None return super()._set_streaming_content(value) self.file_to_stream = filelike = value if hasattr(filelike, 'close'): self._resource_closers.append(filelike.close) value = iter(lambda: filelike.read(self.block_size), b'') self.set_headers(filelike) super()._set_streaming_content(value) def set_headers(self, filelike): """ Set some common response headers (Content-Length, Content-Type, and Content-Disposition) based on the `filelike` response content. """ encoding_map = { 'bzip2': 'application/x-bzip', 'gzip': 'application/gzip', 'xz': 'application/x-xz', } filename = getattr(filelike, 'name', None) filename = filename if (isinstance(filename, str) and filename) else self.filename if os.path.isabs(filename): self['Content-Length'] = os.path.getsize(filelike.name) elif hasattr(filelike, 'getbuffer'): self['Content-Length'] = filelike.getbuffer().nbytes if self.get('Content-Type', '').startswith('text/html'): if filename: content_type, encoding = mimetypes.guess_type(filename) # Encoding isn't set to prevent browsers from automatically # uncompressing files. 
content_type = encoding_map.get(encoding, content_type) self['Content-Type'] = content_type or 'application/octet-stream' else: self['Content-Type'] = 'application/octet-stream' filename = self.filename or os.path.basename(filename) if filename: disposition = 'attachment' if self.as_attachment else 'inline' try: filename.encode('ascii') file_expr = 'filename="{}"'.format(filename) except UnicodeEncodeError: file_expr = "filename*=utf-8''{}".format(quote(filename)) self['Content-Disposition'] = '{}; {}'.format(disposition, file_expr) elif self.as_attachment: self['Content-Disposition'] = 'attachment' class HttpResponseRedirectBase(HttpResponse): allowed_schemes = ['http', 'https', 'ftp'] def __init__(self, redirect_to, *args, **kwargs): super().__init__(*args, **kwargs) self['Location'] = iri_to_uri(redirect_to) parsed = urlparse(str(redirect_to)) if parsed.scheme and parsed.scheme not in self.allowed_schemes: raise DisallowedRedirect("Unsafe redirect to URL with protocol '%s'" % parsed.scheme) url = property(lambda self: self['Location']) def __repr__(self): return '<%(cls)s status_code=%(status_code)d%(content_type)s, url="%(url)s">' % { 'cls': self.__class__.__name__, 'status_code': self.status_code, 'content_type': self._content_type_for_repr, 'url': self.url, } class HttpResponseRedirect(HttpResponseRedirectBase): status_code = 302 class HttpResponsePermanentRedirect(HttpResponseRedirectBase): status_code = 301 class HttpResponseNotModified(HttpResponse): status_code = 304 def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) del self['content-type'] @HttpResponse.content.setter def content(self, value): if value: raise AttributeError("You cannot set content to a 304 (Not Modified) response") self._container = [] class HttpResponseBadRequest(HttpResponse): status_code = 400 class HttpResponseNotFound(HttpResponse): status_code = 404 class HttpResponseForbidden(HttpResponse): status_code = 403 class HttpResponseNotAllowed(HttpResponse): status_code = 405 def __init__(self, permitted_methods, *args, **kwargs): super().__init__(*args, **kwargs) self['Allow'] = ', '.join(permitted_methods) def __repr__(self): return '<%(cls)s [%(methods)s] status_code=%(status_code)d%(content_type)s>' % { 'cls': self.__class__.__name__, 'status_code': self.status_code, 'content_type': self._content_type_for_repr, 'methods': self['Allow'], } class HttpResponseGone(HttpResponse): status_code = 410 class HttpResponseServerError(HttpResponse): status_code = 500 class Http404(Exception): pass class JsonResponse(HttpResponse): """ An HTTP response class that consumes data to be serialized to JSON. :param data: Data to be dumped into json. By default only ``dict`` objects are allowed to be passed due to a security flaw before EcmaScript 5. See the ``safe`` parameter for more information. :param encoder: Should be a json encoder class. Defaults to ``django.core.serializers.json.DjangoJSONEncoder``. :param safe: Controls if only ``dict`` objects may be serialized. Defaults to ``True``. :param json_dumps_params: A dictionary of kwargs passed to json.dumps(). """ def __init__(self, data, encoder=DjangoJSONEncoder, safe=True, json_dumps_params=None, **kwargs): if safe and not isinstance(data, dict): raise TypeError( 'In order to allow non-dict objects to be serialized set the ' 'safe parameter to False.' 
) if json_dumps_params is None: json_dumps_params = {} kwargs.setdefault('content_type', 'application/json') data = json.dumps(data, cls=encoder, **json_dumps_params) super().__init__(content=data, **kwargs)
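

# ----------------------------------------------------------------------
# Editor's sketch -- not part of Django. A minimal demonstration of the
# response API defined above, runnable as `python response.py` with
# Django installed. The cookie name/value and JSON payload are
# hypothetical; settings are configured inline only to satisfy the
# charset lookup performed by make_bytes().
if __name__ == '__main__':
    from django.conf import settings as _settings
    if not _settings.configured:
        _settings.configure(DEFAULT_CHARSET='utf-8')

    resp = HttpResponse('hello', content_type='text/plain')
    # Header access is case-insensitive.
    assert resp.has_header('CONTENT-TYPE')
    # With no explicit `expires`, set_cookie() derives one from max_age.
    resp.set_cookie('sessionid', 'abc123', max_age=3600, secure=True, samesite='Lax')
    assert resp.content == b'hello'

    # JsonResponse serializes dicts by default; pass safe=False for other
    # top-level types such as lists.
    json_resp = JsonResponse([1, 2, 3], safe=False)
    assert json_resp['Content-Type'] == 'application/json'
    print(resp.serialize_headers().decode('ascii'))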
7dca66d253358f3dfbda3d68ceffb8b9fee4d0f47d8ddef2fa4d25e78ff1b05c
import pkgutil
import sys
from importlib import import_module, reload

from django.apps import apps
from django.conf import settings
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.recorder import MigrationRecorder

from .exceptions import (
    AmbiguityError, BadMigrationError, InconsistentMigrationHistory,
    NodeNotFoundError,
)

MIGRATIONS_MODULE_NAME = 'migrations'


class MigrationLoader:
    """
    Load migration files from disk and their status from the database.

    Migration files are expected to live in the "migrations" directory of
    an app. Their names are entirely unimportant from a code perspective,
    but will probably follow the 1234_name.py convention.

    On initialization, this class will scan those directories, and open and
    read the Python files, looking for a class called Migration, which should
    inherit from django.db.migrations.Migration. See
    django.db.migrations.migration for what that looks like.

    Some migrations will be marked as "replacing" another set of migrations.
    These are loaded into a separate set of migrations away from the main ones.
    If all the migrations they replace are either unapplied or missing from
    disk, then they are injected into the main set, replacing the named
    migrations. Any dependency pointers to the replaced migrations are
    re-pointed to the new migration.

    This does mean that this class MUST also talk to the database as well as
    to disk, but this is probably fine. We're already not just operating
    in memory.
    """

    def __init__(
        self, connection, load=True, ignore_no_migrations=False,
        replace_migrations=True,
    ):
        self.connection = connection
        self.disk_migrations = None
        self.applied_migrations = None
        self.ignore_no_migrations = ignore_no_migrations
        self.replace_migrations = replace_migrations
        if load:
            self.build_graph()

    @classmethod
    def migrations_module(cls, app_label):
        """
        Return the path to the migrations module for the specified app_label
        and a boolean indicating if the module is specified in
        settings.MIGRATION_MODULES.
        """
        if app_label in settings.MIGRATION_MODULES:
            return settings.MIGRATION_MODULES[app_label], True
        else:
            app_package_name = apps.get_app_config(app_label).name
            return '%s.%s' % (app_package_name, MIGRATIONS_MODULE_NAME), False

    def load_disk(self):
        """Load the migrations from all INSTALLED_APPS from disk."""
        self.disk_migrations = {}
        self.unmigrated_apps = set()
        self.migrated_apps = set()
        for app_config in apps.get_app_configs():
            # Get the migrations module directory
            module_name, explicit = self.migrations_module(app_config.label)
            if module_name is None:
                self.unmigrated_apps.add(app_config.label)
                continue
            was_loaded = module_name in sys.modules
            try:
                module = import_module(module_name)
            except ModuleNotFoundError as e:
                if (
                    (explicit and self.ignore_no_migrations) or
                    (not explicit and MIGRATIONS_MODULE_NAME in e.name.split('.'))
                ):
                    self.unmigrated_apps.add(app_config.label)
                    continue
                raise
            else:
                # Empty directories are namespaces.
                # getattr() needed on PY36 and older (replace w/attribute access).
                if getattr(module, '__file__', None) is None:
                    self.unmigrated_apps.add(app_config.label)
                    continue
                # Module is not a package (e.g. migrations.py).
                if not hasattr(module, '__path__'):
                    self.unmigrated_apps.add(app_config.label)
                    continue
                # Force a reload if it's already loaded (tests need this)
                if was_loaded:
                    reload(module)
            self.migrated_apps.add(app_config.label)
            migration_names = {
                name for _, name, is_pkg in pkgutil.iter_modules(module.__path__)
                if not is_pkg and name[0] not in '_~'
            }
            # Load migrations
            for migration_name in migration_names:
                migration_path = '%s.%s' % (module_name, migration_name)
                try:
                    migration_module = import_module(migration_path)
                except ImportError as e:
                    if 'bad magic number' in str(e):
                        raise ImportError(
                            "Couldn't import %r as it appears to be a stale "
                            ".pyc file." % migration_path
                        ) from e
                    else:
                        raise
                if not hasattr(migration_module, "Migration"):
                    raise BadMigrationError(
                        "Migration %s in app %s has no Migration class" % (migration_name, app_config.label)
                    )
                self.disk_migrations[app_config.label, migration_name] = migration_module.Migration(
                    migration_name,
                    app_config.label,
                )

    def get_migration(self, app_label, name_prefix):
        """Return the named migration or raise NodeNotFoundError."""
        return self.graph.nodes[app_label, name_prefix]

    def get_migration_by_prefix(self, app_label, name_prefix):
        """
        Return the migration(s) which match the given app label and name_prefix.
        """
        # Do the search
        results = []
        for migration_app_label, migration_name in self.disk_migrations:
            if migration_app_label == app_label and migration_name.startswith(name_prefix):
                results.append((migration_app_label, migration_name))
        if len(results) > 1:
            raise AmbiguityError(
                "There is more than one migration for '%s' with the prefix '%s'" % (app_label, name_prefix)
            )
        elif not results:
            raise KeyError("There are no migrations for '%s' with the prefix '%s'" % (app_label, name_prefix))
        else:
            return self.disk_migrations[results[0]]

    def check_key(self, key, current_app):
        if (key[1] != "__first__" and key[1] != "__latest__") or key in self.graph:
            return key
        # Special-case __first__, which means "the first migration" for
        # migrated apps, and is ignored for unmigrated apps. It allows
        # makemigrations to declare dependencies on apps before they even have
        # migrations.
        if key[0] == current_app:
            # Ignore __first__ references to the same app (#22325)
            return
        if key[0] in self.unmigrated_apps:
            # This app isn't migrated, but something depends on it.
            # The models will get auto-added into the state, though
            # so we're fine.
            return
        if key[0] in self.migrated_apps:
            try:
                if key[1] == "__first__":
                    return self.graph.root_nodes(key[0])[0]
                else:  # "__latest__"
                    return self.graph.leaf_nodes(key[0])[0]
            except IndexError:
                if self.ignore_no_migrations:
                    return None
                else:
                    raise ValueError("Dependency on app with no migrations: %s" % key[0])
        raise ValueError("Dependency on unknown app: %s" % key[0])

    def add_internal_dependencies(self, key, migration):
        """
        Internal dependencies need to be added first to ensure `__first__`
        dependencies find the correct root node.
        """
        for parent in migration.dependencies:
            # Ignore __first__ references to the same app.
if parent[0] == key[0] and parent[1] != '__first__': self.graph.add_dependency(migration, key, parent, skip_validation=True) def add_external_dependencies(self, key, migration): for parent in migration.dependencies: # Skip internal dependencies if key[0] == parent[0]: continue parent = self.check_key(parent, key[0]) if parent is not None: self.graph.add_dependency(migration, key, parent, skip_validation=True) for child in migration.run_before: child = self.check_key(child, key[0]) if child is not None: self.graph.add_dependency(migration, child, key, skip_validation=True) def build_graph(self): """ Build a migration dependency graph using both the disk and database. You'll need to rebuild the graph if you apply migrations. This isn't usually a problem as generally migration stuff runs in a one-shot process. """ # Load disk data self.load_disk() # Load database data if self.connection is None: self.applied_migrations = {} else: recorder = MigrationRecorder(self.connection) self.applied_migrations = recorder.applied_migrations() # To start, populate the migration graph with nodes for ALL migrations # and their dependencies. Also make note of replacing migrations at this step. self.graph = MigrationGraph() self.replacements = {} for key, migration in self.disk_migrations.items(): self.graph.add_node(key, migration) # Replacing migrations. if migration.replaces: self.replacements[key] = migration for key, migration in self.disk_migrations.items(): # Internal (same app) dependencies. self.add_internal_dependencies(key, migration) # Add external dependencies now that the internal ones have been resolved. for key, migration in self.disk_migrations.items(): self.add_external_dependencies(key, migration) # Carry out replacements where possible and if enabled. if self.replace_migrations: for key, migration in self.replacements.items(): # Get applied status of each of this migration's replacement # targets. applied_statuses = [(target in self.applied_migrations) for target in migration.replaces] # The replacing migration is only marked as applied if all of # its replacement targets are. if all(applied_statuses): self.applied_migrations[key] = migration else: self.applied_migrations.pop(key, None) # A replacing migration can be used if either all or none of # its replacement targets have been applied. if all(applied_statuses) or (not any(applied_statuses)): self.graph.remove_replaced_nodes(key, migration.replaces) else: # This replacing migration cannot be used because it is # partially applied. Remove it from the graph and remap # dependencies to it (#25945). self.graph.remove_replacement_node(key, migration.replaces) # Ensure the graph is consistent. try: self.graph.validate_consistency() except NodeNotFoundError as exc: # Check if the missing node could have been replaced by any squash # migration but wasn't because the squash migration was partially # applied before. In that case raise a more understandable exception # (#23556). # Get reverse replacements. reverse_replacements = {} for key, migration in self.replacements.items(): for replaced in migration.replaces: reverse_replacements.setdefault(replaced, set()).add(key) # Try to reraise exception with more detail. if exc.node in reverse_replacements: candidates = reverse_replacements.get(exc.node, set()) is_replaced = any(candidate in self.graph.nodes for candidate in candidates) if not is_replaced: tries = ', '.join('%s.%s' % c for c in candidates) raise NodeNotFoundError( "Migration {0} depends on nonexistent node ('{1}', '{2}'). 
" "Django tried to replace migration {1}.{2} with any of [{3}] " "but wasn't able to because some of the replaced migrations " "are already applied.".format( exc.origin, exc.node[0], exc.node[1], tries ), exc.node ) from exc raise self.graph.ensure_not_cyclic() def check_consistent_history(self, connection): """ Raise InconsistentMigrationHistory if any applied migrations have unapplied dependencies. """ recorder = MigrationRecorder(connection) applied = recorder.applied_migrations() for migration in applied: # If the migration is unknown, skip it. if migration not in self.graph.nodes: continue for parent in self.graph.node_map[migration].parents: if parent not in applied: # Skip unapplied squashed migrations that have all of their # `replaces` applied. if parent in self.replacements: if all(m in applied for m in self.replacements[parent].replaces): continue raise InconsistentMigrationHistory( "Migration {}.{} is applied before its dependency " "{}.{} on database '{}'.".format( migration[0], migration[1], parent[0], parent[1], connection.alias, ) ) def detect_conflicts(self): """ Look through the loaded graph and detect any conflicts - apps with more than one leaf migration. Return a dict of the app labels that conflict with the migration names that conflict. """ seen_apps = {} conflicting_apps = set() for app_label, migration_name in self.graph.leaf_nodes(): if app_label in seen_apps: conflicting_apps.add(app_label) seen_apps.setdefault(app_label, set()).add(migration_name) return {app_label: sorted(seen_apps[app_label]) for app_label in conflicting_apps} def project_state(self, nodes=None, at_end=True): """ Return a ProjectState object representing the most recent state that the loaded migrations represent. See graph.make_state() for the meaning of "nodes" and "at_end". """ return self.graph.make_state(nodes=nodes, at_end=at_end, real_apps=list(self.unmigrated_apps)) def collect_sql(self, plan): """ Take a migration plan and return a list of collected SQL statements that represent the best-efforts version of that plan. """ statements = [] state = None for migration, backwards in plan: with self.connection.schema_editor(collect_sql=True, atomic=migration.atomic) as schema_editor: if state is None: state = self.project_state((migration.app_label, migration.name), at_end=False) if not backwards: state = migration.apply(state, schema_editor, collect_sql=True) else: state = migration.unapply(state, schema_editor, collect_sql=True) statements.extend(schema_editor.collected_sql) return statements
b777f47282b2ffbd6c9a82ba5bc6a304f19d285cc486d3bd66a7395f94f1843e
import datetime
import posixpath

from django import forms
from django.core import checks
from django.core.files.base import File
from django.core.files.images import ImageFile
from django.core.files.storage import Storage, default_storage
from django.db.models import signals
from django.db.models.fields import Field
from django.db.models.query_utils import DeferredAttribute
from django.utils.translation import gettext_lazy as _


class FieldFile(File):
    def __init__(self, instance, field, name):
        super().__init__(None, name)
        self.instance = instance
        self.field = field
        self.storage = field.storage
        self._committed = True

    def __eq__(self, other):
        # Older code may be expecting FileField values to be simple strings.
        # By overriding the == operator, it can remain backwards compatible.
        if hasattr(other, 'name'):
            return self.name == other.name
        return self.name == other

    def __hash__(self):
        return hash(self.name)

    # The standard File contains most of the necessary properties, but
    # FieldFiles can be instantiated without a name, so that needs to
    # be checked for here.

    def _require_file(self):
        if not self:
            raise ValueError("The '%s' attribute has no file associated with it." % self.field.name)

    def _get_file(self):
        self._require_file()
        if getattr(self, '_file', None) is None:
            self._file = self.storage.open(self.name, 'rb')
        return self._file

    def _set_file(self, file):
        self._file = file

    def _del_file(self):
        del self._file

    file = property(_get_file, _set_file, _del_file)

    @property
    def path(self):
        self._require_file()
        return self.storage.path(self.name)

    @property
    def url(self):
        self._require_file()
        return self.storage.url(self.name)

    @property
    def size(self):
        self._require_file()
        if not self._committed:
            return self.file.size
        return self.storage.size(self.name)

    def open(self, mode='rb'):
        self._require_file()
        if getattr(self, '_file', None) is None:
            self.file = self.storage.open(self.name, mode)
        else:
            self.file.open(mode)
        return self
    # open() doesn't alter the file's contents, but it does reset the pointer
    open.alters_data = True

    # In addition to the standard File API, FieldFiles have extra methods
    # to further manipulate the underlying file, as well as update the
    # associated model instance.

    def save(self, name, content, save=True):
        name = self.field.generate_filename(self.instance, name)
        self.name = self.storage.save(name, content, max_length=self.field.max_length)
        setattr(self.instance, self.field.name, self.name)
        self._committed = True

        # Save the object because it has changed, unless save is False
        if save:
            self.instance.save()
    save.alters_data = True

    def delete(self, save=True):
        if not self:
            return
        # Only close the file if it's already open, which we know by the
        # presence of self._file
        if hasattr(self, '_file'):
            self.close()
            del self.file

        self.storage.delete(self.name)

        self.name = None
        setattr(self.instance, self.field.name, self.name)
        self._committed = False

        if save:
            self.instance.save()
    delete.alters_data = True

    @property
    def closed(self):
        file = getattr(self, '_file', None)
        return file is None or file.closed

    def close(self):
        file = getattr(self, '_file', None)
        if file is not None:
            file.close()

    def __getstate__(self):
        # FieldFile needs access to its associated model field, an instance and
        # the file's name. Everything else will be restored later, by
        # FileDescriptor below.
        return {
            'name': self.name,
            'closed': False,
            '_committed': True,
            '_file': None,
            'instance': self.instance,
            'field': self.field,
        }

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.storage = self.field.storage


class FileDescriptor(DeferredAttribute):
    """
    The descriptor for the file attribute on the model instance. Return a
    FieldFile when accessed so you can write code like::

        >>> from myapp.models import MyModel
        >>> instance = MyModel.objects.get(pk=1)
        >>> instance.file.size

    Assign a file object on assignment so you can do::

        >>> with open('/path/to/hello.world') as f:
        ...     instance.file = File(f)
    """
    def __get__(self, instance, cls=None):
        if instance is None:
            return self

        # This is slightly complicated, so worth an explanation.
        # `instance.file` needs to ultimately return some instance of `File`,
        # probably a subclass. Additionally, this returned object needs to have
        # the FieldFile API so that users can easily do things like
        # instance.file.path and have that delegated to the file storage engine.
        # Easy enough if we're strict about assignment in __set__, but if you
        # peek below you can see that we're not. So depending on the current
        # value of the field we have to dynamically construct some sort of
        # "thing" to return.

        # The instance dict contains whatever was originally assigned
        # in __set__.
        file = super().__get__(instance, cls)

        # If this value is a string (instance.file = "path/to/file") or None
        # then we simply wrap it with the appropriate attribute class according
        # to the file field. [This is FieldFile for FileFields and
        # ImageFieldFile for ImageFields; it's also conceivable that user
        # subclasses might also want to subclass the attribute class]. This
        # object understands how to convert a path to a file, and also how to
        # handle None.
        if isinstance(file, str) or file is None:
            attr = self.field.attr_class(instance, self.field, file)
            instance.__dict__[self.field.attname] = attr

        # Other types of files may be assigned as well, but they need to have
        # the FieldFile interface added to them. Thus, we wrap any other type of
        # File inside a FieldFile (well, the field's attr_class, which is
        # usually FieldFile).
        elif isinstance(file, File) and not isinstance(file, FieldFile):
            file_copy = self.field.attr_class(instance, self.field, file.name)
            file_copy.file = file
            file_copy._committed = False
            instance.__dict__[self.field.attname] = file_copy

        # Finally, because of the (some would say boneheaded) way pickle works,
        # the underlying FieldFile might not actually itself have an associated
        # file. So we need to reset the details of the FieldFile in those cases.
        elif isinstance(file, FieldFile) and not hasattr(file, 'field'):
            file.instance = instance
            file.field = self.field
            file.storage = self.field.storage

        # Make sure that the instance is correct.
        elif isinstance(file, FieldFile) and instance is not file.instance:
            file.instance = instance

        # That was fun, wasn't it?
        return instance.__dict__[self.field.attname]

    def __set__(self, instance, value):
        instance.__dict__[self.field.attname] = value


class FileField(Field):

    # The class to wrap instance attributes in. Accessing the file object off
    # the instance will always return an instance of attr_class.
    attr_class = FieldFile

    # The descriptor to use for accessing the attribute off of the class.
descriptor_class = FileDescriptor description = _("File") def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs): self._primary_key_set_explicitly = 'primary_key' in kwargs self.storage = storage or default_storage if callable(self.storage): self.storage = self.storage() if not isinstance(self.storage, Storage): raise TypeError( "%s.storage must be a subclass/instance of %s.%s" % (self.__class__.__qualname__, Storage.__module__, Storage.__qualname__) ) self.upload_to = upload_to kwargs.setdefault('max_length', 100) super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_primary_key(), *self._check_upload_to(), ] def _check_primary_key(self): if self._primary_key_set_explicitly: return [ checks.Error( "'primary_key' is not a valid argument for a %s." % self.__class__.__name__, obj=self, id='fields.E201', ) ] else: return [] def _check_upload_to(self): if isinstance(self.upload_to, str) and self.upload_to.startswith('/'): return [ checks.Error( "%s's 'upload_to' argument must be a relative path, not an " "absolute path." % self.__class__.__name__, obj=self, id='fields.E202', hint='Remove the leading slash.', ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get("max_length") == 100: del kwargs["max_length"] kwargs['upload_to'] = self.upload_to if self.storage is not default_storage: kwargs['storage'] = self.storage return name, path, args, kwargs def get_internal_type(self): return "FileField" def get_prep_value(self, value): value = super().get_prep_value(value) # Need to convert File objects provided via a form to string for database insertion if value is None: return None return str(value) def pre_save(self, model_instance, add): file = super().pre_save(model_instance, add) if file and not file._committed: # Commit the file to storage prior to saving the model file.save(file.name, file.file, save=False) return file def contribute_to_class(self, cls, name, **kwargs): super().contribute_to_class(cls, name, **kwargs) setattr(cls, self.attname, self.descriptor_class(self)) def generate_filename(self, instance, filename): """ Apply (if callable) or prepend (if a string) upload_to to the filename, then delegate further processing of the name to the storage backend. Until the storage layer, all file paths are expected to be Unix style (with forward slashes). """ if callable(self.upload_to): filename = self.upload_to(instance, filename) else: dirname = datetime.datetime.now().strftime(str(self.upload_to)) filename = posixpath.join(dirname, filename) return self.storage.generate_filename(filename) def save_form_data(self, instance, data): # Important: None means "no change", other false value means "clear" # This subtle distinction (rather than a more explicit marker) is # needed because we need to consume values that are also sane for a # regular (non Model-) Form to find in its cleaned_data dictionary. if data is not None: # This value will be converted to str and stored in the # database, so leaving False as-is is not acceptable. setattr(instance, self.name, data or '') def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.FileField, 'max_length': self.max_length, **kwargs, }) class ImageFileDescriptor(FileDescriptor): """ Just like the FileDescriptor, but for ImageFields. The only difference is assigning the width/height to the width_field/height_field, if appropriate. 
""" def __set__(self, instance, value): previous_file = instance.__dict__.get(self.field.attname) super().__set__(instance, value) # To prevent recalculating image dimensions when we are instantiating # an object from the database (bug #11084), only update dimensions if # the field had a value before this assignment. Since the default # value for FileField subclasses is an instance of field.attr_class, # previous_file will only be None when we are called from # Model.__init__(). The ImageField.update_dimension_fields method # hooked up to the post_init signal handles the Model.__init__() cases. # Assignment happening outside of Model.__init__() will trigger the # update right here. if previous_file is not None: self.field.update_dimension_fields(instance, force=True) class ImageFieldFile(ImageFile, FieldFile): def delete(self, save=True): # Clear the image dimensions cache if hasattr(self, '_dimensions_cache'): del self._dimensions_cache super().delete(save) class ImageField(FileField): attr_class = ImageFieldFile descriptor_class = ImageFileDescriptor description = _("Image") def __init__(self, verbose_name=None, name=None, width_field=None, height_field=None, **kwargs): self.width_field, self.height_field = width_field, height_field super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_image_library_installed(), ] def _check_image_library_installed(self): try: from PIL import Image # NOQA except ImportError: return [ checks.Error( 'Cannot use ImageField because Pillow is not installed.', hint=('Get Pillow at https://pypi.org/project/Pillow/ ' 'or run command "python -m pip install Pillow".'), obj=self, id='fields.E210', ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.width_field: kwargs['width_field'] = self.width_field if self.height_field: kwargs['height_field'] = self.height_field return name, path, args, kwargs def contribute_to_class(self, cls, name, **kwargs): super().contribute_to_class(cls, name, **kwargs) # Attach update_dimension_fields so that dimension fields declared # after their corresponding image field don't stay cleared by # Model.__init__, see bug #11196. # Only run post-initialization dimension update on non-abstract models if not cls._meta.abstract: signals.post_init.connect(self.update_dimension_fields, sender=cls) def update_dimension_fields(self, instance, force=False, *args, **kwargs): """ Update field's width and height fields, if defined. This method is hooked up to model's post_init signal to update dimensions after instantiating a model instance. However, dimensions won't be updated if the dimensions fields are already populated. This avoids unnecessary recalculation when loading an object from the database. Dimensions can be forced to update with force=True, which is how ImageFileDescriptor.__set__ calls this method. """ # Nothing to update if the field doesn't have dimension fields or if # the field is deferred. has_dimension_fields = self.width_field or self.height_field if not has_dimension_fields or self.attname not in instance.__dict__: return # getattr will call the ImageFileDescriptor's __get__ method, which # coerces the assigned value into an instance of self.attr_class # (ImageFieldFile in this case). file = getattr(instance, self.attname) # Nothing to update if we have no file and not being forced to update. 
if not file and not force: return dimension_fields_filled = not( (self.width_field and not getattr(instance, self.width_field)) or (self.height_field and not getattr(instance, self.height_field)) ) # When both dimension fields have values, we are most likely loading # data from the database or updating an image field that already had # an image stored. In the first case, we don't want to update the # dimension fields because we are already getting their values from the # database. In the second case, we do want to update the dimensions # fields and will skip this return because force will be True since we # were called from ImageFileDescriptor.__set__. if dimension_fields_filled and not force: return # file should be an instance of ImageFieldFile or should be None. if file: width = file.width height = file.height else: # No file, so clear dimensions fields. width = None height = None # Update the width and height fields. if self.width_field: setattr(instance, self.width_field, width) if self.height_field: setattr(instance, self.height_field, height) def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.ImageField, **kwargs, })
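

# ----------------------------------------------------------------------
# Editor's sketch -- not part of Django. Shows how the fields above are
# typically declared on a model; the model, app, and upload paths are
# hypothetical, so the block is left commented rather than executed at
# import time.
#
#   from django.db import models
#
#   def user_directory_path(instance, filename):
#       # A callable upload_to receives the instance and original filename.
#       return 'user_{0}/{1}'.format(instance.owner_id, filename)
#
#   class Photo(models.Model):
#       owner_id = models.IntegerField()
#       # strftime() placeholders in a string upload_to are expanded by
#       # FileField.generate_filename().
#       attachment = models.FileField(upload_to='attachments/%Y/%m/%d/')
#       # width_field/height_field are kept in sync by
#       # ImageField.update_dimension_fields(), hooked to post_init and
#       # invoked with force=True from ImageFileDescriptor.__set__().
#       image = models.ImageField(
#           upload_to=user_directory_path,
#           width_field='image_width',
#           height_field='image_height',
#       )
#       image_width = models.PositiveIntegerField(null=True, editable=False)
#       image_height = models.PositiveIntegerField(null=True, editable=False)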
baeaa052f504aadd0575f534a5120580221266c9ef356a6ad060f2a19d60184a
import datetime import uuid from functools import lru_cache from django.conf import settings from django.db import DatabaseError, NotSupportedError from django.db.backends.base.operations import BaseDatabaseOperations from django.db.backends.utils import strip_quotes, truncate_name from django.db.models import AutoField, Exists, ExpressionWrapper from django.db.models.expressions import RawSQL from django.db.models.sql.where import WhereNode from django.utils import timezone from django.utils.encoding import force_bytes, force_str from django.utils.functional import cached_property from django.utils.regex_helper import _lazy_re_compile from .base import Database from .utils import BulkInsertMapper, InsertVar, Oracle_datetime class DatabaseOperations(BaseDatabaseOperations): # Oracle uses NUMBER(5), NUMBER(11), and NUMBER(19) for integer fields. # SmallIntegerField uses NUMBER(11) instead of NUMBER(5), which is used by # SmallAutoField, to preserve backward compatibility. integer_field_ranges = { 'SmallIntegerField': (-99999999999, 99999999999), 'IntegerField': (-99999999999, 99999999999), 'BigIntegerField': (-9999999999999999999, 9999999999999999999), 'PositiveBigIntegerField': (0, 9999999999999999999), 'PositiveSmallIntegerField': (0, 99999999999), 'PositiveIntegerField': (0, 99999999999), 'SmallAutoField': (-99999, 99999), 'AutoField': (-99999999999, 99999999999), 'BigAutoField': (-9999999999999999999, 9999999999999999999), } set_operators = {**BaseDatabaseOperations.set_operators, 'difference': 'MINUS'} # TODO: colorize this SQL code with style.SQL_KEYWORD(), etc. _sequence_reset_sql = """ DECLARE table_value integer; seq_value integer; seq_name user_tab_identity_cols.sequence_name%%TYPE; BEGIN BEGIN SELECT sequence_name INTO seq_name FROM user_tab_identity_cols WHERE table_name = '%(table_name)s' AND column_name = '%(column_name)s'; EXCEPTION WHEN NO_DATA_FOUND THEN seq_name := '%(no_autofield_sequence_name)s'; END; SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s; SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences WHERE sequence_name = seq_name; WHILE table_value > seq_value LOOP EXECUTE IMMEDIATE 'SELECT "'||seq_name||'".nextval FROM DUAL' INTO seq_value; END LOOP; END; /""" # Oracle doesn't support string without precision; use the max string size. cast_char_field_without_max_length = 'NVARCHAR2(2000)' cast_data_types = { 'AutoField': 'NUMBER(11)', 'BigAutoField': 'NUMBER(19)', 'SmallAutoField': 'NUMBER(5)', 'TextField': cast_char_field_without_max_length, } def cache_key_culling_sql(self): return 'SELECT cache_key FROM %s ORDER BY cache_key OFFSET %%s ROWS FETCH FIRST 1 ROWS ONLY' def date_extract_sql(self, lookup_type, field_name): if lookup_type == 'week_day': # TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday. 
return "TO_CHAR(%s, 'D')" % field_name elif lookup_type == 'iso_week_day': return "TO_CHAR(%s - 1, 'D')" % field_name elif lookup_type == 'week': # IW = ISO week number return "TO_CHAR(%s, 'IW')" % field_name elif lookup_type == 'quarter': return "TO_CHAR(%s, 'Q')" % field_name elif lookup_type == 'iso_year': return "TO_CHAR(%s, 'IYYY')" % field_name else: # https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/EXTRACT-datetime.html return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name) def date_trunc_sql(self, lookup_type, field_name): # https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/ROUND-and-TRUNC-Date-Functions.html if lookup_type in ('year', 'month'): return "TRUNC(%s, '%s')" % (field_name, lookup_type.upper()) elif lookup_type == 'quarter': return "TRUNC(%s, 'Q')" % field_name elif lookup_type == 'week': return "TRUNC(%s, 'IW')" % field_name else: return "TRUNC(%s)" % field_name # Oracle crashes with "ORA-03113: end-of-file on communication channel" # if the time zone name is passed in parameter. Use interpolation instead. # https://groups.google.com/forum/#!msg/django-developers/zwQju7hbG78/9l934yelwfsJ # This regexp matches all time zone names from the zoneinfo database. _tzname_re = _lazy_re_compile(r'^[\w/:+-]+$') def _prepare_tzname_delta(self, tzname): if '+' in tzname: return tzname[tzname.find('+'):] elif '-' in tzname: return tzname[tzname.find('-'):] return tzname def _convert_field_to_tz(self, field_name, tzname): if not settings.USE_TZ: return field_name if not self._tzname_re.match(tzname): raise ValueError("Invalid time zone name: %s" % tzname) # Convert from connection timezone to the local time, returning # TIMESTAMP WITH TIME ZONE and cast it back to TIMESTAMP to strip the # TIME ZONE details. if self.connection.timezone_name != tzname: return "CAST((FROM_TZ(%s, '%s') AT TIME ZONE '%s') AS TIMESTAMP)" % ( field_name, self.connection.timezone_name, self._prepare_tzname_delta(tzname), ) return field_name def datetime_cast_date_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return 'TRUNC(%s)' % field_name def datetime_cast_time_sql(self, field_name, tzname): # Since `TimeField` values are stored as TIMESTAMP where only the date # part is ignored, convert the field to the specified timezone. return self._convert_field_to_tz(field_name, tzname) def datetime_extract_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return self.date_extract_sql(lookup_type, field_name) def datetime_trunc_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) # https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/ROUND-and-TRUNC-Date-Functions.html if lookup_type in ('year', 'month'): sql = "TRUNC(%s, '%s')" % (field_name, lookup_type.upper()) elif lookup_type == 'quarter': sql = "TRUNC(%s, 'Q')" % field_name elif lookup_type == 'week': sql = "TRUNC(%s, 'IW')" % field_name elif lookup_type == 'day': sql = "TRUNC(%s)" % field_name elif lookup_type == 'hour': sql = "TRUNC(%s, 'HH24')" % field_name elif lookup_type == 'minute': sql = "TRUNC(%s, 'MI')" % field_name else: sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision. return sql def time_trunc_sql(self, lookup_type, field_name): # The implementation is similar to `datetime_trunc_sql` as both # `DateTimeField` and `TimeField` are stored as TIMESTAMP where # the date part of the later is ignored. 
if lookup_type == 'hour': sql = "TRUNC(%s, 'HH24')" % field_name elif lookup_type == 'minute': sql = "TRUNC(%s, 'MI')" % field_name elif lookup_type == 'second': sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision. return sql def get_db_converters(self, expression): converters = super().get_db_converters(expression) internal_type = expression.output_field.get_internal_type() if internal_type in ['JSONField', 'TextField']: converters.append(self.convert_textfield_value) elif internal_type == 'BinaryField': converters.append(self.convert_binaryfield_value) elif internal_type in ['BooleanField', 'NullBooleanField']: converters.append(self.convert_booleanfield_value) elif internal_type == 'DateTimeField': if settings.USE_TZ: converters.append(self.convert_datetimefield_value) elif internal_type == 'DateField': converters.append(self.convert_datefield_value) elif internal_type == 'TimeField': converters.append(self.convert_timefield_value) elif internal_type == 'UUIDField': converters.append(self.convert_uuidfield_value) # Oracle stores empty strings as null. If the field accepts the empty # string, undo this to adhere to the Django convention of using # the empty string instead of null. if expression.field.empty_strings_allowed: converters.append( self.convert_empty_bytes if internal_type == 'BinaryField' else self.convert_empty_string ) return converters def convert_textfield_value(self, value, expression, connection): if isinstance(value, Database.LOB): value = value.read() return value def convert_binaryfield_value(self, value, expression, connection): if isinstance(value, Database.LOB): value = force_bytes(value.read()) return value def convert_booleanfield_value(self, value, expression, connection): if value in (0, 1): value = bool(value) return value # cx_Oracle always returns datetime.datetime objects for # DATE and TIMESTAMP columns, but Django wants to see a # python datetime.date, .time, or .datetime. def convert_datetimefield_value(self, value, expression, connection): if value is not None: value = timezone.make_aware(value, self.connection.timezone) return value def convert_datefield_value(self, value, expression, connection): if isinstance(value, Database.Timestamp): value = value.date() return value def convert_timefield_value(self, value, expression, connection): if isinstance(value, Database.Timestamp): value = value.time() return value def convert_uuidfield_value(self, value, expression, connection): if value is not None: value = uuid.UUID(value) return value @staticmethod def convert_empty_string(value, expression, connection): return '' if value is None else value @staticmethod def convert_empty_bytes(value, expression, connection): return b'' if value is None else value def deferrable_sql(self): return " DEFERRABLE INITIALLY DEFERRED" def fetch_returned_insert_columns(self, cursor, returning_params): columns = [] for param in returning_params: value = param.get_value() if value is None or value == []: # cx_Oracle < 6.3 returns None, >= 6.3 returns empty list. raise DatabaseError( 'The database did not return a new row id. Probably ' '"ORA-1403: no data found" was raised internally but was ' 'hidden by the Oracle OCI library (see ' 'https://code.djangoproject.com/ticket/28859).' ) # cx_Oracle < 7 returns value, >= 7 returns list with single value. 
            columns.append(value[0] if isinstance(value, list) else value)
        return tuple(columns)

    def field_cast_sql(self, db_type, internal_type):
        if db_type and db_type.endswith('LOB') and internal_type != 'JSONField':
            return "DBMS_LOB.SUBSTR(%s)"
        else:
            return "%s"

    def no_limit_value(self):
        return None

    def limit_offset_sql(self, low_mark, high_mark):
        fetch, offset = self._get_limit_offset_params(low_mark, high_mark)
        return ' '.join(sql for sql in (
            ('OFFSET %d ROWS' % offset) if offset else None,
            ('FETCH FIRST %d ROWS ONLY' % fetch) if fetch else None,
        ) if sql)

    def last_executed_query(self, cursor, sql, params):
        # https://cx-oracle.readthedocs.io/en/latest/cursor.html#Cursor.statement
        # The DB API definition does not define this attribute.
        statement = cursor.statement
        # Unlike Psycopg's `query` and MySQLdb's `_executed`, cx_Oracle's
        # `statement` doesn't contain the query parameters. Substitute
        # parameters manually.
        if isinstance(params, (tuple, list)):
            for i, param in enumerate(params):
                statement = statement.replace(':arg%d' % i, force_str(param, errors='replace'))
        elif isinstance(params, dict):
            for key, param in params.items():
                statement = statement.replace(':%s' % key, force_str(param, errors='replace'))
        return statement

    def last_insert_id(self, cursor, table_name, pk_name):
        sq_name = self._get_sequence_name(cursor, strip_quotes(table_name), pk_name)
        cursor.execute('SELECT "%s".currval FROM dual' % sq_name)
        return cursor.fetchone()[0]

    def lookup_cast(self, lookup_type, internal_type=None):
        if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
            return "UPPER(%s)"
        if internal_type == 'JSONField' and lookup_type == 'exact':
            return 'DBMS_LOB.SUBSTR(%s)'
        return "%s"

    def max_in_list_size(self):
        return 1000

    def max_name_length(self):
        return 30

    def pk_default_value(self):
        return "NULL"

    def prep_for_iexact_query(self, x):
        return x

    def process_clob(self, value):
        if value is None:
            return ''
        return value.read()

    def quote_name(self, name):
        # SQL92 requires delimited (quoted) names to be case-sensitive. When
        # not quoted, Oracle has case-insensitive behavior for identifiers, but
        # always defaults to uppercase.
        # We simplify things by making Oracle identifiers always uppercase.
        if not name.startswith('"') and not name.endswith('"'):
            name = '"%s"' % truncate_name(name.upper(), self.max_name_length())
        # Oracle puts the query text into a (query % args) construct, so % signs
        # in names need to be escaped. The '%%' will be collapsed back to '%' at
        # that stage so we aren't really making the name longer here.
name = name.replace('%', '%%') return name.upper() def random_function_sql(self): return "DBMS_RANDOM.RANDOM" def regex_lookup(self, lookup_type): if lookup_type == 'regex': match_option = "'c'" else: match_option = "'i'" return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option def return_insert_columns(self, fields): if not fields: return '', () field_names = [] params = [] for field in fields: field_names.append('%s.%s' % ( self.quote_name(field.model._meta.db_table), self.quote_name(field.column), )) params.append(InsertVar(field)) return 'RETURNING %s INTO %s' % ( ', '.join(field_names), ', '.join(['%s'] * len(params)), ), tuple(params) def __foreign_key_constraints(self, table_name, recursive): with self.connection.cursor() as cursor: if recursive: cursor.execute(""" SELECT user_tables.table_name, rcons.constraint_name FROM user_tables JOIN user_constraints cons ON (user_tables.table_name = cons.table_name AND cons.constraint_type = ANY('P', 'U')) LEFT JOIN user_constraints rcons ON (user_tables.table_name = rcons.table_name AND rcons.constraint_type = 'R') START WITH user_tables.table_name = UPPER(%s) CONNECT BY NOCYCLE PRIOR cons.constraint_name = rcons.r_constraint_name GROUP BY user_tables.table_name, rcons.constraint_name HAVING user_tables.table_name != UPPER(%s) ORDER BY MAX(level) DESC """, (table_name, table_name)) else: cursor.execute(""" SELECT cons.table_name, cons.constraint_name FROM user_constraints cons WHERE cons.constraint_type = 'R' AND cons.table_name = UPPER(%s) """, (table_name,)) return cursor.fetchall() @cached_property def _foreign_key_constraints(self): # 512 is large enough to fit the ~330 tables (as of this writing) in # Django's test suite. return lru_cache(maxsize=512)(self.__foreign_key_constraints) def sql_flush(self, style, tables, *, reset_sequences=False, allow_cascade=False): if not tables: return [] truncated_tables = {table.upper() for table in tables} constraints = set() # Oracle's TRUNCATE CASCADE only works with ON DELETE CASCADE foreign # keys which Django doesn't define. Emulate the PostgreSQL behavior # which truncates all dependent tables by manually retrieving all # foreign key constraints and resolving dependencies. for table in tables: for foreign_table, constraint in self._foreign_key_constraints(table, recursive=allow_cascade): if allow_cascade: truncated_tables.add(foreign_table) constraints.add((foreign_table, constraint)) sql = [ '%s %s %s %s %s %s %s %s;' % ( style.SQL_KEYWORD('ALTER'), style.SQL_KEYWORD('TABLE'), style.SQL_FIELD(self.quote_name(table)), style.SQL_KEYWORD('DISABLE'), style.SQL_KEYWORD('CONSTRAINT'), style.SQL_FIELD(self.quote_name(constraint)), style.SQL_KEYWORD('KEEP'), style.SQL_KEYWORD('INDEX'), ) for table, constraint in constraints ] + [ '%s %s %s;' % ( style.SQL_KEYWORD('TRUNCATE'), style.SQL_KEYWORD('TABLE'), style.SQL_FIELD(self.quote_name(table)), ) for table in truncated_tables ] + [ '%s %s %s %s %s %s;' % ( style.SQL_KEYWORD('ALTER'), style.SQL_KEYWORD('TABLE'), style.SQL_FIELD(self.quote_name(table)), style.SQL_KEYWORD('ENABLE'), style.SQL_KEYWORD('CONSTRAINT'), style.SQL_FIELD(self.quote_name(constraint)), ) for table, constraint in constraints ] if reset_sequences: sequences = [ sequence for sequence in self.connection.introspection.sequence_list() if sequence['table'].upper() in truncated_tables ] # Since we've just deleted all the rows, running our sequence ALTER # code will reset the sequence to 0. 
sql.extend(self.sequence_reset_by_name_sql(style, sequences)) return sql def sequence_reset_by_name_sql(self, style, sequences): sql = [] for sequence_info in sequences: no_autofield_sequence_name = self._get_no_autofield_sequence_name(sequence_info['table']) table = self.quote_name(sequence_info['table']) column = self.quote_name(sequence_info['column'] or 'id') query = self._sequence_reset_sql % { 'no_autofield_sequence_name': no_autofield_sequence_name, 'table': table, 'column': column, 'table_name': strip_quotes(table), 'column_name': strip_quotes(column), } sql.append(query) return sql def sequence_reset_sql(self, style, model_list): output = [] query = self._sequence_reset_sql for model in model_list: for f in model._meta.local_fields: if isinstance(f, AutoField): no_autofield_sequence_name = self._get_no_autofield_sequence_name(model._meta.db_table) table = self.quote_name(model._meta.db_table) column = self.quote_name(f.column) output.append(query % { 'no_autofield_sequence_name': no_autofield_sequence_name, 'table': table, 'column': column, 'table_name': strip_quotes(table), 'column_name': strip_quotes(column), }) # Only one AutoField is allowed per model, so don't # continue to loop break return output def start_transaction_sql(self): return '' def tablespace_sql(self, tablespace, inline=False): if inline: return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace) else: return "TABLESPACE %s" % self.quote_name(tablespace) def adapt_datefield_value(self, value): """ Transform a date value to an object compatible with what is expected by the backend driver for date columns. The default implementation transforms the date to text, but that is not necessary for Oracle. """ return value def adapt_datetimefield_value(self, value): """ Transform a datetime value to an object compatible with what is expected by the backend driver for datetime columns. If naive datetime is passed assumes that is in UTC. Normally Django models.DateTimeField makes sure that if USE_TZ is True passed datetime is timezone aware. """ if value is None: return None # Expression values are adapted by the database. if hasattr(value, 'resolve_expression'): return value # cx_Oracle doesn't support tz-aware datetimes if timezone.is_aware(value): if settings.USE_TZ: value = timezone.make_naive(value, self.connection.timezone) else: raise ValueError("Oracle backend does not support timezone-aware datetimes when USE_TZ is False.") return Oracle_datetime.from_datetime(value) def adapt_timefield_value(self, value): if value is None: return None # Expression values are adapted by the database. 
if hasattr(value, 'resolve_expression'): return value if isinstance(value, str): return datetime.datetime.strptime(value, '%H:%M:%S') # Oracle doesn't support tz-aware times if timezone.is_aware(value): raise ValueError("Oracle backend does not support timezone-aware times.") return Oracle_datetime(1900, 1, 1, value.hour, value.minute, value.second, value.microsecond) def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None): return value def combine_expression(self, connector, sub_expressions): lhs, rhs = sub_expressions if connector == '%%': return 'MOD(%s)' % ','.join(sub_expressions) elif connector == '&': return 'BITAND(%s)' % ','.join(sub_expressions) elif connector == '|': return 'BITAND(-%(lhs)s-1,%(rhs)s)+%(lhs)s' % {'lhs': lhs, 'rhs': rhs} elif connector == '<<': return '(%(lhs)s * POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs} elif connector == '>>': return 'FLOOR(%(lhs)s / POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs} elif connector == '^': return 'POWER(%s)' % ','.join(sub_expressions) elif connector == '#': raise NotSupportedError('Bitwise XOR is not supported in Oracle.') return super().combine_expression(connector, sub_expressions) def _get_no_autofield_sequence_name(self, table): """ Manually created sequence name to keep backward compatibility for AutoFields that aren't Oracle identity columns. """ name_length = self.max_name_length() - 3 return '%s_SQ' % truncate_name(strip_quotes(table), name_length).upper() def _get_sequence_name(self, cursor, table, pk_name): cursor.execute(""" SELECT sequence_name FROM user_tab_identity_cols WHERE table_name = UPPER(%s) AND column_name = UPPER(%s)""", [table, pk_name]) row = cursor.fetchone() return self._get_no_autofield_sequence_name(table) if row is None else row[0] def bulk_insert_sql(self, fields, placeholder_rows): query = [] for row in placeholder_rows: select = [] for i, placeholder in enumerate(row): # A model without any fields has fields=[None]. if fields[i]: internal_type = getattr(fields[i], 'target_field', fields[i]).get_internal_type() placeholder = BulkInsertMapper.types.get(internal_type, '%s') % placeholder # Add columns aliases to the first select to avoid "ORA-00918: # column ambiguously defined" when two or more columns in the # first select have the same value. if not query: placeholder = '%s col_%s' % (placeholder, i) select.append(placeholder) query.append('SELECT %s FROM DUAL' % ', '.join(select)) # Bulk insert to tables with Oracle identity columns causes Oracle to # add sequence.nextval to it. Sequence.nextval cannot be used with the # UNION operator. To prevent incorrect SQL, move UNION to a subquery. return 'SELECT * FROM (%s)' % ' UNION ALL '.join(query) def subtract_temporals(self, internal_type, lhs, rhs): if internal_type == 'DateField': lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs params = (*lhs_params, *rhs_params) return "NUMTODSINTERVAL(TO_NUMBER(%s - %s), 'DAY')" % (lhs_sql, rhs_sql), params return super().subtract_temporals(internal_type, lhs, rhs) def bulk_batch_size(self, fields, objs): """Oracle restricts the number of parameters in a query.""" if fields: return self.connection.features.max_query_params // len(fields) return len(objs) def conditional_expression_supported_in_where_clause(self, expression): """ Oracle supports only EXISTS(...) or filters in the WHERE clause, others must be compared with True. 
""" if isinstance(expression, (Exists, WhereNode)): return True if isinstance(expression, ExpressionWrapper) and expression.conditional: return self.conditional_expression_supported_in_where_clause(expression.expression) if isinstance(expression, RawSQL) and expression.conditional: return True return False
10fd573c22ae727903601a78bbd2477a95f869fa3d073ab559941dcedc6fb247
from django.db import ProgrammingError
from django.utils.functional import cached_property


class BaseDatabaseFeatures:
    gis_enabled = False
    allows_group_by_pk = False
    allows_group_by_selected_pks = False
    empty_fetchmany_value = []
    update_can_self_select = True

    # Does the backend distinguish between '' and None?
    interprets_empty_strings_as_nulls = False

    # Does the backend allow inserting duplicate NULL rows in a nullable
    # unique field? All core backends implement this correctly, but other
    # databases such as SQL Server do not.
    supports_nullable_unique_constraints = True

    # Does the backend allow inserting duplicate rows when a unique_together
    # constraint exists and some fields are nullable but not all of them?
    supports_partially_nullable_unique_constraints = True
    # Does the backend support initially deferrable unique constraints?
    supports_deferrable_unique_constraints = False

    can_use_chunked_reads = True
    can_return_columns_from_insert = False
    can_return_rows_from_bulk_insert = False
    has_bulk_insert = True
    uses_savepoints = True
    can_release_savepoints = False

    # If True, don't use integer foreign keys referring to, e.g., positive
    # integer primary keys.
    related_fields_match_type = False
    allow_sliced_subqueries_with_in = True
    has_select_for_update = False
    has_select_for_update_nowait = False
    has_select_for_update_skip_locked = False
    has_select_for_update_of = False
    has_select_for_no_key_update = False
    # Does the database's SELECT FOR UPDATE OF syntax require a column rather
    # than a table?
    select_for_update_of_column = False

    # Does the default test database allow multiple connections?
    # Usually an indication that the test database is in-memory.
    test_db_allows_multiple_connections = True

    # Can an object be saved without an explicit primary key?
    supports_unspecified_pk = False

    # Can a fixture contain forward references? i.e., are
    # FK constraints checked at the end of transaction, or
    # at the end of each save operation?
    supports_forward_references = True

    # Does the backend truncate names properly when they are too long?
    truncates_names = False

    # Is there a REAL datatype in addition to floats/doubles?
    has_real_datatype = False
    supports_subqueries_in_group_by = True

    # Is there a true datatype for uuid?
    has_native_uuid_field = False

    # Is there a true datatype for timedeltas?
    has_native_duration_field = False

    # Does the database driver support same-type temporal data subtraction
    # by returning the type used to store duration fields?
    supports_temporal_subtraction = False

    # Does the __regex lookup support backreferencing and grouping?
    supports_regex_backreferencing = True

    # Can date/datetime lookups be performed using a string?
    supports_date_lookup_using_string = True

    # Can datetimes with timezones be used?
    supports_timezones = True

    # Does the database have a copy of the zoneinfo database?
    has_zoneinfo_database = True

    # When performing a GROUP BY, is an ORDER BY NULL required
    # to remove any ordering?
    requires_explicit_null_ordering_when_grouping = False

    # Does the backend order NULL values as largest or smallest?
    nulls_order_largest = False

    # Does the backend support NULLS FIRST and NULLS LAST in ORDER BY?
    supports_order_by_nulls_modifier = True

    # Does the backend order NULLS FIRST by default?
    order_by_nulls_first = False

    # The database's limit on the number of query parameters.
    max_query_params = None

    # Can an object have an autoincrement primary key of 0?
allows_auto_pk_0 = True # Do we need to NULL a ForeignKey out, or can the constraint check be # deferred can_defer_constraint_checks = False # date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas supports_mixed_date_datetime_comparisons = True # Does the backend support tablespaces? Default to False because it isn't # in the SQL standard. supports_tablespaces = False # Does the backend reset sequences between tests? supports_sequence_reset = True # Can the backend introspect the default value of a column? can_introspect_default = True # Confirm support for introspected foreign keys # Every database can do this reliably, except MySQL, # which can't do it for MyISAM tables can_introspect_foreign_keys = True # Map fields which some backends may not be able to differentiate to the # field it's introspected as. introspected_field_types = { 'AutoField': 'AutoField', 'BigAutoField': 'BigAutoField', 'BigIntegerField': 'BigIntegerField', 'BinaryField': 'BinaryField', 'BooleanField': 'BooleanField', 'CharField': 'CharField', 'DurationField': 'DurationField', 'GenericIPAddressField': 'GenericIPAddressField', 'IntegerField': 'IntegerField', 'PositiveBigIntegerField': 'PositiveBigIntegerField', 'PositiveIntegerField': 'PositiveIntegerField', 'PositiveSmallIntegerField': 'PositiveSmallIntegerField', 'SmallAutoField': 'SmallAutoField', 'SmallIntegerField': 'SmallIntegerField', 'TimeField': 'TimeField', } # Can the backend introspect the column order (ASC/DESC) for indexes? supports_index_column_ordering = True # Does the backend support introspection of materialized views? can_introspect_materialized_views = False # Support for the DISTINCT ON clause can_distinct_on_fields = False # Does the backend prevent running SQL queries in broken transactions? atomic_transactions = True # Can we roll back DDL in a transaction? can_rollback_ddl = False # Does it support operations requiring references rename in a transaction? supports_atomic_references_rename = True # Can we issue more than one ALTER COLUMN clause in an ALTER TABLE? supports_combined_alters = False # Does it support foreign keys? supports_foreign_keys = True # Can it create foreign key constraints inline when adding columns? can_create_inline_fk = True # Does it support CHECK constraints? supports_column_check_constraints = True supports_table_check_constraints = True # Does the backend support introspection of CHECK constraints? can_introspect_check_constraints = True # Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value}) # parameter passing? Note this can be provided by the backend even if not # supported by the Python driver supports_paramstyle_pyformat = True # Does the backend require literal defaults, rather than parameterized ones? requires_literal_defaults = False # Does the backend require a connection reset after each material schema change? connection_persists_old_columns = False # What kind of error does the backend throw when accessing closed cursor? closed_cursor_error_class = ProgrammingError # Does 'a' LIKE 'A' match? has_case_insensitive_like = True # Suffix for backends that don't support "SELECT xxx;" queries. bare_select_suffix = '' # If NULL is implied on columns without needing to be explicitly specified implied_column_null = False # Does the backend support "select for update" queries with limit (and offset)? supports_select_for_update_with_limit = True # Does the backend ignore null expressions in GREATEST and LEAST queries unless # every expression is null? 
greatest_least_ignores_nulls = False # Can the backend clone databases for parallel test execution? # Defaults to False to allow third-party backends to opt-in. can_clone_databases = False # Does the backend consider table names with different casing to # be equal? ignores_table_name_case = False # Place FOR UPDATE right after FROM clause. Used on MSSQL. for_update_after_from = False # Combinatorial flags supports_select_union = True supports_select_intersection = True supports_select_difference = True supports_slicing_ordering_in_compound = False supports_parentheses_in_compound = True # Does the database support SQL 2003 FILTER (WHERE ...) in aggregate # expressions? supports_aggregate_filter_clause = False # Does the backend support indexing a TextField? supports_index_on_text_field = True # Does the backend support window expressions (expression OVER (...))? supports_over_clause = False supports_frame_range_fixed_distance = False only_supports_unbounded_with_preceding_and_following = False # Does the backend support CAST with precision? supports_cast_with_precision = True # How many second decimals does the database return when casting a value to # a type with time? time_cast_precision = 6 # SQL to create a procedure for use by the Django test suite. The # functionality of the procedure isn't important. create_test_procedure_without_params_sql = None create_test_procedure_with_int_param_sql = None # Does the backend support keyword parameters for cursor.callproc()? supports_callproc_kwargs = False # What formats does the backend EXPLAIN syntax support? supported_explain_formats = set() # Does DatabaseOperations.explain_query_prefix() raise ValueError if # unknown kwargs are passed to QuerySet.explain()? validates_explain_options = True # Does the backend support the default parameter in lead() and lag()? supports_default_in_lead_lag = True # Does the backend support ignoring constraint or uniqueness errors during # INSERT? supports_ignore_conflicts = True # Does this backend require casting the results of CASE expressions used # in UPDATE statements to ensure the expression has the correct type? requires_casted_case_in_updates = False # Does the backend support partial indexes (CREATE INDEX ... WHERE ...)? supports_partial_indexes = True supports_functions_in_partial_indexes = True # Does the backend support covering indexes (CREATE INDEX ... INCLUDE ...)? supports_covering_indexes = False # Does the database allow more than one constraint or index on the same # field(s)? allows_multiple_constraints_on_same_fields = True # Does the backend support boolean expressions in SELECT and GROUP BY # clauses? supports_boolean_expr_in_select_clause = True # Does the backend support JSONField? supports_json_field = True # Can the backend introspect a JSONField? can_introspect_json_field = True # Does the backend support primitives in JSONField? supports_primitives_in_json_field = True # Is there a true datatype for JSON? has_native_json_field = False # Does the backend use PostgreSQL-style JSON operators like '->'? 
has_json_operators = False def __init__(self, connection): self.connection = connection @cached_property def supports_explaining_query_execution(self): """Does this backend support explaining query execution?""" return self.connection.ops.explain_prefix is not None @cached_property def supports_transactions(self): """Confirm support for transactions.""" with self.connection.cursor() as cursor: cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)') self.connection.set_autocommit(False) cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)') self.connection.rollback() self.connection.set_autocommit(True) cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST') count, = cursor.fetchone() cursor.execute('DROP TABLE ROLLBACK_TEST') return count == 0 def allows_group_by_selected_pks_on_model(self, model): if not self.allows_group_by_selected_pks: return False return model._meta.managed
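
# Editor's sketch: third-party and core backends alike customize behavior by
# subclassing BaseDatabaseFeatures, either overriding a flag as a plain class
# attribute or computing it per connection with @cached_property (compare the
# MySQL and SQLite subclasses further down in this dump). The class below and
# its 'server_version' attribute are hypothetical.


class FictionalDatabaseFeatures(BaseDatabaseFeatures):
    # Static capabilities are plain class attributes...
    supports_transactions = True
    max_query_params = 900

    # ...while version-dependent ones are probed once per connection and
    # memoized by cached_property.
    @cached_property
    def supports_over_clause(self):
        return self.connection.server_version >= (4, 2)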
7f060185ce45a65934deb960c726465898318e5f5bac3bcf05bd8e31a9349fb8
import operator

from django.db.backends.base.features import BaseDatabaseFeatures
from django.utils.functional import cached_property


class DatabaseFeatures(BaseDatabaseFeatures):
    empty_fetchmany_value = ()
    allows_group_by_pk = True
    related_fields_match_type = True
    # MySQL doesn't support sliced subqueries with IN/ALL/ANY/SOME.
    allow_sliced_subqueries_with_in = False
    has_select_for_update = True
    supports_forward_references = False
    supports_regex_backreferencing = False
    supports_date_lookup_using_string = False
    supports_index_column_ordering = False
    supports_timezones = False
    requires_explicit_null_ordering_when_grouping = True
    can_release_savepoints = True
    atomic_transactions = False
    can_clone_databases = True
    supports_temporal_subtraction = True
    supports_select_intersection = False
    supports_select_difference = False
    supports_slicing_ordering_in_compound = True
    supports_index_on_text_field = False
    has_case_insensitive_like = False
    create_test_procedure_without_params_sql = """
        CREATE PROCEDURE test_procedure ()
        BEGIN
            DECLARE V_I INTEGER;
            SET V_I = 1;
        END;
    """
    create_test_procedure_with_int_param_sql = """
        CREATE PROCEDURE test_procedure (P_I INTEGER)
        BEGIN
            DECLARE V_I INTEGER;
            SET V_I = P_I;
        END;
    """
    # Neither MySQL nor MariaDB support partial indexes.
    supports_partial_indexes = False
    supports_order_by_nulls_modifier = False
    order_by_nulls_first = True

    @cached_property
    def _mysql_storage_engine(self):
        "Internal method used in Django tests. Don't rely on this from your code"
        return self.connection.mysql_server_data['default_storage_engine']

    @cached_property
    def allows_auto_pk_0(self):
        """
        Autoincrement primary key can be set to 0 if it doesn't generate new
        autoincrement values.
        """
        return 'NO_AUTO_VALUE_ON_ZERO' in self.connection.sql_mode

    @cached_property
    def update_can_self_select(self):
        return self.connection.mysql_is_mariadb and self.connection.mysql_version >= (10, 3, 2)

    @cached_property
    def can_introspect_foreign_keys(self):
        "Confirm support for introspected foreign keys"
        return self._mysql_storage_engine != 'MyISAM'

    @cached_property
    def introspected_field_types(self):
        return {
            **super().introspected_field_types,
            'BinaryField': 'TextField',
            'BooleanField': 'IntegerField',
            'DurationField': 'BigIntegerField',
            'GenericIPAddressField': 'CharField',
        }

    @cached_property
    def can_return_columns_from_insert(self):
        return self.connection.mysql_is_mariadb and self.connection.mysql_version >= (10, 5, 0)

    can_return_rows_from_bulk_insert = property(operator.attrgetter('can_return_columns_from_insert'))

    @cached_property
    def has_zoneinfo_database(self):
        return self.connection.mysql_server_data['has_zoneinfo_database']

    @cached_property
    def is_sql_auto_is_null_enabled(self):
        return self.connection.mysql_server_data['sql_auto_is_null']

    @cached_property
    def supports_over_clause(self):
        if self.connection.mysql_is_mariadb:
            return True
        return self.connection.mysql_version >= (8, 0, 2)

    supports_frame_range_fixed_distance = property(operator.attrgetter('supports_over_clause'))

    @cached_property
    def supports_column_check_constraints(self):
        if self.connection.mysql_is_mariadb:
            return self.connection.mysql_version >= (10, 2, 1)
        return self.connection.mysql_version >= (8, 0, 16)

    supports_table_check_constraints = property(operator.attrgetter('supports_column_check_constraints'))

    @cached_property
    def can_introspect_check_constraints(self):
        if self.connection.mysql_is_mariadb:
            version = self.connection.mysql_version
            return (version >= (10, 2, 22) and version < (10, 3)) or version >= (10, 3, 10)
        return self.connection.mysql_version >= (8, 0, 16)

    @cached_property
    def has_select_for_update_skip_locked(self):
        return not self.connection.mysql_is_mariadb and self.connection.mysql_version >= (8, 0, 1)

    @cached_property
    def has_select_for_update_nowait(self):
        if self.connection.mysql_is_mariadb:
            return self.connection.mysql_version >= (10, 3, 0)
        return self.connection.mysql_version >= (8, 0, 1)

    @cached_property
    def has_select_for_update_of(self):
        return not self.connection.mysql_is_mariadb and self.connection.mysql_version >= (8, 0, 1)

    @cached_property
    def supports_explain_analyze(self):
        return self.connection.mysql_is_mariadb or self.connection.mysql_version >= (8, 0, 18)

    @cached_property
    def supported_explain_formats(self):
        # Alias MySQL's TRADITIONAL to TEXT for consistency with other
        # backends.
        formats = {'JSON', 'TEXT', 'TRADITIONAL'}
        if not self.connection.mysql_is_mariadb and self.connection.mysql_version >= (8, 0, 16):
            formats.add('TREE')
        return formats

    @cached_property
    def supports_transactions(self):
        """
        All storage engines except MyISAM support transactions.
        """
        return self._mysql_storage_engine != 'MyISAM'

    @cached_property
    def ignores_table_name_case(self):
        return self.connection.mysql_server_data['lower_case_table_names']

    @cached_property
    def supports_default_in_lead_lag(self):
        # To be added in https://jira.mariadb.org/browse/MDEV-12981.
        return not self.connection.mysql_is_mariadb

    @cached_property
    def supports_json_field(self):
        if self.connection.mysql_is_mariadb:
            return self.connection.mysql_version >= (10, 2, 7)
        return self.connection.mysql_version >= (5, 7, 8)

    @cached_property
    def can_introspect_json_field(self):
        if self.connection.mysql_is_mariadb:
            return self.supports_json_field and self.can_introspect_check_constraints
        return self.supports_json_field
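
# Editor's illustration of the aliasing idiom used above:
# property(operator.attrgetter('can_return_columns_from_insert')) makes the
# second flag a live read-through alias of the first, so the pair can never
# drift apart even though the underlying value is computed lazily. A minimal
# standalone example ('Flags' is hypothetical):


class Flags:
    can_return_columns_from_insert = True
    can_return_rows_from_bulk_insert = property(
        operator.attrgetter('can_return_columns_from_insert')
    )


assert Flags().can_return_rows_from_bulk_insert is True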
1df21ed6c35401e4e2cd447550aa2ccf9cdcf06cae9b4634c6c448d2776719c3
import uuid from django.conf import settings from django.db.backends.base.operations import BaseDatabaseOperations from django.utils import timezone from django.utils.encoding import force_str class DatabaseOperations(BaseDatabaseOperations): compiler_module = "django.db.backends.mysql.compiler" # MySQL stores positive fields as UNSIGNED ints. integer_field_ranges = { **BaseDatabaseOperations.integer_field_ranges, 'PositiveSmallIntegerField': (0, 65535), 'PositiveIntegerField': (0, 4294967295), 'PositiveBigIntegerField': (0, 18446744073709551615), } cast_data_types = { 'AutoField': 'signed integer', 'BigAutoField': 'signed integer', 'SmallAutoField': 'signed integer', 'CharField': 'char(%(max_length)s)', 'DecimalField': 'decimal(%(max_digits)s, %(decimal_places)s)', 'TextField': 'char', 'IntegerField': 'signed integer', 'BigIntegerField': 'signed integer', 'SmallIntegerField': 'signed integer', 'PositiveBigIntegerField': 'unsigned integer', 'PositiveIntegerField': 'unsigned integer', 'PositiveSmallIntegerField': 'unsigned integer', } cast_char_field_without_max_length = 'char' explain_prefix = 'EXPLAIN' def date_extract_sql(self, lookup_type, field_name): # https://dev.mysql.com/doc/mysql/en/date-and-time-functions.html if lookup_type == 'week_day': # DAYOFWEEK() returns an integer, 1-7, Sunday=1. return "DAYOFWEEK(%s)" % field_name elif lookup_type == 'iso_week_day': # WEEKDAY() returns an integer, 0-6, Monday=0. return "WEEKDAY(%s) + 1" % field_name elif lookup_type == 'week': # Override the value of default_week_format for consistency with # other database backends. # Mode 3: Monday, 1-53, with 4 or more days this year. return "WEEK(%s, 3)" % field_name elif lookup_type == 'iso_year': # Get the year part from the YEARWEEK function, which returns a # number as year * 100 + week. return "TRUNCATE(YEARWEEK(%s, 3), -2) / 100" % field_name else: # EXTRACT returns 1-53 based on ISO-8601 for the week number. return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name) def date_trunc_sql(self, lookup_type, field_name): fields = { 'year': '%%Y-01-01', 'month': '%%Y-%%m-01', } # Use double percents to escape. 
if lookup_type in fields: format_str = fields[lookup_type] return "CAST(DATE_FORMAT(%s, '%s') AS DATE)" % (field_name, format_str) elif lookup_type == 'quarter': return "MAKEDATE(YEAR(%s), 1) + INTERVAL QUARTER(%s) QUARTER - INTERVAL 1 QUARTER" % ( field_name, field_name ) elif lookup_type == 'week': return "DATE_SUB(%s, INTERVAL WEEKDAY(%s) DAY)" % ( field_name, field_name ) else: return "DATE(%s)" % (field_name) def _prepare_tzname_delta(self, tzname): if '+' in tzname: return tzname[tzname.find('+'):] elif '-' in tzname: return tzname[tzname.find('-'):] return tzname def _convert_field_to_tz(self, field_name, tzname): if settings.USE_TZ and self.connection.timezone_name != tzname: field_name = "CONVERT_TZ(%s, '%s', '%s')" % ( field_name, self.connection.timezone_name, self._prepare_tzname_delta(tzname), ) return field_name def datetime_cast_date_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return "DATE(%s)" % field_name def datetime_cast_time_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return "TIME(%s)" % field_name def datetime_extract_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return self.date_extract_sql(lookup_type, field_name) def datetime_trunc_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) fields = ['year', 'month', 'day', 'hour', 'minute', 'second'] format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape. format_def = ('0000-', '01', '-01', ' 00:', '00', ':00') if lookup_type == 'quarter': return ( "CAST(DATE_FORMAT(MAKEDATE(YEAR({field_name}), 1) + " "INTERVAL QUARTER({field_name}) QUARTER - " + "INTERVAL 1 QUARTER, '%%Y-%%m-01 00:00:00') AS DATETIME)" ).format(field_name=field_name) if lookup_type == 'week': return ( "CAST(DATE_FORMAT(DATE_SUB({field_name}, " "INTERVAL WEEKDAY({field_name}) DAY), " "'%%Y-%%m-%%d 00:00:00') AS DATETIME)" ).format(field_name=field_name) try: i = fields.index(lookup_type) + 1 except ValueError: sql = field_name else: format_str = ''.join(format[:i] + format_def[i:]) sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str) return sql def time_trunc_sql(self, lookup_type, field_name): fields = { 'hour': '%%H:00:00', 'minute': '%%H:%%i:00', 'second': '%%H:%%i:%%s', } # Use double percents to escape. if lookup_type in fields: format_str = fields[lookup_type] return "CAST(DATE_FORMAT(%s, '%s') AS TIME)" % (field_name, format_str) else: return "TIME(%s)" % (field_name) def fetch_returned_insert_rows(self, cursor): """ Given a cursor object that has just performed an INSERT...RETURNING statement into a table, return the tuple of returned data. """ return cursor.fetchall() def format_for_duration_arithmetic(self, sql): return 'INTERVAL %s MICROSECOND' % sql def force_no_ordering(self): """ "ORDER BY NULL" prevents MySQL from implicitly ordering by grouped columns. If no ordering would otherwise be applied, we don't want any implicit sorting going on. """ return [(None, ("NULL", [], False))] def last_executed_query(self, cursor, sql, params): # With MySQLdb, cursor objects have an (undocumented) "_executed" # attribute where the exact query sent to the database is saved. # See MySQLdb/cursors.py in the source distribution. # MySQLdb returns string, PyMySQL bytes. 
return force_str(getattr(cursor, '_executed', None), errors='replace') def no_limit_value(self): # 2**64 - 1, as recommended by the MySQL documentation return 18446744073709551615 def quote_name(self, name): if name.startswith("`") and name.endswith("`"): return name # Quoting once is enough. return "`%s`" % name def random_function_sql(self): return 'RAND()' def return_insert_columns(self, fields): # MySQL and MariaDB < 10.5.0 don't support an INSERT...RETURNING # statement. if not fields: return '', () columns = [ '%s.%s' % ( self.quote_name(field.model._meta.db_table), self.quote_name(field.column), ) for field in fields ] return 'RETURNING %s' % ', '.join(columns), () def sql_flush(self, style, tables, *, reset_sequences=False, allow_cascade=False): if not tables: return [] sql = ['SET FOREIGN_KEY_CHECKS = 0;'] if reset_sequences: # It's faster to TRUNCATE tables that require a sequence reset # since ALTER TABLE AUTO_INCREMENT is slower than TRUNCATE. sql.extend( '%s %s;' % ( style.SQL_KEYWORD('TRUNCATE'), style.SQL_FIELD(self.quote_name(table_name)), ) for table_name in tables ) else: # Otherwise issue a simple DELETE since it's faster than TRUNCATE # and preserves sequences. sql.extend( '%s %s %s;' % ( style.SQL_KEYWORD('DELETE'), style.SQL_KEYWORD('FROM'), style.SQL_FIELD(self.quote_name(table_name)), ) for table_name in tables ) sql.append('SET FOREIGN_KEY_CHECKS = 1;') return sql def sequence_reset_by_name_sql(self, style, sequences): return [ '%s %s %s %s = 1;' % ( style.SQL_KEYWORD('ALTER'), style.SQL_KEYWORD('TABLE'), style.SQL_FIELD(self.quote_name(sequence_info['table'])), style.SQL_FIELD('AUTO_INCREMENT'), ) for sequence_info in sequences ] def validate_autopk_value(self, value): # Zero in AUTO_INCREMENT field does not work without the # NO_AUTO_VALUE_ON_ZERO SQL mode. if value == 0 and not self.connection.features.allows_auto_pk_0: raise ValueError('The database backend does not accept 0 as a ' 'value for AutoField.') return value def adapt_datetimefield_value(self, value): if value is None: return None # Expression values are adapted by the database. if hasattr(value, 'resolve_expression'): return value # MySQL doesn't support tz-aware datetimes if timezone.is_aware(value): if settings.USE_TZ: value = timezone.make_naive(value, self.connection.timezone) else: raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.") return str(value) def adapt_timefield_value(self, value): if value is None: return None # Expression values are adapted by the database. if hasattr(value, 'resolve_expression'): return value # MySQL doesn't support tz-aware times if timezone.is_aware(value): raise ValueError("MySQL backend does not support timezone-aware times.") return str(value) def max_name_length(self): return 64 def pk_default_value(self): return 'NULL' def bulk_insert_sql(self, fields, placeholder_rows): placeholder_rows_sql = (", ".join(row) for row in placeholder_rows) values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql) return "VALUES " + values_sql def combine_expression(self, connector, sub_expressions): if connector == '^': return 'POW(%s)' % ','.join(sub_expressions) # Convert the result to a signed integer since MySQL's binary operators # return an unsigned integer. 
elif connector in ('&', '|', '<<', '#'): connector = '^' if connector == '#' else connector return 'CONVERT(%s, SIGNED)' % connector.join(sub_expressions) elif connector == '>>': lhs, rhs = sub_expressions return 'FLOOR(%(lhs)s / POW(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs} return super().combine_expression(connector, sub_expressions) def get_db_converters(self, expression): converters = super().get_db_converters(expression) internal_type = expression.output_field.get_internal_type() if internal_type in ['BooleanField', 'NullBooleanField']: converters.append(self.convert_booleanfield_value) elif internal_type == 'DateTimeField': if settings.USE_TZ: converters.append(self.convert_datetimefield_value) elif internal_type == 'UUIDField': converters.append(self.convert_uuidfield_value) return converters def convert_booleanfield_value(self, value, expression, connection): if value in (0, 1): value = bool(value) return value def convert_datetimefield_value(self, value, expression, connection): if value is not None: value = timezone.make_aware(value, self.connection.timezone) return value def convert_uuidfield_value(self, value, expression, connection): if value is not None: value = uuid.UUID(value) return value def binary_placeholder_sql(self, value): return '_binary %s' if value is not None and not hasattr(value, 'as_sql') else '%s' def subtract_temporals(self, internal_type, lhs, rhs): lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs if internal_type == 'TimeField': if self.connection.mysql_is_mariadb: # MariaDB includes the microsecond component in TIME_TO_SEC as # a decimal. MySQL returns an integer without microseconds. return 'CAST((TIME_TO_SEC(%(lhs)s) - TIME_TO_SEC(%(rhs)s)) * 1000000 AS SIGNED)' % { 'lhs': lhs_sql, 'rhs': rhs_sql }, (*lhs_params, *rhs_params) return ( "((TIME_TO_SEC(%(lhs)s) * 1000000 + MICROSECOND(%(lhs)s)) -" " (TIME_TO_SEC(%(rhs)s) * 1000000 + MICROSECOND(%(rhs)s)))" ) % {'lhs': lhs_sql, 'rhs': rhs_sql}, tuple(lhs_params) * 2 + tuple(rhs_params) * 2 params = (*rhs_params, *lhs_params) return "TIMESTAMPDIFF(MICROSECOND, %s, %s)" % (rhs_sql, lhs_sql), params def explain_query_prefix(self, format=None, **options): # Alias MySQL's TRADITIONAL to TEXT for consistency with other backends. if format and format.upper() == 'TEXT': format = 'TRADITIONAL' elif not format and 'TREE' in self.connection.features.supported_explain_formats: # Use TREE by default (if supported) as it's more informative. format = 'TREE' analyze = options.pop('analyze', False) prefix = super().explain_query_prefix(format, **options) if analyze and self.connection.features.supports_explain_analyze: # MariaDB uses ANALYZE instead of EXPLAIN ANALYZE. prefix = 'ANALYZE' if self.connection.mysql_is_mariadb else prefix + ' ANALYZE' if format and not (analyze and not self.connection.mysql_is_mariadb): # Only MariaDB supports the analyze option with formats. prefix += ' FORMAT=%s' % format return prefix def regex_lookup(self, lookup_type): # REGEXP BINARY doesn't work correctly in MySQL 8+ and REGEXP_LIKE # doesn't exist in MySQL 5.x or in MariaDB. 
if self.connection.mysql_version < (8, 0, 0) or self.connection.mysql_is_mariadb: if lookup_type == 'regex': return '%s REGEXP BINARY %s' return '%s REGEXP %s' match_option = 'c' if lookup_type == 'regex' else 'i' return "REGEXP_LIKE(%%s, %%s, '%s')" % match_option def insert_statement(self, ignore_conflicts=False): return 'INSERT IGNORE INTO' if ignore_conflicts else super().insert_statement(ignore_conflicts) def lookup_cast(self, lookup_type, internal_type=None): lookup = '%s' if internal_type == 'JSONField': if self.connection.mysql_is_mariadb or lookup_type in ( 'iexact', 'contains', 'icontains', 'startswith', 'istartswith', 'endswith', 'iendswith', 'regex', 'iregex', ): lookup = 'JSON_UNQUOTE(%s)' return lookup
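
# Editor's illustration: the fragment builders above are plain string
# templating, so they can be exercised without a live server (passing
# connection=None is safe here only because these two methods never touch
# self.connection). Percent signs stay doubled because the fragment is
# interpolated with query parameters later.
_ops = DatabaseOperations(connection=None)
assert _ops.date_trunc_sql('month', '`created`') == (
    "CAST(DATE_FORMAT(`created`, '%%Y-%%m-01') AS DATE)"
)
assert _ops.time_trunc_sql('minute', '`t`') == (
    "CAST(DATE_FORMAT(`t`, '%%H:%%i:00') AS TIME)"
)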
0961b94da0d1e7a170bac31e75806207c882a30b7c4967fb8e1a2e8c4537b248
from psycopg2.extras import Inet from django.conf import settings from django.db.backends.base.operations import BaseDatabaseOperations class DatabaseOperations(BaseDatabaseOperations): cast_char_field_without_max_length = 'varchar' explain_prefix = 'EXPLAIN' cast_data_types = { 'AutoField': 'integer', 'BigAutoField': 'bigint', 'SmallAutoField': 'smallint', } def unification_cast_sql(self, output_field): internal_type = output_field.get_internal_type() if internal_type in ("GenericIPAddressField", "IPAddressField", "TimeField", "UUIDField"): # PostgreSQL will resolve a union as type 'text' if input types are # 'unknown'. # https://www.postgresql.org/docs/current/typeconv-union-case.html # These fields cannot be implicitly cast back in the default # PostgreSQL configuration so we need to explicitly cast them. # We must also remove components of the type within brackets: # varchar(255) -> varchar. return 'CAST(%%s AS %s)' % output_field.db_type(self.connection).split('(')[0] return '%s' def date_extract_sql(self, lookup_type, field_name): # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT if lookup_type == 'week_day': # For consistency across backends, we return Sunday=1, Saturday=7. return "EXTRACT('dow' FROM %s) + 1" % field_name elif lookup_type == 'iso_week_day': return "EXTRACT('isodow' FROM %s)" % field_name elif lookup_type == 'iso_year': return "EXTRACT('isoyear' FROM %s)" % field_name else: return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name) def date_trunc_sql(self, lookup_type, field_name): # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name) def _prepare_tzname_delta(self, tzname): if '+' in tzname: return tzname.replace('+', '-') elif '-' in tzname: return tzname.replace('-', '+') return tzname def _convert_field_to_tz(self, field_name, tzname): if settings.USE_TZ: field_name = "%s AT TIME ZONE '%s'" % (field_name, self._prepare_tzname_delta(tzname)) return field_name def datetime_cast_date_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return '(%s)::date' % field_name def datetime_cast_time_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return '(%s)::time' % field_name def datetime_extract_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return self.date_extract_sql(lookup_type, field_name) def datetime_trunc_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name) def time_trunc_sql(self, lookup_type, field_name): return "DATE_TRUNC('%s', %s)::time" % (lookup_type, field_name) def json_cast_text_sql(self, field_name): return '(%s)::text' % field_name def deferrable_sql(self): return " DEFERRABLE INITIALLY DEFERRED" def fetch_returned_insert_rows(self, cursor): """ Given a cursor object that has just performed an INSERT...RETURNING statement into a table, return the tuple of returned data. 
""" return cursor.fetchall() def lookup_cast(self, lookup_type, internal_type=None): lookup = '%s' # Cast text lookups to text to allow things like filter(x__contains=4) if lookup_type in ('iexact', 'contains', 'icontains', 'startswith', 'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'): if internal_type in ('IPAddressField', 'GenericIPAddressField'): lookup = "HOST(%s)" elif internal_type in ('CICharField', 'CIEmailField', 'CITextField'): lookup = '%s::citext' else: lookup = "%s::text" # Use UPPER(x) for case-insensitive lookups; it's faster. if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'): lookup = 'UPPER(%s)' % lookup return lookup def no_limit_value(self): return None def prepare_sql_script(self, sql): return [sql] def quote_name(self, name): if name.startswith('"') and name.endswith('"'): return name # Quoting once is enough. return '"%s"' % name def set_time_zone_sql(self): return "SET TIME ZONE %s" def sql_flush(self, style, tables, *, reset_sequences=False, allow_cascade=False): if not tables: return [] # Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows us # to truncate tables referenced by a foreign key in any other table. sql_parts = [ style.SQL_KEYWORD('TRUNCATE'), ', '.join(style.SQL_FIELD(self.quote_name(table)) for table in tables), ] if reset_sequences: sql_parts.append(style.SQL_KEYWORD('RESTART IDENTITY')) if allow_cascade: sql_parts.append(style.SQL_KEYWORD('CASCADE')) return ['%s;' % ' '.join(sql_parts)] def sequence_reset_by_name_sql(self, style, sequences): # 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements # to reset sequence indices sql = [] for sequence_info in sequences: table_name = sequence_info['table'] # 'id' will be the case if it's an m2m using an autogenerated # intermediate table (see BaseDatabaseIntrospection.sequence_list). column_name = sequence_info['column'] or 'id' sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" % ( style.SQL_KEYWORD('SELECT'), style.SQL_TABLE(self.quote_name(table_name)), style.SQL_FIELD(column_name), )) return sql def tablespace_sql(self, tablespace, inline=False): if inline: return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace) else: return "TABLESPACE %s" % self.quote_name(tablespace) def sequence_reset_sql(self, style, model_list): from django.db import models output = [] qn = self.quote_name for model in model_list: # Use `coalesce` to set the sequence for each model to the max pk value if there are records, # or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true # if there are records (as the max pk value is already in use), otherwise set it to false. # Use pg_get_serial_sequence to get the underlying sequence name from the table name # and column name (available since PostgreSQL 8) for f in model._meta.local_fields: if isinstance(f, models.AutoField): output.append( "%s setval(pg_get_serial_sequence('%s','%s'), " "coalesce(max(%s), 1), max(%s) %s null) %s %s;" % ( style.SQL_KEYWORD('SELECT'), style.SQL_TABLE(qn(model._meta.db_table)), style.SQL_FIELD(f.column), style.SQL_FIELD(qn(f.column)), style.SQL_FIELD(qn(f.column)), style.SQL_KEYWORD('IS NOT'), style.SQL_KEYWORD('FROM'), style.SQL_TABLE(qn(model._meta.db_table)), ) ) break # Only one AutoField is allowed per model, so don't bother continuing. return output def prep_for_iexact_query(self, x): return x def max_name_length(self): """ Return the maximum length of an identifier. 
The maximum length of an identifier is 63 by default, but can be changed by recompiling PostgreSQL after editing the NAMEDATALEN macro in src/include/pg_config_manual.h. This implementation returns 63, but can be overridden by a custom database backend that inherits most of its behavior from this one. """ return 63 def distinct_sql(self, fields, params): if fields: params = [param for param_list in params for param in param_list] return (['DISTINCT ON (%s)' % ', '.join(fields)], params) else: return ['DISTINCT'], [] def last_executed_query(self, cursor, sql, params): # https://www.psycopg.org/docs/cursor.html#cursor.query # The query attribute is a Psycopg extension to the DB API 2.0. if cursor.query is not None: return cursor.query.decode() return None def return_insert_columns(self, fields): if not fields: return '', () columns = [ '%s.%s' % ( self.quote_name(field.model._meta.db_table), self.quote_name(field.column), ) for field in fields ] return 'RETURNING %s' % ', '.join(columns), () def bulk_insert_sql(self, fields, placeholder_rows): placeholder_rows_sql = (", ".join(row) for row in placeholder_rows) values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql) return "VALUES " + values_sql def adapt_datefield_value(self, value): return value def adapt_datetimefield_value(self, value): return value def adapt_timefield_value(self, value): return value def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None): return value def adapt_ipaddressfield_value(self, value): if value: return Inet(value) return None def subtract_temporals(self, internal_type, lhs, rhs): if internal_type == 'DateField': lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs params = (*lhs_params, *rhs_params) return "(interval '1 day' * (%s - %s))" % (lhs_sql, rhs_sql), params return super().subtract_temporals(internal_type, lhs, rhs) def explain_query_prefix(self, format=None, **options): prefix = super().explain_query_prefix(format) extra = {} if format: extra['FORMAT'] = format if options: extra.update({ name.upper(): 'true' if value else 'false' for name, value in options.items() }) if extra: prefix += ' (%s)' % ', '.join('%s %s' % i for i in extra.items()) return prefix def ignore_conflicts_suffix_sql(self, ignore_conflicts=None): return 'ON CONFLICT DO NOTHING' if ignore_conflicts else super().ignore_conflicts_suffix_sql(ignore_conflicts)
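
# Editor's note: the '+'/'-' swap in _prepare_tzname_delta() exists because
# PostgreSQL reads bare UTC offsets in AT TIME ZONE using the POSIX sign
# convention, which is inverted relative to ISO 8601. The helper is pure
# string manipulation, so it can be exercised without a live connection
# (connection=None is safe only for this method):
_ops = DatabaseOperations(connection=None)
assert _ops._prepare_tzname_delta('+05:30') == '-05:30'
assert _ops._prepare_tzname_delta('Europe/Paris') == 'Europe/Paris'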
996c39c7fb030806ceb469403e95effb1b6472ef07bc427cbc57871951e9280f
import psycopg2 from django.db.backends.base.schema import BaseDatabaseSchemaEditor from django.db.backends.ddl_references import IndexColumns from django.db.backends.utils import strip_quotes class DatabaseSchemaEditor(BaseDatabaseSchemaEditor): sql_create_sequence = "CREATE SEQUENCE %(sequence)s" sql_delete_sequence = "DROP SEQUENCE IF EXISTS %(sequence)s CASCADE" sql_set_sequence_max = "SELECT setval('%(sequence)s', MAX(%(column)s)) FROM %(table)s" sql_set_sequence_owner = 'ALTER SEQUENCE %(sequence)s OWNED BY %(table)s.%(column)s' sql_create_index = ( 'CREATE INDEX %(name)s ON %(table)s%(using)s ' '(%(columns)s)%(include)s%(extra)s%(condition)s' ) sql_create_index_concurrently = ( 'CREATE INDEX CONCURRENTLY %(name)s ON %(table)s%(using)s ' '(%(columns)s)%(include)s%(extra)s%(condition)s' ) sql_delete_index = "DROP INDEX IF EXISTS %(name)s" sql_delete_index_concurrently = "DROP INDEX CONCURRENTLY IF EXISTS %(name)s" # Setting the constraint to IMMEDIATE to allow changing data in the same # transaction. sql_create_column_inline_fk = ( 'CONSTRAINT %(name)s REFERENCES %(to_table)s(%(to_column)s)%(deferrable)s' '; SET CONSTRAINTS %(namespace)s%(name)s IMMEDIATE' ) # Setting the constraint to IMMEDIATE runs any deferred checks to allow # dropping it in the same transaction. sql_delete_fk = "SET CONSTRAINTS %(name)s IMMEDIATE; ALTER TABLE %(table)s DROP CONSTRAINT %(name)s" sql_delete_procedure = 'DROP FUNCTION %(procedure)s(%(param_types)s)' def quote_value(self, value): if isinstance(value, str): value = value.replace('%', '%%') adapted = psycopg2.extensions.adapt(value) if hasattr(adapted, 'encoding'): adapted.encoding = 'utf8' # getquoted() returns a quoted bytestring of the adapted value. return adapted.getquoted().decode() def _field_indexes_sql(self, model, field): output = super()._field_indexes_sql(model, field) like_index_statement = self._create_like_index_sql(model, field) if like_index_statement is not None: output.append(like_index_statement) return output def _field_data_type(self, field): if field.is_relation: return field.rel_db_type(self.connection) return self.connection.data_types.get( field.get_internal_type(), field.db_type(self.connection), ) def _field_base_data_types(self, field): # Yield base data types for array fields. if field.base_field.get_internal_type() == 'ArrayField': yield from self._field_base_data_types(field.base_field) else: yield self._field_data_type(field.base_field) def _create_like_index_sql(self, model, field): """ Return the statement to create an index with varchar operator pattern when the column type is 'varchar' or 'text', otherwise return None. """ db_type = field.db_type(connection=self.connection) if db_type is not None and (field.db_index or field.unique): # Fields with database column types of `varchar` and `text` need # a second index that specifies their operator class, which is # needed when performing correct LIKE queries outside the # C locale. See #12234. # # The same doesn't apply to array fields such as varchar[size] # and text[size], so skip them. if '[' in db_type: return None if db_type.startswith('varchar'): return self._create_index_sql(model, [field], suffix='_like', opclasses=['varchar_pattern_ops']) elif db_type.startswith('text'): return self._create_index_sql(model, [field], suffix='_like', opclasses=['text_pattern_ops']) return None def _alter_column_type_sql(self, model, old_field, new_field, new_type): self.sql_alter_column_type = 'ALTER COLUMN %(column)s TYPE %(type)s' # Cast when data type changed. 
using_sql = ' USING %(column)s::%(type)s' new_internal_type = new_field.get_internal_type() old_internal_type = old_field.get_internal_type() if new_internal_type == 'ArrayField' and new_internal_type == old_internal_type: # Compare base data types for array fields. if list(self._field_base_data_types(old_field)) != list(self._field_base_data_types(new_field)): self.sql_alter_column_type += using_sql elif self._field_data_type(old_field) != self._field_data_type(new_field): self.sql_alter_column_type += using_sql # Make ALTER TYPE with SERIAL make sense. table = strip_quotes(model._meta.db_table) serial_fields_map = {'bigserial': 'bigint', 'serial': 'integer', 'smallserial': 'smallint'} if new_type.lower() in serial_fields_map: column = strip_quotes(new_field.column) sequence_name = "%s_%s_seq" % (table, column) return ( ( self.sql_alter_column_type % { "column": self.quote_name(column), "type": serial_fields_map[new_type.lower()], }, [], ), [ ( self.sql_delete_sequence % { "sequence": self.quote_name(sequence_name), }, [], ), ( self.sql_create_sequence % { "sequence": self.quote_name(sequence_name), }, [], ), ( self.sql_alter_column % { "table": self.quote_name(table), "changes": self.sql_alter_column_default % { "column": self.quote_name(column), "default": "nextval('%s')" % self.quote_name(sequence_name), } }, [], ), ( self.sql_set_sequence_max % { "table": self.quote_name(table), "column": self.quote_name(column), "sequence": self.quote_name(sequence_name), }, [], ), ( self.sql_set_sequence_owner % { 'table': self.quote_name(table), 'column': self.quote_name(column), 'sequence': self.quote_name(sequence_name), }, [], ), ], ) else: return super()._alter_column_type_sql(model, old_field, new_field, new_type) def _alter_field(self, model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict=False): # Drop indexes on varchar/text/citext columns that are changing to a # different type. if (old_field.db_index or old_field.unique) and ( (old_type.startswith('varchar') and not new_type.startswith('varchar')) or (old_type.startswith('text') and not new_type.startswith('text')) or (old_type.startswith('citext') and not new_type.startswith('citext')) ): index_name = self._create_index_name(model._meta.db_table, [old_field.column], suffix='_like') self.execute(self._delete_index_sql(model, index_name)) super()._alter_field( model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict, ) # Added an index? Create any PostgreSQL-specific indexes. if ((not (old_field.db_index or old_field.unique) and new_field.db_index) or (not old_field.unique and new_field.unique)): like_index_statement = self._create_like_index_sql(model, new_field) if like_index_statement is not None: self.execute(like_index_statement) # Removed an index? Drop any PostgreSQL-specific indexes. 
if old_field.unique and not (new_field.db_index or new_field.unique): index_to_remove = self._create_index_name(model._meta.db_table, [old_field.column], suffix='_like') self.execute(self._delete_index_sql(model, index_to_remove)) def _index_columns(self, table, columns, col_suffixes, opclasses): if opclasses: return IndexColumns(table, columns, self.quote_name, col_suffixes=col_suffixes, opclasses=opclasses) return super()._index_columns(table, columns, col_suffixes, opclasses) def add_index(self, model, index, concurrently=False): self.execute(index.create_sql(model, self, concurrently=concurrently), params=None) def remove_index(self, model, index, concurrently=False): self.execute(index.remove_sql(model, self, concurrently=concurrently)) def _delete_index_sql(self, model, name, sql=None, concurrently=False): sql = self.sql_delete_index_concurrently if concurrently else self.sql_delete_index return super()._delete_index_sql(model, name, sql) def _create_index_sql( self, model, fields, *, name=None, suffix='', using='', db_tablespace=None, col_suffixes=(), sql=None, opclasses=(), condition=None, concurrently=False, include=None, ): sql = self.sql_create_index if not concurrently else self.sql_create_index_concurrently return super()._create_index_sql( model, fields, name=name, suffix=suffix, using=using, db_tablespace=db_tablespace, col_suffixes=col_suffixes, sql=sql, opclasses=opclasses, condition=condition, include=include, )
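
# Editor's usage sketch (assumes configured settings, a PostgreSQL
# connection, and an app registry; the 'Tag' model is hypothetical): a
# CharField with db_index=True yields two CREATE INDEX statements via
# _field_indexes_sql() -- the regular btree index plus the '_like' index
# with varchar_pattern_ops, so LIKE 'abc%' lookups stay indexed outside the
# C locale:
#
#     from django.db import connection, models
#
#     class Tag(models.Model):
#         name = models.CharField(max_length=50, db_index=True)
#
#         class Meta:
#             app_label = 'demo'
#
#     with connection.schema_editor(collect_sql=True) as editor:
#         editor.create_model(Tag)
#     # editor.collected_sql ends with two CREATE INDEX statements, the
#     # second using the varchar_pattern_ops operator class.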
1b2dfad2323176c92393e35b7d73da18facd398687dd05bbd320ba24e85e9d53
import operator
import platform

from django.db import transaction
from django.db.backends.base.features import BaseDatabaseFeatures
from django.db.utils import OperationalError
from django.utils.functional import cached_property

from .base import Database


class DatabaseFeatures(BaseDatabaseFeatures):
    # SQLite can read from a cursor since SQLite 3.6.5, subject to the caveat
    # that statements within a connection aren't isolated from each other. See
    # https://sqlite.org/isolation.html.
    can_use_chunked_reads = True
    test_db_allows_multiple_connections = False
    supports_unspecified_pk = True
    supports_timezones = False
    max_query_params = 999
    supports_mixed_date_datetime_comparisons = False
    supports_transactions = True
    atomic_transactions = False
    can_rollback_ddl = True
    can_create_inline_fk = False
    supports_paramstyle_pyformat = False
    can_clone_databases = True
    supports_temporal_subtraction = True
    ignores_table_name_case = True
    supports_cast_with_precision = False
    time_cast_precision = 3
    can_release_savepoints = True
    # Is "ALTER TABLE ... RENAME COLUMN" supported?
    can_alter_table_rename_column = Database.sqlite_version_info >= (3, 25, 0)
    supports_parentheses_in_compound = False
    # Deferred constraint checks can be emulated on SQLite < 3.20 but not in a
    # reasonably performant way.
    supports_pragma_foreign_key_check = Database.sqlite_version_info >= (3, 20, 0)
    can_defer_constraint_checks = supports_pragma_foreign_key_check
    supports_functions_in_partial_indexes = Database.sqlite_version_info >= (3, 15, 0)
    supports_over_clause = Database.sqlite_version_info >= (3, 25, 0)
    supports_frame_range_fixed_distance = Database.sqlite_version_info >= (3, 28, 0)
    supports_aggregate_filter_clause = Database.sqlite_version_info >= (3, 30, 1)
    supports_order_by_nulls_modifier = Database.sqlite_version_info >= (3, 30, 0)
    order_by_nulls_first = True

    @cached_property
    def supports_atomic_references_rename(self):
        # SQLite 3.28.0 bundled with macOS 10.15 does not support renaming
        # references atomically.
        if platform.mac_ver()[0].startswith('10.15.') and Database.sqlite_version_info == (3, 28, 0):
            return False
        return Database.sqlite_version_info >= (3, 26, 0)

    @cached_property
    def introspected_field_types(self):
        return {
            **super().introspected_field_types,
            'BigAutoField': 'AutoField',
            'DurationField': 'BigIntegerField',
            'GenericIPAddressField': 'CharField',
            'SmallAutoField': 'AutoField',
        }

    @cached_property
    def supports_json_field(self):
        try:
            with self.connection.cursor() as cursor, transaction.atomic():
                cursor.execute('SELECT JSON(\'{"a": "b"}\')')
        except OperationalError:
            return False
        return True

    can_introspect_json_field = property(operator.attrgetter('supports_json_field'))
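
# Editor's standalone rendering of the probe idiom used by
# supports_json_field above: run the JSON1 function once and translate an
# OperationalError into "not supported". This version uses the stdlib
# sqlite3 driver so it runs without Django:


def _stdlib_sqlite_supports_json():
    import sqlite3

    conn = sqlite3.connect(':memory:')
    try:
        conn.execute('SELECT JSON(\'{"a": "b"}\')')
    except sqlite3.OperationalError:
        return False
    finally:
        conn.close()
    return True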
7c17249f247e94b1262e2f15e5ea9ffa1ce66c8e5aaae4e9be56bfe55ebbfc8d
from itertools import chain from django.utils.itercompat import is_iterable class Tags: """ Built-in tags for internal checks. """ admin = 'admin' async_support = 'async_support' caches = 'caches' compatibility = 'compatibility' database = 'database' models = 'models' security = 'security' signals = 'signals' sites = 'sites' staticfiles = 'staticfiles' templates = 'templates' translation = 'translation' urls = 'urls' class CheckRegistry: def __init__(self): self.registered_checks = set() self.deployment_checks = set() def register(self, check=None, *tags, **kwargs): """ Can be used as a function or a decorator. Register given function `f` labeled with given `tags`. The function should receive **kwargs and return list of Errors and Warnings. Example:: registry = CheckRegistry() @registry.register('mytag', 'anothertag') def my_check(apps, **kwargs): # ... perform checks and collect `errors` ... return errors # or registry.register(my_check, 'mytag', 'anothertag') """ def inner(check): check.tags = tags checks = self.deployment_checks if kwargs.get('deploy') else self.registered_checks checks.add(check) return check if callable(check): return inner(check) else: if check: tags += (check,) return inner def run_checks(self, app_configs=None, tags=None, include_deployment_checks=False, databases=None): """ Run all registered checks and return list of Errors and Warnings. """ errors = [] checks = self.get_checks(include_deployment_checks) if tags is not None: checks = [check for check in checks if not set(check.tags).isdisjoint(tags)] for check in checks: new_errors = check(app_configs=app_configs, databases=databases) assert is_iterable(new_errors), ( "The function %r did not return a list. All functions registered " "with the checks registry must return a list." % check) errors.extend(new_errors) return errors def tag_exists(self, tag, include_deployment_checks=False): return tag in self.tags_available(include_deployment_checks) def tags_available(self, deployment_checks=False): return set(chain.from_iterable( check.tags for check in self.get_checks(deployment_checks) )) def get_checks(self, include_deployment_checks=False): checks = list(self.registered_checks) if include_deployment_checks: checks.extend(self.deployment_checks) return checks registry = CheckRegistry() register = registry.register run_checks = registry.run_checks tag_exists = registry.tag_exists
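
# Editor's usage sketch for the module-level aliases above: register a check
# under a custom tag, then run only the checks carrying that tag. The 'demo'
# tag and the error id are hypothetical; Error comes from
# django.core.checks.messages.
#
#     from django.core.checks import Error
#
#     @register('demo')
#     def demo_check(app_configs, databases=None, **kwargs):
#         return [Error('Example problem.', id='demo.E001')]
#
#     run_checks(tags=['demo'])   # -> [Error('Example problem.', ...)]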
93b7f23d893a79c4131c389aea07ce4a0bfb2caae647b2f784b92ee5e8152fc6
import mimetypes from email import ( charset as Charset, encoders as Encoders, generator, message_from_string, ) from email.errors import HeaderParseError from email.header import Header from email.headerregistry import Address, parser from email.message import Message from email.mime.base import MIMEBase from email.mime.message import MIMEMessage from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from email.utils import formataddr, formatdate, getaddresses, make_msgid from io import BytesIO, StringIO from pathlib import Path from django.conf import settings from django.core.mail.utils import DNS_NAME from django.utils.encoding import force_str, punycode # Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from # some spam filters. utf8_charset = Charset.Charset('utf-8') utf8_charset.body_encoding = None # Python defaults to BASE64 utf8_charset_qp = Charset.Charset('utf-8') utf8_charset_qp.body_encoding = Charset.QP # Default MIME type to use on attachments (if it is not explicitly given # and cannot be guessed). DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream' RFC5322_EMAIL_LINE_LENGTH_LIMIT = 998 class BadHeaderError(ValueError): pass # Header names that contain structured address data (RFC #5322) ADDRESS_HEADERS = { 'from', 'sender', 'reply-to', 'to', 'cc', 'bcc', 'resent-from', 'resent-sender', 'resent-to', 'resent-cc', 'resent-bcc', } def forbid_multi_line_headers(name, val, encoding): """Forbid multi-line headers to prevent header injection.""" encoding = encoding or settings.DEFAULT_CHARSET val = str(val) # val may be lazy if '\n' in val or '\r' in val: raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name)) try: val.encode('ascii') except UnicodeEncodeError: if name.lower() in ADDRESS_HEADERS: val = ', '.join(sanitize_address(addr, encoding) for addr in getaddresses((val,))) else: val = Header(val, encoding).encode() else: if name.lower() == 'subject': val = Header(val).encode() return name, val def sanitize_address(addr, encoding): """ Format a pair of (name, address) or an email address string. """ address = None if not isinstance(addr, tuple): addr = force_str(addr) try: token, rest = parser.get_mailbox(addr) except (HeaderParseError, ValueError, IndexError): raise ValueError('Invalid address "%s"' % addr) else: if rest: # The entire email address must be parsed. raise ValueError( 'Invalid address; only %s could be parsed from "%s"' % (token, addr) ) nm = token.display_name or '' localpart = token.local_part domain = token.domain or '' else: nm, address = addr localpart, domain = address.rsplit('@', 1) address_parts = nm + localpart + domain if '\n' in address_parts or '\r' in address_parts: raise ValueError('Invalid address; address parts cannot contain newlines.') # Avoid UTF-8 encode, if it's possible. try: nm.encode('ascii') nm = Header(nm).encode() except UnicodeEncodeError: nm = Header(nm, encoding).encode() try: localpart.encode('ascii') except UnicodeEncodeError: localpart = Header(localpart, encoding).encode() domain = punycode(domain) parsed_address = Address(username=localpart, domain=domain) return formataddr((nm, parsed_address.addr_spec)) class MIMEMixin: def as_string(self, unixfrom=False, linesep='\n'): """Return the entire formatted message as a string. Optional `unixfrom' when True, means include the Unix From_ envelope header. This overrides the default as_string() implementation to not mangle lines that begin with 'From '. See bug #13433 for details. 
""" fp = StringIO() g = generator.Generator(fp, mangle_from_=False) g.flatten(self, unixfrom=unixfrom, linesep=linesep) return fp.getvalue() def as_bytes(self, unixfrom=False, linesep='\n'): """Return the entire formatted message as bytes. Optional `unixfrom' when True, means include the Unix From_ envelope header. This overrides the default as_bytes() implementation to not mangle lines that begin with 'From '. See bug #13433 for details. """ fp = BytesIO() g = generator.BytesGenerator(fp, mangle_from_=False) g.flatten(self, unixfrom=unixfrom, linesep=linesep) return fp.getvalue() class SafeMIMEMessage(MIMEMixin, MIMEMessage): def __setitem__(self, name, val): # message/rfc822 attachments must be ASCII name, val = forbid_multi_line_headers(name, val, 'ascii') MIMEMessage.__setitem__(self, name, val) class SafeMIMEText(MIMEMixin, MIMEText): def __init__(self, _text, _subtype='plain', _charset=None): self.encoding = _charset MIMEText.__init__(self, _text, _subtype=_subtype, _charset=_charset) def __setitem__(self, name, val): name, val = forbid_multi_line_headers(name, val, self.encoding) MIMEText.__setitem__(self, name, val) def set_payload(self, payload, charset=None): if charset == 'utf-8' and not isinstance(charset, Charset.Charset): has_long_lines = any( len(line.encode()) > RFC5322_EMAIL_LINE_LENGTH_LIMIT for line in payload.splitlines() ) # Quoted-Printable encoding has the side effect of shortening long # lines, if any (#22561). charset = utf8_charset_qp if has_long_lines else utf8_charset MIMEText.set_payload(self, payload, charset=charset) class SafeMIMEMultipart(MIMEMixin, MIMEMultipart): def __init__(self, _subtype='mixed', boundary=None, _subparts=None, encoding=None, **_params): self.encoding = encoding MIMEMultipart.__init__(self, _subtype, boundary, _subparts, **_params) def __setitem__(self, name, val): name, val = forbid_multi_line_headers(name, val, self.encoding) MIMEMultipart.__setitem__(self, name, val) class EmailMessage: """A container for email information.""" content_subtype = 'plain' mixed_subtype = 'mixed' encoding = None # None => use settings default def __init__(self, subject='', body='', from_email=None, to=None, bcc=None, connection=None, attachments=None, headers=None, cc=None, reply_to=None): """ Initialize a single email message (which can be sent to multiple recipients). 
""" if to: if isinstance(to, str): raise TypeError('"to" argument must be a list or tuple') self.to = list(to) else: self.to = [] if cc: if isinstance(cc, str): raise TypeError('"cc" argument must be a list or tuple') self.cc = list(cc) else: self.cc = [] if bcc: if isinstance(bcc, str): raise TypeError('"bcc" argument must be a list or tuple') self.bcc = list(bcc) else: self.bcc = [] if reply_to: if isinstance(reply_to, str): raise TypeError('"reply_to" argument must be a list or tuple') self.reply_to = list(reply_to) else: self.reply_to = [] self.from_email = from_email or settings.DEFAULT_FROM_EMAIL self.subject = subject self.body = body or '' self.attachments = [] if attachments: for attachment in attachments: if isinstance(attachment, MIMEBase): self.attach(attachment) else: self.attach(*attachment) self.extra_headers = headers or {} self.connection = connection def get_connection(self, fail_silently=False): from django.core.mail import get_connection if not self.connection: self.connection = get_connection(fail_silently=fail_silently) return self.connection def message(self): encoding = self.encoding or settings.DEFAULT_CHARSET msg = SafeMIMEText(self.body, self.content_subtype, encoding) msg = self._create_message(msg) msg['Subject'] = self.subject msg['From'] = self.extra_headers.get('From', self.from_email) self._set_list_header_if_not_empty(msg, 'To', self.to) self._set_list_header_if_not_empty(msg, 'Cc', self.cc) self._set_list_header_if_not_empty(msg, 'Reply-To', self.reply_to) # Email header names are case-insensitive (RFC 2045), so we have to # accommodate that when doing comparisons. header_names = [key.lower() for key in self.extra_headers] if 'date' not in header_names: # formatdate() uses stdlib methods to format the date, which use # the stdlib/OS concept of a timezone, however, Django sets the # TZ environment variable based on the TIME_ZONE setting which # will get picked up by formatdate(). msg['Date'] = formatdate(localtime=settings.EMAIL_USE_LOCALTIME) if 'message-id' not in header_names: # Use cached DNS_NAME for performance msg['Message-ID'] = make_msgid(domain=DNS_NAME) for name, value in self.extra_headers.items(): if name.lower() != 'from': # From is already handled msg[name] = value return msg def recipients(self): """ Return a list of all recipients of the email (includes direct addressees as well as Cc and Bcc entries). """ return [email for email in (self.to + self.cc + self.bcc) if email] def send(self, fail_silently=False): """Send the email message.""" if not self.recipients(): # Don't bother creating the network connection if there's nobody to # send to. return 0 return self.get_connection(fail_silently).send_messages([self]) def attach(self, filename=None, content=None, mimetype=None): """ Attach a file with the given filename and content. The filename can be omitted and the mimetype is guessed, if not provided. If the first parameter is a MIMEBase subclass, insert it directly into the resulting message attachments. For a text/* mimetype (guessed or specified), when a bytes object is specified as content, decode it as UTF-8. If that fails, set the mimetype to DEFAULT_ATTACHMENT_MIME_TYPE and don't decode the content. 
""" if isinstance(filename, MIMEBase): assert content is None assert mimetype is None self.attachments.append(filename) else: assert content is not None mimetype = mimetype or mimetypes.guess_type(filename)[0] or DEFAULT_ATTACHMENT_MIME_TYPE basetype, subtype = mimetype.split('/', 1) if basetype == 'text': if isinstance(content, bytes): try: content = content.decode() except UnicodeDecodeError: # If mimetype suggests the file is text but it's # actually binary, read() raises a UnicodeDecodeError. mimetype = DEFAULT_ATTACHMENT_MIME_TYPE self.attachments.append((filename, content, mimetype)) def attach_file(self, path, mimetype=None): """ Attach a file from the filesystem. Set the mimetype to DEFAULT_ATTACHMENT_MIME_TYPE if it isn't specified and cannot be guessed. For a text/* mimetype (guessed or specified), decode the file's content as UTF-8. If that fails, set the mimetype to DEFAULT_ATTACHMENT_MIME_TYPE and don't decode the content. """ path = Path(path) with path.open('rb') as file: content = file.read() self.attach(path.name, content, mimetype) def _create_message(self, msg): return self._create_attachments(msg) def _create_attachments(self, msg): if self.attachments: encoding = self.encoding or settings.DEFAULT_CHARSET body_msg = msg msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=encoding) if self.body or body_msg.is_multipart(): msg.attach(body_msg) for attachment in self.attachments: if isinstance(attachment, MIMEBase): msg.attach(attachment) else: msg.attach(self._create_attachment(*attachment)) return msg def _create_mime_attachment(self, content, mimetype): """ Convert the content, mimetype pair into a MIME attachment object. If the mimetype is message/rfc822, content may be an email.Message or EmailMessage object, as well as a str. """ basetype, subtype = mimetype.split('/', 1) if basetype == 'text': encoding = self.encoding or settings.DEFAULT_CHARSET attachment = SafeMIMEText(content, subtype, encoding) elif basetype == 'message' and subtype == 'rfc822': # Bug #18967: per RFC2046 s5.2.1, message/rfc822 attachments # must not be base64 encoded. if isinstance(content, EmailMessage): # convert content into an email.Message first content = content.message() elif not isinstance(content, Message): # For compatibility with existing code, parse the message # into an email.Message object if it is not one already. content = message_from_string(force_str(content)) attachment = SafeMIMEMessage(content, subtype) else: # Encode non-text attachments with base64. attachment = MIMEBase(basetype, subtype) attachment.set_payload(content) Encoders.encode_base64(attachment) return attachment def _create_attachment(self, filename, content, mimetype=None): """ Convert the filename, content, mimetype triple into a MIME attachment object. """ attachment = self._create_mime_attachment(content, mimetype) if filename: try: filename.encode('ascii') except UnicodeEncodeError: filename = ('utf-8', '', filename) attachment.add_header('Content-Disposition', 'attachment', filename=filename) return attachment def _set_list_header_if_not_empty(self, msg, header, values): """ Set msg's header, either from self.extra_headers, if present, or from the values argument. """ if values: try: value = self.extra_headers[header] except KeyError: value = ', '.join(str(v) for v in values) msg[header] = value class EmailMultiAlternatives(EmailMessage): """ A version of EmailMessage that makes it easy to send multipart/alternative messages. 
    For example, sending a message with both a plain-text and an HTML
    version.
    """
    alternative_subtype = 'alternative'

    def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
                 connection=None, attachments=None, headers=None, alternatives=None,
                 cc=None, reply_to=None):
        """
        Initialize a single email message (which can be sent to multiple
        recipients).
        """
        super().__init__(
            subject, body, from_email, to, bcc, connection, attachments,
            headers, cc, reply_to,
        )
        self.alternatives = alternatives or []

    def attach_alternative(self, content, mimetype):
        """Attach an alternative content representation."""
        assert content is not None
        assert mimetype is not None
        self.alternatives.append((content, mimetype))

    def _create_message(self, msg):
        return self._create_attachments(self._create_alternatives(msg))

    def _create_alternatives(self, msg):
        encoding = self.encoding or settings.DEFAULT_CHARSET
        if self.alternatives:
            body_msg = msg
            msg = SafeMIMEMultipart(_subtype=self.alternative_subtype, encoding=encoding)
            if self.body:
                msg.attach(body_msg)
            for alternative in self.alternatives:
                msg.attach(self._create_mime_attachment(*alternative))
        return msg
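
# A minimal sketch of the classes above in use: building (not sending) a
# multipart message with an HTML alternative and a text attachment. The
# addresses, subject, and filename are hypothetical placeholders, and
# message() assumes settings (e.g. DEFAULT_CHARSET) are configured.
def _example_build_multipart_message():
    msg = EmailMultiAlternatives(
        subject='Welcome',
        body='Plain-text body.',
        from_email='noreply@example.com',  # hypothetical address
        to=['user@example.com'],           # hypothetical address
    )
    msg.attach_alternative('<p>HTML body.</p>', 'text/html')
    msg.attach('notes.txt', b'attached bytes', 'text/plain')
    # message() assembles SafeMIMEText/SafeMIMEMultipart parts via
    # _create_alternatives() and _create_attachments() without sending.
    return msg.message()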
23b358aed0711c2ef847861de1a8f2f41bab117a73e5506ae341901183a4b2bc
import os import sys import warnings from itertools import takewhile from django.apps import apps from django.conf import settings from django.core.management.base import ( BaseCommand, CommandError, no_translations, ) from django.db import DEFAULT_DB_ALIAS, OperationalError, connections, router from django.db.migrations import Migration from django.db.migrations.autodetector import MigrationAutodetector from django.db.migrations.loader import MigrationLoader from django.db.migrations.questioner import ( InteractiveMigrationQuestioner, MigrationQuestioner, NonInteractiveMigrationQuestioner, ) from django.db.migrations.state import ProjectState from django.db.migrations.utils import get_migration_name_timestamp from django.db.migrations.writer import MigrationWriter class Command(BaseCommand): help = "Creates new migration(s) for apps." def add_arguments(self, parser): parser.add_argument( 'args', metavar='app_label', nargs='*', help='Specify the app label(s) to create migrations for.', ) parser.add_argument( '--dry-run', action='store_true', help="Just show what migrations would be made; don't actually write them.", ) parser.add_argument( '--merge', action='store_true', help="Enable fixing of migration conflicts.", ) parser.add_argument( '--empty', action='store_true', help="Create an empty migration.", ) parser.add_argument( '--noinput', '--no-input', action='store_false', dest='interactive', help='Tells Django to NOT prompt the user for input of any kind.', ) parser.add_argument( '-n', '--name', help="Use this name for migration file(s).", ) parser.add_argument( '--no-header', action='store_false', dest='include_header', help='Do not add header comments to new migration file(s).', ) parser.add_argument( '--check', action='store_true', dest='check_changes', help='Exit with a non-zero status if model changes are missing migrations.', ) @no_translations def handle(self, *app_labels, **options): self.verbosity = options['verbosity'] self.interactive = options['interactive'] self.dry_run = options['dry_run'] self.merge = options['merge'] self.empty = options['empty'] self.migration_name = options['name'] if self.migration_name and not self.migration_name.isidentifier(): raise CommandError('The migration name must be a valid Python identifier.') self.include_header = options['include_header'] check_changes = options['check_changes'] # Make sure the app they asked for exists app_labels = set(app_labels) has_bad_labels = False for app_label in app_labels: try: apps.get_app_config(app_label) except LookupError as err: self.stderr.write(str(err)) has_bad_labels = True if has_bad_labels: sys.exit(2) # Load the current graph state. Pass in None for the connection so # the loader doesn't try to resolve replaced migrations from DB. loader = MigrationLoader(None, ignore_no_migrations=True) # Raise an error if any migrations are applied before their dependencies. consistency_check_labels = {config.label for config in apps.get_app_configs()} # Non-default databases are only checked if database routers used. aliases_to_check = connections if settings.DATABASE_ROUTERS else [DEFAULT_DB_ALIAS] for alias in sorted(aliases_to_check): connection = connections[alias] if (connection.settings_dict['ENGINE'] != 'django.db.backends.dummy' and any( # At least one model must be migrated to the database. 
router.allow_migrate(connection.alias, app_label, model_name=model._meta.object_name) for app_label in consistency_check_labels for model in apps.get_app_config(app_label).get_models() )): try: loader.check_consistent_history(connection) except OperationalError as error: warnings.warn( "Got an error checking a consistent migration history " "performed for database connection '%s': %s" % (alias, error), RuntimeWarning, ) # Before anything else, see if there's conflicting apps and drop out # hard if there are any and they don't want to merge conflicts = loader.detect_conflicts() # If app_labels is specified, filter out conflicting migrations for unspecified apps if app_labels: conflicts = { app_label: conflict for app_label, conflict in conflicts.items() if app_label in app_labels } if conflicts and not self.merge: name_str = "; ".join( "%s in %s" % (", ".join(names), app) for app, names in conflicts.items() ) raise CommandError( "Conflicting migrations detected; multiple leaf nodes in the " "migration graph: (%s).\nTo fix them run " "'python manage.py makemigrations --merge'" % name_str ) # If they want to merge and there's nothing to merge, then politely exit if self.merge and not conflicts: self.stdout.write("No conflicts detected to merge.") return # If they want to merge and there is something to merge, then # divert into the merge code if self.merge and conflicts: return self.handle_merge(loader, conflicts) if self.interactive: questioner = InteractiveMigrationQuestioner(specified_apps=app_labels, dry_run=self.dry_run) else: questioner = NonInteractiveMigrationQuestioner(specified_apps=app_labels, dry_run=self.dry_run) # Set up autodetector autodetector = MigrationAutodetector( loader.project_state(), ProjectState.from_apps(apps), questioner, ) # If they want to make an empty migration, make one for each app if self.empty: if not app_labels: raise CommandError("You must supply at least one app label when using --empty.") # Make a fake changes() result we can pass to arrange_for_graph changes = { app: [Migration("custom", app)] for app in app_labels } changes = autodetector.arrange_for_graph( changes=changes, graph=loader.graph, migration_name=self.migration_name, ) self.write_migration_files(changes) return # Detect changes changes = autodetector.changes( graph=loader.graph, trim_to_apps=app_labels or None, convert_apps=app_labels or None, migration_name=self.migration_name, ) if not changes: # No changes? Tell them. if self.verbosity >= 1: if app_labels: if len(app_labels) == 1: self.stdout.write("No changes detected in app '%s'" % app_labels.pop()) else: self.stdout.write("No changes detected in apps '%s'" % ("', '".join(app_labels))) else: self.stdout.write("No changes detected") else: self.write_migration_files(changes) if check_changes: sys.exit(1) def write_migration_files(self, changes): """ Take a changes dict and write them out as migration files. """ directory_created = {} for app_label, app_migrations in changes.items(): if self.verbosity >= 1: self.stdout.write(self.style.MIGRATE_HEADING("Migrations for '%s':" % app_label)) for migration in app_migrations: # Describe the migration writer = MigrationWriter(migration, self.include_header) if self.verbosity >= 1: # Display a relative path if it's below the current working # directory, or an absolute path otherwise. 
try: migration_string = os.path.relpath(writer.path) except ValueError: migration_string = writer.path if migration_string.startswith('..'): migration_string = writer.path self.stdout.write(' %s\n' % self.style.MIGRATE_LABEL(migration_string)) for operation in migration.operations: self.stdout.write(' - %s' % operation.describe()) if not self.dry_run: # Write the migrations file to the disk. migrations_directory = os.path.dirname(writer.path) if not directory_created.get(app_label): os.makedirs(migrations_directory, exist_ok=True) init_path = os.path.join(migrations_directory, "__init__.py") if not os.path.isfile(init_path): open(init_path, "w").close() # We just do this once per app directory_created[app_label] = True migration_string = writer.as_string() with open(writer.path, "w", encoding='utf-8') as fh: fh.write(migration_string) elif self.verbosity == 3: # Alternatively, makemigrations --dry-run --verbosity 3 # will output the migrations to stdout rather than saving # the file to the disk. self.stdout.write(self.style.MIGRATE_HEADING( "Full migrations file '%s':" % writer.filename )) self.stdout.write(writer.as_string()) def handle_merge(self, loader, conflicts): """ Handles merging together conflicted migrations interactively, if it's safe; otherwise, advises on how to fix it. """ if self.interactive: questioner = InteractiveMigrationQuestioner() else: questioner = MigrationQuestioner(defaults={'ask_merge': True}) for app_label, migration_names in conflicts.items(): # Grab out the migrations in question, and work out their # common ancestor. merge_migrations = [] for migration_name in migration_names: migration = loader.get_migration(app_label, migration_name) migration.ancestry = [ mig for mig in loader.graph.forwards_plan((app_label, migration_name)) if mig[0] == migration.app_label ] merge_migrations.append(migration) def all_items_equal(seq): return all(item == seq[0] for item in seq[1:]) merge_migrations_generations = zip(*(m.ancestry for m in merge_migrations)) common_ancestor_count = sum(1 for common_ancestor_generation in takewhile(all_items_equal, merge_migrations_generations)) if not common_ancestor_count: raise ValueError("Could not find common ancestor of %s" % migration_names) # Now work out the operations along each divergent branch for migration in merge_migrations: migration.branch = migration.ancestry[common_ancestor_count:] migrations_ops = (loader.get_migration(node_app, node_name).operations for node_app, node_name in migration.branch) migration.merged_operations = sum(migrations_ops, []) # In future, this could use some of the Optimizer code # (can_optimize_through) to automatically see if they're # mergeable. For now, we always just prompt the user. if self.verbosity > 0: self.stdout.write(self.style.MIGRATE_HEADING("Merging %s" % app_label)) for migration in merge_migrations: self.stdout.write(self.style.MIGRATE_LABEL(" Branch %s" % migration.name)) for operation in migration.merged_operations: self.stdout.write(' - %s' % operation.describe()) if questioner.ask_merge(app_label): # If they still want to merge it, then write out an empty # file depending on the migrations needing merging. 
numbers = [ MigrationAutodetector.parse_number(migration.name) for migration in merge_migrations ] try: biggest_number = max(x for x in numbers if x is not None) except ValueError: biggest_number = 1 subclass = type("Migration", (Migration,), { "dependencies": [(app_label, migration.name) for migration in merge_migrations], }) parts = ['%04i' % (biggest_number + 1)] if self.migration_name: parts.append(self.migration_name) else: parts.append('merge') leaf_names = '_'.join(sorted(migration.name for migration in merge_migrations)) if len(leaf_names) > 47: parts.append(get_migration_name_timestamp()) else: parts.append(leaf_names) migration_name = '_'.join(parts) new_migration = subclass(migration_name, app_label) writer = MigrationWriter(new_migration, self.include_header) if not self.dry_run: # Write the merge migrations file to the disk with open(writer.path, "w", encoding='utf-8') as fh: fh.write(writer.as_string()) if self.verbosity > 0: self.stdout.write("\nCreated new merge migration %s" % writer.path) elif self.verbosity == 3: # Alternatively, makemigrations --merge --dry-run --verbosity 3 # will output the merge migrations to stdout rather than saving # the file to the disk. self.stdout.write(self.style.MIGRATE_HEADING( "Full merge migrations file '%s':" % writer.filename )) self.stdout.write(writer.as_string())
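
# A minimal sketch of driving this command programmatically; the 'blog' app
# label is hypothetical and a configured project is assumed. Equivalent to:
#   python manage.py makemigrations blog --dry-run --name add_author -v 3
def _example_call_makemigrations():
    from django.core.management import call_command
    call_command(
        'makemigrations', 'blog',
        dry_run=True,       # show what would be written without touching disk
        name='add_author',  # must be a valid Python identifier (checked above)
        verbosity=3,        # with --dry-run, prints the full migration file
    )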
036a61f4c1c97cd35d5f279cae80a575c2d98e0b6e038c1b061aeef6d4bc7d15
from urllib.parse import urlencode from urllib.request import urlopen from django.apps import apps as django_apps from django.conf import settings from django.core import paginator from django.core.exceptions import ImproperlyConfigured from django.urls import NoReverseMatch, reverse from django.utils import translation PING_URL = "https://www.google.com/webmasters/tools/ping" class SitemapNotFound(Exception): pass def ping_google(sitemap_url=None, ping_url=PING_URL, sitemap_uses_https=True): """ Alert Google that the sitemap for the current site has been updated. If sitemap_url is provided, it should be an absolute path to the sitemap for this site -- e.g., '/sitemap.xml'. If sitemap_url is not provided, this function will attempt to deduce it by using urls.reverse(). """ sitemap_full_url = _get_sitemap_full_url(sitemap_url, sitemap_uses_https) params = urlencode({'sitemap': sitemap_full_url}) urlopen('%s?%s' % (ping_url, params)) def _get_sitemap_full_url(sitemap_url, sitemap_uses_https=True): if not django_apps.is_installed('django.contrib.sites'): raise ImproperlyConfigured("ping_google requires django.contrib.sites, which isn't installed.") if sitemap_url is None: try: # First, try to get the "index" sitemap URL. sitemap_url = reverse('django.contrib.sitemaps.views.index') except NoReverseMatch: try: # Next, try for the "global" sitemap URL. sitemap_url = reverse('django.contrib.sitemaps.views.sitemap') except NoReverseMatch: pass if sitemap_url is None: raise SitemapNotFound("You didn't provide a sitemap_url, and the sitemap URL couldn't be auto-detected.") Site = django_apps.get_model('sites.Site') current_site = Site.objects.get_current() scheme = 'https' if sitemap_uses_https else 'http' return '%s://%s%s' % (scheme, current_site.domain, sitemap_url) class Sitemap: # This limit is defined by Google. See the index documentation at # https://www.sitemaps.org/protocol.html#index. limit = 50000 # If protocol is None, the URLs in the sitemap will use the protocol # with which the sitemap was requested. protocol = None def __get(self, name, obj, default=None): try: attr = getattr(self, name) except AttributeError: return default if callable(attr): return attr(obj) return attr def items(self): return [] def location(self, obj): return obj.get_absolute_url() @property def paginator(self): return paginator.Paginator(self.items(), self.limit) def get_urls(self, page=1, site=None, protocol=None): # Determine protocol if self.protocol is not None: protocol = self.protocol if protocol is None: protocol = 'http' # Determine domain if site is None: if django_apps.is_installed('django.contrib.sites'): Site = django_apps.get_model('sites.Site') try: site = Site.objects.get_current() except Site.DoesNotExist: pass if site is None: raise ImproperlyConfigured( "To use sitemaps, either enable the sites framework or pass " "a Site/RequestSite object in your view." 
) domain = site.domain if getattr(self, 'i18n', False): urls = [] current_lang_code = translation.get_language() for lang_code, lang_name in settings.LANGUAGES: translation.activate(lang_code) urls += self._urls(page, protocol, domain) translation.activate(current_lang_code) else: urls = self._urls(page, protocol, domain) return urls def _urls(self, page, protocol, domain): urls = [] latest_lastmod = None all_items_lastmod = True # track if all items have a lastmod for item in self.paginator.page(page).object_list: loc = "%s://%s%s" % (protocol, domain, self.__get('location', item)) priority = self.__get('priority', item) lastmod = self.__get('lastmod', item) if all_items_lastmod: all_items_lastmod = lastmod is not None if (all_items_lastmod and (latest_lastmod is None or lastmod > latest_lastmod)): latest_lastmod = lastmod url_info = { 'item': item, 'location': loc, 'lastmod': lastmod, 'changefreq': self.__get('changefreq', item), 'priority': str(priority if priority is not None else ''), } urls.append(url_info) if all_items_lastmod and latest_lastmod: self.latest_lastmod = latest_lastmod return urls class GenericSitemap(Sitemap): priority = None changefreq = None def __init__(self, info_dict, priority=None, changefreq=None, protocol=None): self.queryset = info_dict['queryset'] self.date_field = info_dict.get('date_field') self.priority = priority self.changefreq = changefreq self.protocol = protocol def items(self): # Make sure to return a clone; we don't want premature evaluation. return self.queryset.filter() def lastmod(self, item): if self.date_field is not None: return getattr(item, self.date_field) return None
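
# A minimal sketch of GenericSitemap (defined above) wired into the sitemaps
# dict expected by django.contrib.sitemaps.views.sitemap. The queryset
# argument and its 'updated_at' date field are hypothetical.
def _example_generic_sitemap(queryset):
    info_dict = {'queryset': queryset, 'date_field': 'updated_at'}
    return {
        'blog': GenericSitemap(info_dict, priority=0.6, changefreq='weekly'),
    }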
ea6c9023020581cdd20321f93f5b28223c042d111581bc60c4ff397c97207b4e
from django.contrib.messages.api import * # NOQA from django.contrib.messages.constants import * # NOQA
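
# A minimal sketch of the star-imported API in use inside a view; `request`
# is assumed to be an HttpRequest that has passed through MessageMiddleware.
def _example_queue_message(request):
    success(request, 'Profile updated.')  # level constant comes from constants
    return list(get_messages(request))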
adc2916e774d3bd9d2514e27344b295aebfe701039c728a98712adfcaf96239c
import inspect import re from django.apps import apps as django_apps from django.conf import settings from django.core.exceptions import ImproperlyConfigured, PermissionDenied from django.middleware.csrf import rotate_token from django.utils.crypto import constant_time_compare from django.utils.module_loading import import_string from .signals import user_logged_in, user_logged_out, user_login_failed SESSION_KEY = '_auth_user_id' BACKEND_SESSION_KEY = '_auth_user_backend' HASH_SESSION_KEY = '_auth_user_hash' REDIRECT_FIELD_NAME = 'next' def load_backend(path): return import_string(path)() def _get_backends(return_tuples=False): backends = [] for backend_path in settings.AUTHENTICATION_BACKENDS: backend = load_backend(backend_path) backends.append((backend, backend_path) if return_tuples else backend) if not backends: raise ImproperlyConfigured( 'No authentication backends have been defined. Does ' 'AUTHENTICATION_BACKENDS contain anything?' ) return backends def get_backends(): return _get_backends(return_tuples=False) def _clean_credentials(credentials): """ Clean a dictionary of credentials of potentially sensitive info before sending to less secure functions. Not comprehensive - intended for user_login_failed signal """ SENSITIVE_CREDENTIALS = re.compile('api|token|key|secret|password|signature', re.I) CLEANSED_SUBSTITUTE = '********************' for key in credentials: if SENSITIVE_CREDENTIALS.search(key): credentials[key] = CLEANSED_SUBSTITUTE return credentials def _get_user_session_key(request): # This value in the session is always serialized to a string, so we need # to convert it back to Python whenever we access it. return get_user_model()._meta.pk.to_python(request.session[SESSION_KEY]) def authenticate(request=None, **credentials): """ If the given credentials are valid, return a User object. """ for backend, backend_path in _get_backends(return_tuples=True): backend_signature = inspect.signature(backend.authenticate) try: backend_signature.bind(request, **credentials) except TypeError: # This backend doesn't accept these credentials as arguments. Try the next one. continue try: user = backend.authenticate(request, **credentials) except PermissionDenied: # This backend says to stop in our tracks - this user should not be allowed in at all. break if user is None: continue # Annotate the user object with the path of the backend. user.backend = backend_path return user # The credentials supplied are invalid to all backends, fire signal user_login_failed.send(sender=__name__, credentials=_clean_credentials(credentials), request=request) def login(request, user, backend=None): """ Persist a user id and a backend in the request. This way a user doesn't have to reauthenticate on every request. Note that data set during the anonymous session is retained when the user logs in. """ session_auth_hash = '' if user is None: user = request.user if hasattr(user, 'get_session_auth_hash'): session_auth_hash = user.get_session_auth_hash() if SESSION_KEY in request.session: if _get_user_session_key(request) != user.pk or ( session_auth_hash and not constant_time_compare(request.session.get(HASH_SESSION_KEY, ''), session_auth_hash)): # To avoid reusing another user's session, create a new, empty # session if the existing session corresponds to a different # authenticated user. 
request.session.flush() else: request.session.cycle_key() try: backend = backend or user.backend except AttributeError: backends = _get_backends(return_tuples=True) if len(backends) == 1: _, backend = backends[0] else: raise ValueError( 'You have multiple authentication backends configured and ' 'therefore must provide the `backend` argument or set the ' '`backend` attribute on the user.' ) else: if not isinstance(backend, str): raise TypeError('backend must be a dotted import path string (got %r).' % backend) request.session[SESSION_KEY] = user._meta.pk.value_to_string(user) request.session[BACKEND_SESSION_KEY] = backend request.session[HASH_SESSION_KEY] = session_auth_hash if hasattr(request, 'user'): request.user = user rotate_token(request) user_logged_in.send(sender=user.__class__, request=request, user=user) def logout(request): """ Remove the authenticated user's ID from the request and flush their session data. """ # Dispatch the signal before the user is logged out so the receivers have a # chance to find out *who* logged out. user = getattr(request, 'user', None) if not getattr(user, 'is_authenticated', True): user = None user_logged_out.send(sender=user.__class__, request=request, user=user) request.session.flush() if hasattr(request, 'user'): from django.contrib.auth.models import AnonymousUser request.user = AnonymousUser() def get_user_model(): """ Return the User model that is active in this project. """ try: return django_apps.get_model(settings.AUTH_USER_MODEL, require_ready=False) except ValueError: raise ImproperlyConfigured("AUTH_USER_MODEL must be of the form 'app_label.model_name'") except LookupError: raise ImproperlyConfigured( "AUTH_USER_MODEL refers to model '%s' that has not been installed" % settings.AUTH_USER_MODEL ) def get_user(request): """ Return the user model instance associated with the given request session. If no user is retrieved, return an instance of `AnonymousUser`. """ from .models import AnonymousUser user = None try: user_id = _get_user_session_key(request) backend_path = request.session[BACKEND_SESSION_KEY] except KeyError: pass else: if backend_path in settings.AUTHENTICATION_BACKENDS: backend = load_backend(backend_path) user = backend.get_user(user_id) # Verify the session if hasattr(user, 'get_session_auth_hash'): session_hash = request.session.get(HASH_SESSION_KEY) session_hash_verified = session_hash and constant_time_compare( session_hash, user.get_session_auth_hash() ) if not session_hash_verified: if not ( session_hash and hasattr(user, '_legacy_get_session_auth_hash') and constant_time_compare(session_hash, user._legacy_get_session_auth_hash()) ): request.session.flush() user = None return user or AnonymousUser() def get_permission_codename(action, opts): """ Return the codename of the permission for the specified action. """ return '%s_%s' % (action, opts.model_name) def update_session_auth_hash(request, user): """ Updating a user's password logs out all sessions for the user. Take the current request and the updated user object from which the new session hash will be derived and update the session hash appropriately to prevent a password change from logging out the session from which the password was changed. """ request.session.cycle_key() if hasattr(user, 'get_session_auth_hash') and request.user == user: request.session[HASH_SESSION_KEY] = user.get_session_auth_hash()
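
# A minimal sketch of the authenticate()/login() flow above as a view might
# use it. The credential keyword arguments are hypothetical and must match
# what the configured AUTHENTICATION_BACKENDS accept.
def _example_login_view(request):
    user = authenticate(
        request,
        username=request.POST.get('username'),
        password=request.POST.get('password'),
    )
    if user is None:
        # Every backend rejected the credentials; user_login_failed has fired.
        return False
    # Stores SESSION_KEY, BACKEND_SESSION_KEY, and HASH_SESSION_KEY in the
    # session and rotates the CSRF token.
    login(request, user)
    return True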
85fc28f6b9daec815fcc9c687d0459211a078a8a93b1afd3e37ed49f6c08f179
from django.contrib.admin.decorators import register from django.contrib.admin.filters import ( AllValuesFieldListFilter, BooleanFieldListFilter, ChoicesFieldListFilter, DateFieldListFilter, EmptyFieldListFilter, FieldListFilter, ListFilter, RelatedFieldListFilter, RelatedOnlyFieldListFilter, SimpleListFilter, ) from django.contrib.admin.options import ( HORIZONTAL, VERTICAL, ModelAdmin, StackedInline, TabularInline, ) from django.contrib.admin.sites import AdminSite, site from django.utils.module_loading import autodiscover_modules __all__ = [ "register", "ModelAdmin", "HORIZONTAL", "VERTICAL", "StackedInline", "TabularInline", "AdminSite", "site", "ListFilter", "SimpleListFilter", "FieldListFilter", "BooleanFieldListFilter", "RelatedFieldListFilter", "ChoicesFieldListFilter", "DateFieldListFilter", "AllValuesFieldListFilter", "EmptyFieldListFilter", "RelatedOnlyFieldListFilter", "autodiscover", ] def autodiscover(): autodiscover_modules('admin', register_to=site)
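
# A minimal sketch of the re-exported registration API. `Question` is a
# hypothetical model passed in purely for illustration; the decorator
# registers the ModelAdmin with the default site when invoked.
def _example_register(Question):
    @register(Question)
    class QuestionAdmin(ModelAdmin):
        list_display = ('text', 'pub_date')
        list_filter = (('pub_date', DateFieldListFilter),)
    return QuestionAdmin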
1c89944b3e624709090ec8b5901c1e222022c0bdc19f154dd1b8e694182bb387
from django.apps import AppConfig from django.contrib.admin.checks import check_admin_app, check_dependencies from django.core import checks from django.utils.translation import gettext_lazy as _ class SimpleAdminConfig(AppConfig): """Simple AppConfig which does not do automatic discovery.""" default_site = 'django.contrib.admin.sites.AdminSite' name = 'django.contrib.admin' verbose_name = _("Administration") def ready(self): checks.register(check_dependencies, checks.Tags.admin) checks.register(check_admin_app, checks.Tags.admin) class AdminConfig(SimpleAdminConfig): """The default AppConfig for admin which does autodiscovery.""" default = True def ready(self): super().ready() self.module.autodiscover()
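
# A minimal settings sketch: pointing INSTALLED_APPS at SimpleAdminConfig
# instead of the default 'django.contrib.admin' skips autodiscover(), so
# admin modules must then be imported explicitly.
#
#   INSTALLED_APPS = [
#       'django.contrib.admin.apps.SimpleAdminConfig',
#       ...
#   ]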
854acccb45930e4bfe5928e778f978ae474e8b7eaa40b81e9ea5e9bbae70f0d3
from django.conf import settings from django.core.checks import Error def check_site_id(app_configs, **kwargs): if ( hasattr(settings, 'SITE_ID') and not isinstance(settings.SITE_ID, (type(None), int)) ): return [ Error('The SITE_ID setting must be an integer', id='sites.E101'), ] return []
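
# A minimal sketch of the failure mode, assuming settings can be patched in
# a test context: a string SITE_ID trips sites.E101, an int passes.
def _example_check_site_id():
    from unittest import mock
    with mock.patch.object(settings, 'SITE_ID', '1', create=True):
        assert [e.id for e in check_site_id(app_configs=None)] == ['sites.E101']
    with mock.patch.object(settings, 'SITE_ID', 1, create=True):
        assert check_site_id(app_configs=None) == []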
09a155d2212f12083bb78ffc9f23e61418560cbacc7f040a3ff548f33c382f98
from django.apps import AppConfig from django.contrib.sites.checks import check_site_id from django.core import checks from django.db.models.signals import post_migrate from django.utils.translation import gettext_lazy as _ from .management import create_default_site class SitesConfig(AppConfig): name = 'django.contrib.sites' verbose_name = _("Sites") def ready(self): post_migrate.connect(create_default_site, sender=self) checks.register(check_site_id, checks.Tags.sites)
d8d7862fd3137b15c516dff9318a252a24a39c166ea8f6c70eb3d0444f5c7051
import time from importlib import import_module from django.conf import settings from django.contrib.sessions.backends.base import UpdateError from django.core.exceptions import SuspiciousOperation from django.utils.cache import patch_vary_headers from django.utils.deprecation import MiddlewareMixin from django.utils.http import http_date class SessionMiddleware(MiddlewareMixin): # RemovedInDjango40Warning: when the deprecation ends, replace with: # def __init__(self, get_response): def __init__(self, get_response=None): self._get_response_none_deprecation(get_response) self.get_response = get_response self._async_check() engine = import_module(settings.SESSION_ENGINE) self.SessionStore = engine.SessionStore def process_request(self, request): session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME) request.session = self.SessionStore(session_key) def process_response(self, request, response): """ If request.session was modified, or if the configuration is to save the session every time, save the changes and set a session cookie or delete the session cookie if the session has been emptied. """ try: accessed = request.session.accessed modified = request.session.modified empty = request.session.is_empty() except AttributeError: return response # First check if we need to delete this cookie. # The session should be deleted only if the session is entirely empty. if settings.SESSION_COOKIE_NAME in request.COOKIES and empty: response.delete_cookie( settings.SESSION_COOKIE_NAME, path=settings.SESSION_COOKIE_PATH, domain=settings.SESSION_COOKIE_DOMAIN, samesite=settings.SESSION_COOKIE_SAMESITE, ) patch_vary_headers(response, ('Cookie',)) else: if accessed: patch_vary_headers(response, ('Cookie',)) if (modified or settings.SESSION_SAVE_EVERY_REQUEST) and not empty: if request.session.get_expire_at_browser_close(): max_age = None expires = None else: max_age = request.session.get_expiry_age() expires_time = time.time() + max_age expires = http_date(expires_time) # Save the session data and refresh the client cookie. # Skip session save for 500 responses, refs #3881. if response.status_code != 500: try: request.session.save() except UpdateError: raise SuspiciousOperation( "The request's session was deleted before the " "request completed. The user may have logged " "out in a concurrent request, for example." ) response.set_cookie( settings.SESSION_COOKIE_NAME, request.session.session_key, max_age=max_age, expires=expires, domain=settings.SESSION_COOKIE_DOMAIN, path=settings.SESSION_COOKIE_PATH, secure=settings.SESSION_COOKIE_SECURE or None, httponly=settings.SESSION_COOKIE_HTTPONLY or None, samesite=settings.SESSION_COOKIE_SAMESITE, ) return response
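
# A minimal sketch of the request/response cycle above, wiring the middleware
# by hand. `get_response` and `request` stand in for a view callable and an
# HttpRequest; normally the MIDDLEWARE machinery does this, and configured
# session settings are assumed.
def _example_session_cycle(get_response, request):
    middleware = SessionMiddleware(get_response)
    middleware.process_request(request)    # attaches request.session
    request.session['cart'] = ['sku-123']  # hypothetical payload; marks the session modified
    response = get_response(request)
    # Saves the session and sets (or deletes) the session cookie as needed.
    return middleware.process_response(request, response)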
d9dde2aca6ae5078dd2b4820440be409d1671e85a9f2538d9e598dfe93b6e337
import json from django.conf import settings from django.contrib.messages.storage.base import BaseStorage, Message from django.core import signing from django.http import SimpleCookie from django.utils.crypto import constant_time_compare, salted_hmac from django.utils.safestring import SafeData, mark_safe class MessageEncoder(json.JSONEncoder): """ Compactly serialize instances of the ``Message`` class as JSON. """ message_key = '__json_message' def default(self, obj): if isinstance(obj, Message): # Using 0/1 here instead of False/True to produce more compact json is_safedata = 1 if isinstance(obj.message, SafeData) else 0 message = [self.message_key, is_safedata, obj.level, obj.message] if obj.extra_tags: message.append(obj.extra_tags) return message return super().default(obj) class MessageDecoder(json.JSONDecoder): """ Decode JSON that includes serialized ``Message`` instances. """ def process_messages(self, obj): if isinstance(obj, list) and obj: if obj[0] == MessageEncoder.message_key: if obj[1]: obj[3] = mark_safe(obj[3]) return Message(*obj[2:]) return [self.process_messages(item) for item in obj] if isinstance(obj, dict): return {key: self.process_messages(value) for key, value in obj.items()} return obj def decode(self, s, **kwargs): decoded = super().decode(s, **kwargs) return self.process_messages(decoded) class CookieStorage(BaseStorage): """ Store messages in a cookie. """ cookie_name = 'messages' # uwsgi's default configuration enforces a maximum size of 4kb for all the # HTTP headers. In order to leave some room for other cookies and headers, # restrict the session cookie to 1/2 of 4kb. See #18781. max_cookie_size = 2048 not_finished = '__messagesnotfinished__' key_salt = 'django.contrib.messages' def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.signer = signing.get_cookie_signer(salt=self.key_salt) def _get(self, *args, **kwargs): """ Retrieve a list of messages from the messages cookie. If the not_finished sentinel value is found at the end of the message list, remove it and return a result indicating that not all messages were retrieved by this storage. """ data = self.request.COOKIES.get(self.cookie_name) messages = self._decode(data) all_retrieved = not (messages and messages[-1] == self.not_finished) if messages and not all_retrieved: # remove the sentinel value messages.pop() return messages, all_retrieved def _update_cookie(self, encoded_data, response): """ Either set the cookie with the encoded data if there is any data to store, or delete the cookie. """ if encoded_data: response.set_cookie( self.cookie_name, encoded_data, domain=settings.SESSION_COOKIE_DOMAIN, secure=settings.SESSION_COOKIE_SECURE or None, httponly=settings.SESSION_COOKIE_HTTPONLY or None, samesite=settings.SESSION_COOKIE_SAMESITE, ) else: response.delete_cookie( self.cookie_name, domain=settings.SESSION_COOKIE_DOMAIN, samesite=settings.SESSION_COOKIE_SAMESITE, ) def _store(self, messages, response, remove_oldest=True, *args, **kwargs): """ Store the messages to a cookie and return a list of any messages which could not be stored. If the encoded data is larger than ``max_cookie_size``, remove messages until the data fits (these are the messages which are returned), and add the not_finished sentinel value to indicate as much. """ unstored_messages = [] encoded_data = self._encode(messages) if self.max_cookie_size: # data is going to be stored eventually by SimpleCookie, which # adds its own overhead, which we must account for. 
cookie = SimpleCookie() # create outside the loop def stored_length(val): return len(cookie.value_encode(val)[1]) while encoded_data and stored_length(encoded_data) > self.max_cookie_size: if remove_oldest: unstored_messages.append(messages.pop(0)) else: unstored_messages.insert(0, messages.pop()) encoded_data = self._encode(messages + [self.not_finished], encode_empty=unstored_messages) self._update_cookie(encoded_data, response) return unstored_messages def _legacy_hash(self, value): """ # RemovedInDjango40Warning: pre-Django 3.1 hashes will be invalid. Create an HMAC/SHA1 hash based on the value and the project setting's SECRET_KEY, modified to make it unique for the present purpose. """ # The class wide key salt is not reused here since older Django # versions had it fixed and making it dynamic would break old hashes if # self.key_salt is changed. key_salt = 'django.contrib.messages' return salted_hmac(key_salt, value).hexdigest() def _encode(self, messages, encode_empty=False): """ Return an encoded version of the messages list which can be stored as plain text. Since the data will be retrieved from the client-side, the encoded data also contains a hash to ensure that the data was not tampered with. """ if messages or encode_empty: encoder = MessageEncoder(separators=(',', ':')) value = encoder.encode(messages) return self.signer.sign(value) def _decode(self, data): """ Safely decode an encoded text stream back into a list of messages. If the encoded text stream contained an invalid hash or was in an invalid format, return None. """ if not data: return None try: decoded = self.signer.unsign(data) except signing.BadSignature: # RemovedInDjango40Warning: when the deprecation ends, replace # with: # decoded = None. decoded = self._legacy_decode(data) if decoded: try: return json.loads(decoded, cls=MessageDecoder) except json.JSONDecodeError: pass # Mark the data as used (so it gets removed) since something was wrong # with the data. self.used = True return None def _legacy_decode(self, data): # RemovedInDjango40Warning: pre-Django 3.1 hashes will be invalid. bits = data.split('$', 1) if len(bits) == 2: hash_, value = bits if constant_time_compare(hash_, self._legacy_hash(value)): return value return None
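
# A minimal sketch of the encoder/decoder pair above, independent of any
# request/response cycle. INFO is the standard message level constant (20).
def _example_message_json_roundtrip():
    from django.contrib.messages.constants import INFO
    encoder = MessageEncoder(separators=(',', ':'))
    payload = encoder.encode([Message(INFO, 'Saved.')])
    # payload == '[["__json_message",0,20,"Saved."]]'
    return json.loads(payload, cls=MessageDecoder)  # [Message(20, 'Saved.')]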
36d62260bc7bf890cb52c168cc377f84923d87064470b5b5a35e7c7459cc07ac
from django.contrib.postgres.fields import ArrayField from django.db.models import Aggregate, BooleanField, JSONField, Value from .mixins import OrderableAggMixin __all__ = [ 'ArrayAgg', 'BitAnd', 'BitOr', 'BoolAnd', 'BoolOr', 'JSONBAgg', 'StringAgg', ] class ArrayAgg(OrderableAggMixin, Aggregate): function = 'ARRAY_AGG' template = '%(function)s(%(distinct)s%(expressions)s %(ordering)s)' allow_distinct = True @property def output_field(self): return ArrayField(self.source_expressions[0].output_field) def convert_value(self, value, expression, connection): if not value: return [] return value class BitAnd(Aggregate): function = 'BIT_AND' class BitOr(Aggregate): function = 'BIT_OR' class BoolAnd(Aggregate): function = 'BOOL_AND' output_field = BooleanField() class BoolOr(Aggregate): function = 'BOOL_OR' output_field = BooleanField() class JSONBAgg(OrderableAggMixin, Aggregate): function = 'JSONB_AGG' template = '%(function)s(%(expressions)s %(ordering)s)' output_field = JSONField() def convert_value(self, value, expression, connection): if not value: return [] return value class StringAgg(OrderableAggMixin, Aggregate): function = 'STRING_AGG' template = '%(function)s(%(distinct)s%(expressions)s %(ordering)s)' allow_distinct = True def __init__(self, expression, delimiter, **extra): delimiter_expr = Value(str(delimiter)) super().__init__(expression, delimiter_expr, **extra) def convert_value(self, value, expression, connection): if not value: return '' return value
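
# A minimal sketch of the aggregates above on a hypothetical queryset whose
# model has 'name' and 'is_active' fields; these functions require a
# PostgreSQL backend.
def _example_aggregates(queryset):
    return queryset.aggregate(
        names=StringAgg('name', delimiter=', ', distinct=True),
        name_list=ArrayAgg('name', ordering='name'),
        any_active=BoolOr('is_active'),  # hypothetical boolean field
    )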
669532b4d5a437a4fdcb713bab9bf079567c9fa7abb6a2760f9beb1d6817d1e3
import datetime import importlib import io import os import sys from unittest import mock from django.apps import apps from django.core.management import CommandError, call_command from django.db import ( ConnectionHandler, DatabaseError, OperationalError, connection, connections, models, ) from django.db.backends.base.schema import BaseDatabaseSchemaEditor from django.db.backends.utils import truncate_name from django.db.migrations.exceptions import InconsistentMigrationHistory from django.db.migrations.recorder import MigrationRecorder from django.test import TestCase, override_settings, skipUnlessDBFeature from .models import UnicodeModel, UnserializableModel from .routers import TestRouter from .test_base import MigrationTestBase class MigrateTests(MigrationTestBase): """ Tests running the migrate command. """ databases = {'default', 'other'} @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"}) def test_migrate(self): """ Tests basic usage of the migrate command. """ # No tables are created self.assertTableNotExists("migrations_author") self.assertTableNotExists("migrations_tribble") self.assertTableNotExists("migrations_book") # Run the migrations to 0001 only stdout = io.StringIO() call_command('migrate', 'migrations', '0001', verbosity=1, stdout=stdout, no_color=True) stdout = stdout.getvalue() self.assertIn('Target specific migration: 0001_initial, from migrations', stdout) self.assertIn('Applying migrations.0001_initial... OK', stdout) # The correct tables exist self.assertTableExists("migrations_author") self.assertTableExists("migrations_tribble") self.assertTableNotExists("migrations_book") # Run migrations all the way call_command("migrate", verbosity=0) # The correct tables exist self.assertTableExists("migrations_author") self.assertTableNotExists("migrations_tribble") self.assertTableExists("migrations_book") # Unmigrate everything stdout = io.StringIO() call_command('migrate', 'migrations', 'zero', verbosity=1, stdout=stdout, no_color=True) stdout = stdout.getvalue() self.assertIn('Unapply all migrations: migrations', stdout) self.assertIn('Unapplying migrations.0002_second... OK', stdout) # Tables are gone self.assertTableNotExists("migrations_author") self.assertTableNotExists("migrations_tribble") self.assertTableNotExists("migrations_book") @override_settings(INSTALLED_APPS=[ 'django.contrib.auth', 'django.contrib.contenttypes', 'migrations.migrations_test_apps.migrated_app', ]) def test_migrate_with_system_checks(self): out = io.StringIO() call_command('migrate', skip_checks=False, no_color=True, stdout=out) self.assertIn('Apply all migrations: migrated_app', out.getvalue()) @override_settings(INSTALLED_APPS=['migrations', 'migrations.migrations_test_apps.unmigrated_app_syncdb']) def test_app_without_migrations(self): msg = "App 'unmigrated_app_syncdb' does not have migrations." with self.assertRaisesMessage(CommandError, msg): call_command('migrate', app_label='unmigrated_app_syncdb') @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_clashing_prefix'}) def test_ambiguous_prefix(self): msg = ( "More than one migration matches 'a' in app 'migrations'. Please " "be more specific." ) with self.assertRaisesMessage(CommandError, msg): call_command('migrate', app_label='migrations', migration_name='a') @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations'}) def test_unknown_prefix(self): msg = "Cannot find a migration matching 'nonexistent' from app 'migrations'." 
with self.assertRaisesMessage(CommandError, msg): call_command('migrate', app_label='migrations', migration_name='nonexistent') @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_initial_false"}) def test_migrate_initial_false(self): """ `Migration.initial = False` skips fake-initial detection. """ # Make sure no tables are created self.assertTableNotExists("migrations_author") self.assertTableNotExists("migrations_tribble") # Run the migrations to 0001 only call_command("migrate", "migrations", "0001", verbosity=0) # Fake rollback call_command("migrate", "migrations", "zero", fake=True, verbosity=0) # Make sure fake-initial detection does not run with self.assertRaises(DatabaseError): call_command("migrate", "migrations", "0001", fake_initial=True, verbosity=0) call_command("migrate", "migrations", "0001", fake=True, verbosity=0) # Real rollback call_command("migrate", "migrations", "zero", verbosity=0) # Make sure it's all gone self.assertTableNotExists("migrations_author") self.assertTableNotExists("migrations_tribble") self.assertTableNotExists("migrations_book") @override_settings( MIGRATION_MODULES={"migrations": "migrations.test_migrations"}, DATABASE_ROUTERS=['migrations.routers.TestRouter'], ) def test_migrate_fake_initial(self): """ --fake-initial only works if all tables created in the initial migration of an app exists. Database routers must be obeyed when doing that check. """ # Make sure no tables are created for db in self.databases: self.assertTableNotExists("migrations_author", using=db) self.assertTableNotExists("migrations_tribble", using=db) # Run the migrations to 0001 only call_command("migrate", "migrations", "0001", verbosity=0) call_command("migrate", "migrations", "0001", verbosity=0, database="other") # Make sure the right tables exist self.assertTableExists("migrations_author") self.assertTableNotExists("migrations_tribble") # Also check the "other" database self.assertTableNotExists("migrations_author", using="other") self.assertTableExists("migrations_tribble", using="other") # Fake a roll-back call_command("migrate", "migrations", "zero", fake=True, verbosity=0) call_command("migrate", "migrations", "zero", fake=True, verbosity=0, database="other") # Make sure the tables still exist self.assertTableExists("migrations_author") self.assertTableExists("migrations_tribble", using="other") # Try to run initial migration with self.assertRaises(DatabaseError): call_command("migrate", "migrations", "0001", verbosity=0) # Run initial migration with an explicit --fake-initial out = io.StringIO() with mock.patch('django.core.management.color.supports_color', lambda *args: False): call_command("migrate", "migrations", "0001", fake_initial=True, stdout=out, verbosity=1) call_command("migrate", "migrations", "0001", fake_initial=True, verbosity=0, database="other") self.assertIn( "migrations.0001_initial... faked", out.getvalue().lower() ) try: # Run migrations all the way. call_command('migrate', verbosity=0) call_command('migrate', verbosity=0, database="other") self.assertTableExists('migrations_author') self.assertTableNotExists('migrations_tribble') self.assertTableExists('migrations_book') self.assertTableNotExists('migrations_author', using='other') self.assertTableNotExists('migrations_tribble', using='other') self.assertTableNotExists('migrations_book', using='other') # Fake a roll-back. 
call_command('migrate', 'migrations', 'zero', fake=True, verbosity=0) call_command('migrate', 'migrations', 'zero', fake=True, verbosity=0, database='other') self.assertTableExists('migrations_author') self.assertTableNotExists('migrations_tribble') self.assertTableExists('migrations_book') # Run initial migration. with self.assertRaises(DatabaseError): call_command('migrate', 'migrations', verbosity=0) # Run initial migration with an explicit --fake-initial. with self.assertRaises(DatabaseError): # Fails because "migrations_tribble" does not exist but needs # to in order to make --fake-initial work. call_command('migrate', 'migrations', fake_initial=True, verbosity=0) # Fake an apply. call_command('migrate', 'migrations', fake=True, verbosity=0) call_command('migrate', 'migrations', fake=True, verbosity=0, database='other') finally: # Unmigrate everything. call_command('migrate', 'migrations', 'zero', verbosity=0) call_command('migrate', 'migrations', 'zero', verbosity=0, database='other') # Make sure it's all gone for db in self.databases: self.assertTableNotExists("migrations_author", using=db) self.assertTableNotExists("migrations_tribble", using=db) self.assertTableNotExists("migrations_book", using=db) @skipUnlessDBFeature('ignores_table_name_case') def test_migrate_fake_initial_case_insensitive(self): with override_settings(MIGRATION_MODULES={ 'migrations': 'migrations.test_fake_initial_case_insensitive.initial', }): call_command('migrate', 'migrations', '0001', verbosity=0) call_command('migrate', 'migrations', 'zero', fake=True, verbosity=0) with override_settings(MIGRATION_MODULES={ 'migrations': 'migrations.test_fake_initial_case_insensitive.fake_initial', }): out = io.StringIO() call_command( 'migrate', 'migrations', '0001', fake_initial=True, stdout=out, verbosity=1, no_color=True, ) self.assertIn( 'migrations.0001_initial... faked', out.getvalue().lower(), ) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_fake_split_initial"}) def test_migrate_fake_split_initial(self): """ Split initial migrations can be faked with --fake-initial. """ call_command("migrate", "migrations", "0002", verbosity=0) call_command("migrate", "migrations", "zero", fake=True, verbosity=0) out = io.StringIO() with mock.patch('django.core.management.color.supports_color', lambda *args: False): call_command("migrate", "migrations", "0002", fake_initial=True, stdout=out, verbosity=1) value = out.getvalue().lower() self.assertIn("migrations.0001_initial... faked", value) self.assertIn("migrations.0002_second... faked", value) # Fake an apply call_command("migrate", "migrations", fake=True, verbosity=0) # Unmigrate everything call_command("migrate", "migrations", "zero", verbosity=0) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_conflict"}) def test_migrate_conflict_exit(self): """ migrate exits if it detects a conflict. 
""" msg = ( "Conflicting migrations detected; multiple leaf nodes in the " "migration graph: (0002_conflicting_second, 0002_second in " "migrations).\n" "To fix them run 'python manage.py makemigrations --merge'" ) with self.assertRaisesMessage(CommandError, msg): call_command("migrate", "migrations") @override_settings(MIGRATION_MODULES={ 'migrations': 'migrations.test_migrations', }) def test_migrate_check(self): with self.assertRaises(SystemExit): call_command('migrate', 'migrations', '0001', check_unapplied=True) self.assertTableNotExists('migrations_author') self.assertTableNotExists('migrations_tribble') self.assertTableNotExists('migrations_book') @override_settings(MIGRATION_MODULES={ 'migrations': 'migrations.test_migrations_plan', }) def test_migrate_check_plan(self): out = io.StringIO() with self.assertRaises(SystemExit): call_command( 'migrate', 'migrations', '0001', check_unapplied=True, plan=True, stdout=out, no_color=True, ) self.assertEqual( 'Planned operations:\n' 'migrations.0001_initial\n' ' Create model Salamander\n' ' Raw Python operation -> Grow salamander tail.\n', out.getvalue(), ) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"}) def test_showmigrations_list(self): """ showmigrations --list displays migrations and whether or not they're applied. """ out = io.StringIO() with mock.patch('django.core.management.color.supports_color', lambda *args: True): call_command("showmigrations", format='list', stdout=out, verbosity=0, no_color=False) self.assertEqual( '\x1b[1mmigrations\n\x1b[0m' ' [ ] 0001_initial\n' ' [ ] 0002_second\n', out.getvalue().lower() ) call_command("migrate", "migrations", "0001", verbosity=0) out = io.StringIO() # Giving the explicit app_label tests for selective `show_list` in the command call_command("showmigrations", "migrations", format='list', stdout=out, verbosity=0, no_color=True) self.assertEqual( 'migrations\n' ' [x] 0001_initial\n' ' [ ] 0002_second\n', out.getvalue().lower() ) out = io.StringIO() # Applied datetimes are displayed at verbosity 2+. call_command('showmigrations', 'migrations', stdout=out, verbosity=2, no_color=True) migration1 = MigrationRecorder(connection).migration_qs.get(app='migrations', name='0001_initial') self.assertEqual( 'migrations\n' ' [x] 0001_initial (applied at %s)\n' ' [ ] 0002_second\n' % migration1.applied.strftime('%Y-%m-%d %H:%M:%S'), out.getvalue().lower() ) # Cleanup by unmigrating everything call_command("migrate", "migrations", "zero", verbosity=0) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_run_before"}) def test_showmigrations_plan(self): """ Tests --plan output of showmigrations command """ out = io.StringIO() call_command("showmigrations", format='plan', stdout=out) self.assertEqual( "[ ] migrations.0001_initial\n" "[ ] migrations.0003_third\n" "[ ] migrations.0002_second\n", out.getvalue().lower() ) out = io.StringIO() call_command("showmigrations", format='plan', stdout=out, verbosity=2) self.assertEqual( "[ ] migrations.0001_initial\n" "[ ] migrations.0003_third ... (migrations.0001_initial)\n" "[ ] migrations.0002_second ... 
(migrations.0001_initial, migrations.0003_third)\n", out.getvalue().lower() ) call_command("migrate", "migrations", "0003", verbosity=0) out = io.StringIO() call_command("showmigrations", format='plan', stdout=out) self.assertEqual( "[x] migrations.0001_initial\n" "[x] migrations.0003_third\n" "[ ] migrations.0002_second\n", out.getvalue().lower() ) out = io.StringIO() call_command("showmigrations", format='plan', stdout=out, verbosity=2) self.assertEqual( "[x] migrations.0001_initial\n" "[x] migrations.0003_third ... (migrations.0001_initial)\n" "[ ] migrations.0002_second ... (migrations.0001_initial, migrations.0003_third)\n", out.getvalue().lower() ) # Cleanup by unmigrating everything call_command("migrate", "migrations", "zero", verbosity=0) @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_plan'}) def test_migrate_plan(self): """Tests migrate --plan output.""" out = io.StringIO() # Show the plan up to the third migration. call_command('migrate', 'migrations', '0003', plan=True, stdout=out, no_color=True) self.assertEqual( 'Planned operations:\n' 'migrations.0001_initial\n' ' Create model Salamander\n' ' Raw Python operation -> Grow salamander tail.\n' 'migrations.0002_second\n' ' Create model Book\n' " Raw SQL operation -> ['SELECT * FROM migrations_book']\n" 'migrations.0003_third\n' ' Create model Author\n' " Raw SQL operation -> ['SELECT * FROM migrations_author']\n", out.getvalue() ) try: # Migrate to the third migration. call_command('migrate', 'migrations', '0003', verbosity=0) out = io.StringIO() # Show the plan for when there is nothing to apply. call_command('migrate', 'migrations', '0003', plan=True, stdout=out, no_color=True) self.assertEqual( 'Planned operations:\n' ' No planned migration operations.\n', out.getvalue() ) out = io.StringIO() # Show the plan for reverse migration back to 0001. call_command('migrate', 'migrations', '0001', plan=True, stdout=out, no_color=True) self.assertEqual( 'Planned operations:\n' 'migrations.0003_third\n' ' Undo Create model Author\n' " Raw SQL operation -> ['SELECT * FROM migrations_book']\n" 'migrations.0002_second\n' ' Undo Create model Book\n' " Raw SQL operation -> ['SELECT * FROM migrations_salamand…\n", out.getvalue() ) out = io.StringIO() # Show the migration plan to fourth, with truncated details. call_command('migrate', 'migrations', '0004', plan=True, stdout=out, no_color=True) self.assertEqual( 'Planned operations:\n' 'migrations.0004_fourth\n' ' Raw SQL operation -> SELECT * FROM migrations_author WHE…\n', out.getvalue() ) # Show the plan when an operation is irreversible. # Migrate to the fourth migration. call_command('migrate', 'migrations', '0004', verbosity=0) out = io.StringIO() call_command('migrate', 'migrations', '0003', plan=True, stdout=out, no_color=True) self.assertEqual( 'Planned operations:\n' 'migrations.0004_fourth\n' ' Raw SQL operation -> IRREVERSIBLE\n', out.getvalue() ) out = io.StringIO() call_command('migrate', 'migrations', '0005', plan=True, stdout=out, no_color=True) # Operation is marked as irreversible only in the revert plan. 
self.assertEqual( 'Planned operations:\n' 'migrations.0005_fifth\n' ' Raw Python operation\n' ' Raw Python operation\n' ' Raw Python operation -> Feed salamander.\n', out.getvalue() ) call_command('migrate', 'migrations', '0005', verbosity=0) out = io.StringIO() call_command('migrate', 'migrations', '0004', plan=True, stdout=out, no_color=True) self.assertEqual( 'Planned operations:\n' 'migrations.0005_fifth\n' ' Raw Python operation -> IRREVERSIBLE\n' ' Raw Python operation -> IRREVERSIBLE\n' ' Raw Python operation\n', out.getvalue() ) finally: # Cleanup by unmigrating everything: fake the irreversible, then # migrate all to zero. call_command('migrate', 'migrations', '0003', fake=True, verbosity=0) call_command('migrate', 'migrations', 'zero', verbosity=0) @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_empty'}) def test_showmigrations_no_migrations(self): out = io.StringIO() call_command('showmigrations', stdout=out, no_color=True) self.assertEqual('migrations\n (no migrations)\n', out.getvalue().lower()) @override_settings(INSTALLED_APPS=['migrations.migrations_test_apps.unmigrated_app']) def test_showmigrations_unmigrated_app(self): out = io.StringIO() call_command('showmigrations', 'unmigrated_app', stdout=out, no_color=True) self.assertEqual('unmigrated_app\n (no migrations)\n', out.getvalue().lower()) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_empty"}) def test_showmigrations_plan_no_migrations(self): """ Tests --plan output of showmigrations command without migrations """ out = io.StringIO() call_command('showmigrations', format='plan', stdout=out, no_color=True) self.assertEqual('(no migrations)\n', out.getvalue().lower()) out = io.StringIO() call_command('showmigrations', format='plan', stdout=out, verbosity=2, no_color=True) self.assertEqual('(no migrations)\n', out.getvalue().lower()) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_complex"}) def test_showmigrations_plan_squashed(self): """ Tests --plan output of showmigrations command with squashed migrations. """ out = io.StringIO() call_command("showmigrations", format='plan', stdout=out) self.assertEqual( "[ ] migrations.1_auto\n" "[ ] migrations.2_auto\n" "[ ] migrations.3_squashed_5\n" "[ ] migrations.6_auto\n" "[ ] migrations.7_auto\n", out.getvalue().lower() ) out = io.StringIO() call_command("showmigrations", format='plan', stdout=out, verbosity=2) self.assertEqual( "[ ] migrations.1_auto\n" "[ ] migrations.2_auto ... (migrations.1_auto)\n" "[ ] migrations.3_squashed_5 ... (migrations.2_auto)\n" "[ ] migrations.6_auto ... (migrations.3_squashed_5)\n" "[ ] migrations.7_auto ... (migrations.6_auto)\n", out.getvalue().lower() ) call_command("migrate", "migrations", "3_squashed_5", verbosity=0) out = io.StringIO() call_command("showmigrations", format='plan', stdout=out) self.assertEqual( "[x] migrations.1_auto\n" "[x] migrations.2_auto\n" "[x] migrations.3_squashed_5\n" "[ ] migrations.6_auto\n" "[ ] migrations.7_auto\n", out.getvalue().lower() ) out = io.StringIO() call_command("showmigrations", format='plan', stdout=out, verbosity=2) self.assertEqual( "[x] migrations.1_auto\n" "[x] migrations.2_auto ... (migrations.1_auto)\n" "[x] migrations.3_squashed_5 ... (migrations.2_auto)\n" "[ ] migrations.6_auto ... (migrations.3_squashed_5)\n" "[ ] migrations.7_auto ... 
(migrations.6_auto)\n", out.getvalue().lower() ) @override_settings(INSTALLED_APPS=[ 'migrations.migrations_test_apps.mutate_state_b', 'migrations.migrations_test_apps.alter_fk.author_app', 'migrations.migrations_test_apps.alter_fk.book_app', ]) def test_showmigrations_plan_single_app_label(self): """ `showmigrations --plan app_label` output with a single app_label. """ # Single app with no dependencies on other apps. out = io.StringIO() call_command('showmigrations', 'mutate_state_b', format='plan', stdout=out) self.assertEqual( '[ ] mutate_state_b.0001_initial\n' '[ ] mutate_state_b.0002_add_field\n', out.getvalue() ) # Single app with dependencies. out = io.StringIO() call_command('showmigrations', 'author_app', format='plan', stdout=out) self.assertEqual( '[ ] author_app.0001_initial\n' '[ ] book_app.0001_initial\n' '[ ] author_app.0002_alter_id\n', out.getvalue() ) # Some migrations already applied. call_command('migrate', 'author_app', '0001', verbosity=0) out = io.StringIO() call_command('showmigrations', 'author_app', format='plan', stdout=out) self.assertEqual( '[X] author_app.0001_initial\n' '[ ] book_app.0001_initial\n' '[ ] author_app.0002_alter_id\n', out.getvalue() ) # Cleanup by unmigrating author_app. call_command('migrate', 'author_app', 'zero', verbosity=0) @override_settings(INSTALLED_APPS=[ 'migrations.migrations_test_apps.mutate_state_b', 'migrations.migrations_test_apps.alter_fk.author_app', 'migrations.migrations_test_apps.alter_fk.book_app', ]) def test_showmigrations_plan_multiple_app_labels(self): """ `showmigrations --plan app_label` output with multiple app_labels. """ # Multiple apps: author_app depends on book_app; mutate_state_b doesn't # depend on other apps. out = io.StringIO() call_command('showmigrations', 'mutate_state_b', 'author_app', format='plan', stdout=out) self.assertEqual( '[ ] author_app.0001_initial\n' '[ ] book_app.0001_initial\n' '[ ] author_app.0002_alter_id\n' '[ ] mutate_state_b.0001_initial\n' '[ ] mutate_state_b.0002_add_field\n', out.getvalue() ) # Multiple apps: args order shouldn't matter (the same result is # expected as above). out = io.StringIO() call_command('showmigrations', 'author_app', 'mutate_state_b', format='plan', stdout=out) self.assertEqual( '[ ] author_app.0001_initial\n' '[ ] book_app.0001_initial\n' '[ ] author_app.0002_alter_id\n' '[ ] mutate_state_b.0001_initial\n' '[ ] mutate_state_b.0002_add_field\n', out.getvalue() ) @override_settings(INSTALLED_APPS=['migrations.migrations_test_apps.unmigrated_app']) def test_showmigrations_plan_app_label_no_migrations(self): out = io.StringIO() call_command('showmigrations', 'unmigrated_app', format='plan', stdout=out, no_color=True) self.assertEqual('(no migrations)\n', out.getvalue()) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"}) def test_sqlmigrate_forwards(self): """ sqlmigrate outputs forward looking SQL. 
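
        For reference, this corresponds to running, e.g.,
        `python manage.py sqlmigrate migrations 0001` and checking the
        relative order of the statements and SQL comments it emits.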
""" out = io.StringIO() call_command("sqlmigrate", "migrations", "0001", stdout=out) output = out.getvalue().lower() index_tx_start = output.find(connection.ops.start_transaction_sql().lower()) index_op_desc_author = output.find('-- create model author') index_create_table = output.find('create table') index_op_desc_tribble = output.find('-- create model tribble') index_op_desc_unique_together = output.find('-- alter unique_together') index_tx_end = output.find(connection.ops.end_transaction_sql().lower()) if connection.features.can_rollback_ddl: self.assertGreater(index_tx_start, -1, "Transaction start not found") self.assertGreater( index_tx_end, index_op_desc_unique_together, "Transaction end not found or found before operation description (unique_together)" ) self.assertGreater( index_op_desc_author, index_tx_start, "Operation description (author) not found or found before transaction start" ) self.assertGreater( index_create_table, index_op_desc_author, "CREATE TABLE not found or found before operation description (author)" ) self.assertGreater( index_op_desc_tribble, index_create_table, "Operation description (tribble) not found or found before CREATE TABLE (author)" ) self.assertGreater( index_op_desc_unique_together, index_op_desc_tribble, "Operation description (unique_together) not found or found before operation description (tribble)" ) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"}) def test_sqlmigrate_backwards(self): """ sqlmigrate outputs reverse looking SQL. """ # Cannot generate the reverse SQL unless we've applied the migration. call_command("migrate", "migrations", verbosity=0) out = io.StringIO() call_command("sqlmigrate", "migrations", "0001", stdout=out, backwards=True) output = out.getvalue().lower() index_tx_start = output.find(connection.ops.start_transaction_sql().lower()) index_op_desc_unique_together = output.find('-- alter unique_together') index_op_desc_tribble = output.find('-- create model tribble') index_op_desc_author = output.find('-- create model author') index_drop_table = output.rfind('drop table') index_tx_end = output.find(connection.ops.end_transaction_sql().lower()) if connection.features.can_rollback_ddl: self.assertGreater(index_tx_start, -1, "Transaction start not found") self.assertGreater( index_tx_end, index_op_desc_unique_together, "Transaction end not found or found before DROP TABLE" ) self.assertGreater( index_op_desc_unique_together, index_tx_start, "Operation description (unique_together) not found or found before transaction start" ) self.assertGreater( index_op_desc_tribble, index_op_desc_unique_together, "Operation description (tribble) not found or found before operation description (unique_together)" ) self.assertGreater( index_op_desc_author, index_op_desc_tribble, "Operation description (author) not found or found before operation description (tribble)" ) self.assertGreater( index_drop_table, index_op_desc_author, "DROP TABLE not found or found before operation description (author)" ) # Cleanup by unmigrating everything call_command("migrate", "migrations", "zero", verbosity=0) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_non_atomic"}) def test_sqlmigrate_for_non_atomic_migration(self): """ Transaction wrappers aren't shown for non-atomic migrations. 
""" out = io.StringIO() call_command("sqlmigrate", "migrations", "0001", stdout=out) output = out.getvalue().lower() queries = [q.strip() for q in output.splitlines()] if connection.ops.start_transaction_sql(): self.assertNotIn(connection.ops.start_transaction_sql().lower(), queries) self.assertNotIn(connection.ops.end_transaction_sql().lower(), queries) @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations'}) def test_sqlmigrate_for_non_transactional_databases(self): """ Transaction wrappers aren't shown for databases that don't support transactional DDL. """ out = io.StringIO() with mock.patch.object(connection.features, 'can_rollback_ddl', False): call_command('sqlmigrate', 'migrations', '0001', stdout=out) output = out.getvalue().lower() queries = [q.strip() for q in output.splitlines()] start_transaction_sql = connection.ops.start_transaction_sql() if start_transaction_sql: self.assertNotIn(start_transaction_sql.lower(), queries) self.assertNotIn(connection.ops.end_transaction_sql().lower(), queries) @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_squashed'}) def test_sqlmigrate_ambiguous_prefix_squashed_migrations(self): msg = ( "More than one migration matches '0001' in app 'migrations'. " "Please be more specific." ) with self.assertRaisesMessage(CommandError, msg): call_command('sqlmigrate', 'migrations', '0001') @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_squashed'}) def test_sqlmigrate_squashed_migration(self): out = io.StringIO() call_command('sqlmigrate', 'migrations', '0001_squashed_0002', stdout=out) output = out.getvalue().lower() self.assertIn('-- create model author', output) self.assertIn('-- create model book', output) self.assertNotIn('-- create model tribble', output) @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_squashed'}) def test_sqlmigrate_replaced_migration(self): out = io.StringIO() call_command('sqlmigrate', 'migrations', '0001_initial', stdout=out) output = out.getvalue().lower() self.assertIn('-- create model author', output) self.assertIn('-- create model tribble', output) @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_no_operations'}) def test_migrations_no_operations(self): err = io.StringIO() call_command('sqlmigrate', 'migrations', '0001_initial', stderr=err) self.assertEqual(err.getvalue(), 'No operations found.\n') @override_settings( INSTALLED_APPS=[ "migrations.migrations_test_apps.migrated_app", "migrations.migrations_test_apps.migrated_unapplied_app", "migrations.migrations_test_apps.unmigrated_app", ], ) def test_regression_22823_unmigrated_fk_to_migrated_model(self): """ Assuming you have 3 apps, `A`, `B`, and `C`, such that: * `A` has migrations * `B` has a migration we want to apply * `C` has no migrations, but has an FK to `A` When we try to migrate "B", an exception occurs because the "B" was not included in the ProjectState that is used to detect soft-applied migrations (#22823). """ call_command('migrate', 'migrated_unapplied_app', verbosity=0) # unmigrated_app.SillyModel has a foreign key to 'migrations.Tribble', # but that model is only defined in a migration, so the global app # registry never sees it and the reference is left dangling. Remove it # to avoid problems in subsequent tests. 
del apps._pending_operations[('migrations', 'tribble')] @override_settings(INSTALLED_APPS=['migrations.migrations_test_apps.unmigrated_app_syncdb']) def test_migrate_syncdb_deferred_sql_executed_with_schemaeditor(self): """ For an app without migrations, editor.execute() is used for executing the syncdb deferred SQL. """ stdout = io.StringIO() with mock.patch.object(BaseDatabaseSchemaEditor, 'execute') as execute: call_command('migrate', run_syncdb=True, verbosity=1, stdout=stdout, no_color=True) create_table_count = len([call for call in execute.mock_calls if 'CREATE TABLE' in str(call)]) self.assertEqual(create_table_count, 2) # There's at least one deferred SQL for creating the foreign key # index. self.assertGreater(len(execute.mock_calls), 2) stdout = stdout.getvalue() self.assertIn('Synchronize unmigrated apps: unmigrated_app_syncdb', stdout) self.assertIn('Creating tables...', stdout) table_name = truncate_name('unmigrated_app_syncdb_classroom', connection.ops.max_name_length()) self.assertIn('Creating table %s' % table_name, stdout) @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations'}) def test_migrate_syncdb_app_with_migrations(self): msg = "Can't use run_syncdb with app 'migrations' as it has migrations." with self.assertRaisesMessage(CommandError, msg): call_command('migrate', 'migrations', run_syncdb=True, verbosity=0) @override_settings(INSTALLED_APPS=[ 'migrations.migrations_test_apps.unmigrated_app_syncdb', 'migrations.migrations_test_apps.unmigrated_app_simple', ]) def test_migrate_syncdb_app_label(self): """ Running migrate --run-syncdb with an app_label only creates tables for the specified app. """ stdout = io.StringIO() with mock.patch.object(BaseDatabaseSchemaEditor, 'execute') as execute: call_command('migrate', 'unmigrated_app_syncdb', run_syncdb=True, stdout=stdout) create_table_count = len([call for call in execute.mock_calls if 'CREATE TABLE' in str(call)]) self.assertEqual(create_table_count, 2) self.assertGreater(len(execute.mock_calls), 2) self.assertIn('Synchronize unmigrated app: unmigrated_app_syncdb', stdout.getvalue()) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}) def test_migrate_record_replaced(self): """ Running a single squashed migration should record all of the original replaced migrations as run. """ recorder = MigrationRecorder(connection) out = io.StringIO() call_command("migrate", "migrations", verbosity=0) call_command("showmigrations", "migrations", stdout=out, no_color=True) self.assertEqual( 'migrations\n' ' [x] 0001_squashed_0002 (2 squashed migrations)\n', out.getvalue().lower() ) applied_migrations = recorder.applied_migrations() self.assertIn(("migrations", "0001_initial"), applied_migrations) self.assertIn(("migrations", "0002_second"), applied_migrations) self.assertIn(("migrations", "0001_squashed_0002"), applied_migrations) # Rollback changes call_command("migrate", "migrations", "zero", verbosity=0) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}) def test_migrate_record_squashed(self): """ Running migrate for a squashed migration should record as run if all of the replaced migrations have been run (#25231). 
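
        Below, the replaced migrations are fake-recorded via
        MigrationRecorder.record_applied(), so applying the squashed migration
        performs no schema changes and only updates the recorder.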
""" recorder = MigrationRecorder(connection) recorder.record_applied("migrations", "0001_initial") recorder.record_applied("migrations", "0002_second") out = io.StringIO() call_command("migrate", "migrations", verbosity=0) call_command("showmigrations", "migrations", stdout=out, no_color=True) self.assertEqual( 'migrations\n' ' [x] 0001_squashed_0002 (2 squashed migrations)\n', out.getvalue().lower() ) self.assertIn( ("migrations", "0001_squashed_0002"), recorder.applied_migrations() ) # No changes were actually applied so there is nothing to rollback @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations'}) def test_migrate_inconsistent_history(self): """ Running migrate with some migrations applied before their dependencies should not be allowed. """ recorder = MigrationRecorder(connection) recorder.record_applied("migrations", "0002_second") msg = "Migration migrations.0002_second is applied before its dependency migrations.0001_initial" with self.assertRaisesMessage(InconsistentMigrationHistory, msg): call_command("migrate") applied_migrations = recorder.applied_migrations() self.assertNotIn(("migrations", "0001_initial"), applied_migrations) @override_settings(INSTALLED_APPS=[ 'migrations.migrations_test_apps.migrated_unapplied_app', 'migrations.migrations_test_apps.migrated_app', ]) def test_migrate_not_reflected_changes(self): class NewModel1(models.Model): class Meta(): app_label = 'migrated_app' class NewModel2(models.Model): class Meta(): app_label = 'migrated_unapplied_app' out = io.StringIO() try: call_command('migrate', verbosity=0) call_command('migrate', stdout=out, no_color=True) self.assertEqual( "operations to perform:\n" " apply all migrations: migrated_app, migrated_unapplied_app\n" "running migrations:\n" " no migrations to apply.\n" " your models in app(s): 'migrated_app', " "'migrated_unapplied_app' have changes that are not yet " "reflected in a migration, and so won't be applied.\n" " run 'manage.py makemigrations' to make new migrations, and " "then re-run 'manage.py migrate' to apply them.\n", out.getvalue().lower(), ) finally: # Unmigrate everything. call_command('migrate', 'migrated_app', 'zero', verbosity=0) call_command('migrate', 'migrated_unapplied_app', 'zero', verbosity=0) class MakeMigrationsTests(MigrationTestBase): """ Tests running the makemigrations command. 
""" def setUp(self): super().setUp() self._old_models = apps.app_configs['migrations'].models.copy() def tearDown(self): apps.app_configs['migrations'].models = self._old_models apps.all_models['migrations'] = self._old_models apps.clear_cache() super().tearDown() def test_files_content(self): self.assertTableNotExists("migrations_unicodemodel") apps.register_model('migrations', UnicodeModel) with self.temporary_migration_module() as migration_dir: call_command("makemigrations", "migrations", verbosity=0) # Check for empty __init__.py file in migrations folder init_file = os.path.join(migration_dir, "__init__.py") self.assertTrue(os.path.exists(init_file)) with open(init_file) as fp: content = fp.read() self.assertEqual(content, '') # Check for existing 0001_initial.py file in migration folder initial_file = os.path.join(migration_dir, "0001_initial.py") self.assertTrue(os.path.exists(initial_file)) with open(initial_file, encoding='utf-8') as fp: content = fp.read() self.assertIn('migrations.CreateModel', content) self.assertIn('initial = True', content) self.assertIn('úñí©óðé µóðéø', content) # Meta.verbose_name self.assertIn('úñí©óðé µóðéøß', content) # Meta.verbose_name_plural self.assertIn('ÚÑÍ¢ÓÐÉ', content) # title.verbose_name self.assertIn('“Ðjáñgó”', content) # title.default def test_makemigrations_order(self): """ makemigrations should recognize number-only migrations (0001.py). """ module = 'migrations.test_migrations_order' with self.temporary_migration_module(module=module) as migration_dir: if hasattr(importlib, 'invalidate_caches'): # importlib caches os.listdir() on some platforms like macOS # (#23850). importlib.invalidate_caches() call_command('makemigrations', 'migrations', '--empty', '-n', 'a', '-v', '0') self.assertTrue(os.path.exists(os.path.join(migration_dir, '0002_a.py'))) def test_makemigrations_empty_connections(self): empty_connections = ConnectionHandler({'default': {}}) with mock.patch('django.core.management.commands.makemigrations.connections', new=empty_connections): # with no apps out = io.StringIO() call_command('makemigrations', stdout=out) self.assertIn('No changes detected', out.getvalue()) # with an app with self.temporary_migration_module() as migration_dir: call_command('makemigrations', 'migrations', verbosity=0) init_file = os.path.join(migration_dir, '__init__.py') self.assertTrue(os.path.exists(init_file)) @override_settings(INSTALLED_APPS=['migrations', 'migrations2']) def test_makemigrations_consistency_checks_respect_routers(self): """ The history consistency checks in makemigrations respect settings.DATABASE_ROUTERS. """ def patched_has_table(migration_recorder): if migration_recorder.connection is connections['other']: raise Exception('Other connection') else: return mock.DEFAULT self.assertTableNotExists('migrations_unicodemodel') apps.register_model('migrations', UnicodeModel) with mock.patch.object( MigrationRecorder, 'has_table', autospec=True, side_effect=patched_has_table) as has_table: with self.temporary_migration_module() as migration_dir: call_command("makemigrations", "migrations", verbosity=0) initial_file = os.path.join(migration_dir, "0001_initial.py") self.assertTrue(os.path.exists(initial_file)) self.assertEqual(has_table.call_count, 1) # 'default' is checked # Router says not to migrate 'other' so consistency shouldn't # be checked. 
with self.settings(DATABASE_ROUTERS=['migrations.routers.TestRouter']): call_command('makemigrations', 'migrations', verbosity=0) self.assertEqual(has_table.call_count, 2) # 'default' again # With a router that doesn't prohibit migrating 'other', # consistency is checked. with self.settings(DATABASE_ROUTERS=['migrations.routers.DefaultOtherRouter']): with self.assertRaisesMessage(Exception, 'Other connection'): call_command('makemigrations', 'migrations', verbosity=0) self.assertEqual(has_table.call_count, 4) # 'default' and 'other' # With a router that doesn't allow migrating on any database, # no consistency checks are made. with self.settings(DATABASE_ROUTERS=['migrations.routers.TestRouter']): with mock.patch.object(TestRouter, 'allow_migrate', return_value=False) as allow_migrate: call_command('makemigrations', 'migrations', verbosity=0) allow_migrate.assert_any_call('other', 'migrations', model_name='UnicodeModel') # allow_migrate() is called with the correct arguments. self.assertGreater(len(allow_migrate.mock_calls), 0) called_aliases = set() for mock_call in allow_migrate.mock_calls: _, call_args, call_kwargs = mock_call connection_alias, app_name = call_args called_aliases.add(connection_alias) # Raises an error if invalid app_name/model_name occurs. apps.get_app_config(app_name).get_model(call_kwargs['model_name']) self.assertEqual(called_aliases, set(connections)) self.assertEqual(has_table.call_count, 4) def test_failing_migration(self): # If a migration fails to serialize, it shouldn't generate an empty file. #21280 apps.register_model('migrations', UnserializableModel) with self.temporary_migration_module() as migration_dir: with self.assertRaisesMessage(ValueError, 'Cannot serialize'): call_command("makemigrations", "migrations", verbosity=0) initial_file = os.path.join(migration_dir, "0001_initial.py") self.assertFalse(os.path.exists(initial_file)) def test_makemigrations_conflict_exit(self): """ makemigrations exits if it detects a conflict. """ with self.temporary_migration_module(module="migrations.test_migrations_conflict"): with self.assertRaises(CommandError) as context: call_command("makemigrations") self.assertEqual( str(context.exception), "Conflicting migrations detected; multiple leaf nodes in the " "migration graph: (0002_conflicting_second, 0002_second in " "migrations).\n" "To fix them run 'python manage.py makemigrations --merge'" ) def test_makemigrations_merge_no_conflict(self): """ makemigrations exits if in merge mode with no conflicts. """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations"): call_command("makemigrations", merge=True, stdout=out) self.assertIn("No conflicts detected to merge.", out.getvalue()) def test_makemigrations_empty_no_app_specified(self): """ makemigrations exits if no app is specified with 'empty' mode. """ msg = 'You must supply at least one app label when using --empty.' with self.assertRaisesMessage(CommandError, msg): call_command("makemigrations", empty=True) def test_makemigrations_empty_migration(self): """ makemigrations properly constructs an empty migration. 
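
        For reference, `makemigrations app_label --empty` writes a skeleton of
        roughly this shape (a sketch; the header comment varies by version):

            class Migration(migrations.Migration):

                dependencies = [
                ]

                operations = [
                ]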
""" with self.temporary_migration_module() as migration_dir: call_command("makemigrations", "migrations", empty=True, verbosity=0) # Check for existing 0001_initial.py file in migration folder initial_file = os.path.join(migration_dir, "0001_initial.py") self.assertTrue(os.path.exists(initial_file)) with open(initial_file, encoding='utf-8') as fp: content = fp.read() # Remove all whitespace to check for empty dependencies and operations content = content.replace(' ', '') self.assertIn('dependencies=[\n]', content) self.assertIn('operations=[\n]', content) @override_settings(MIGRATION_MODULES={"migrations": None}) def test_makemigrations_disabled_migrations_for_app(self): """ makemigrations raises a nice error when migrations are disabled for an app. """ msg = ( "Django can't create migrations for app 'migrations' because migrations " "have been disabled via the MIGRATION_MODULES setting." ) with self.assertRaisesMessage(ValueError, msg): call_command("makemigrations", "migrations", empty=True, verbosity=0) def test_makemigrations_no_changes_no_apps(self): """ makemigrations exits when there are no changes and no apps are specified. """ out = io.StringIO() call_command("makemigrations", stdout=out) self.assertIn("No changes detected", out.getvalue()) def test_makemigrations_no_changes(self): """ makemigrations exits when there are no changes to an app. """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_no_changes"): call_command("makemigrations", "migrations", stdout=out) self.assertIn("No changes detected in app 'migrations'", out.getvalue()) def test_makemigrations_no_apps_initial(self): """ makemigrations should detect initial is needed on empty migration modules if no app provided. """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_empty"): call_command("makemigrations", stdout=out) self.assertIn("0001_initial.py", out.getvalue()) def test_makemigrations_no_init(self): """Migration directories without an __init__.py file are allowed.""" out = io.StringIO() with self.temporary_migration_module(module='migrations.test_migrations_no_init'): call_command('makemigrations', stdout=out) self.assertIn('0001_initial.py', out.getvalue()) def test_makemigrations_migrations_announce(self): """ makemigrations announces the migration at the default verbosity level. """ out = io.StringIO() with self.temporary_migration_module(): call_command("makemigrations", "migrations", stdout=out) self.assertIn("Migrations for 'migrations'", out.getvalue()) def test_makemigrations_no_common_ancestor(self): """ makemigrations fails to merge migrations with no common ancestor. """ with self.assertRaises(ValueError) as context: with self.temporary_migration_module(module="migrations.test_migrations_no_ancestor"): call_command("makemigrations", "migrations", merge=True) exception_message = str(context.exception) self.assertIn("Could not find common ancestor of", exception_message) self.assertIn("0002_second", exception_message) self.assertIn("0002_conflicting_second", exception_message) def test_makemigrations_interactive_reject(self): """ makemigrations enters and exits interactive mode properly. 
""" # Monkeypatch interactive questioner to auto reject with mock.patch('builtins.input', mock.Mock(return_value='N')): with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir: call_command("makemigrations", "migrations", name="merge", merge=True, interactive=True, verbosity=0) merge_file = os.path.join(migration_dir, '0003_merge.py') self.assertFalse(os.path.exists(merge_file)) def test_makemigrations_interactive_accept(self): """ makemigrations enters interactive mode and merges properly. """ # Monkeypatch interactive questioner to auto accept with mock.patch('builtins.input', mock.Mock(return_value='y')): out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir: call_command("makemigrations", "migrations", name="merge", merge=True, interactive=True, stdout=out) merge_file = os.path.join(migration_dir, '0003_merge.py') self.assertTrue(os.path.exists(merge_file)) self.assertIn("Created new merge migration", out.getvalue()) def test_makemigrations_default_merge_name(self): out = io.StringIO() with self.temporary_migration_module( module='migrations.test_migrations_conflict' ) as migration_dir: call_command('makemigrations', 'migrations', merge=True, interactive=False, stdout=out) merge_file = os.path.join( migration_dir, '0003_merge_0002_conflicting_second_0002_second.py', ) self.assertIs(os.path.exists(merge_file), True) self.assertIn('Created new merge migration %s' % merge_file, out.getvalue()) @mock.patch('django.db.migrations.utils.datetime') def test_makemigrations_auto_merge_name(self, mock_datetime): mock_datetime.datetime.now.return_value = datetime.datetime(2016, 1, 2, 3, 4) with mock.patch('builtins.input', mock.Mock(return_value='y')): out = io.StringIO() with self.temporary_migration_module( module='migrations.test_migrations_conflict_long_name' ) as migration_dir: call_command("makemigrations", "migrations", merge=True, interactive=True, stdout=out) merge_file = os.path.join(migration_dir, '0003_merge_20160102_0304.py') self.assertTrue(os.path.exists(merge_file)) self.assertIn("Created new merge migration", out.getvalue()) def test_makemigrations_non_interactive_not_null_addition(self): """ Non-interactive makemigrations fails when a default is missing on a new not-null field. """ class SillyModel(models.Model): silly_field = models.BooleanField(default=False) silly_int = models.IntegerField() class Meta: app_label = "migrations" with self.assertRaises(SystemExit): with self.temporary_migration_module(module="migrations.test_migrations_no_default"): call_command("makemigrations", "migrations", interactive=False) def test_makemigrations_non_interactive_not_null_alteration(self): """ Non-interactive makemigrations fails when a default is missing on a field changed to not-null. """ class Author(models.Model): name = models.CharField(max_length=255) slug = models.SlugField() age = models.IntegerField(default=0) class Meta: app_label = "migrations" out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations"): call_command("makemigrations", "migrations", interactive=False, stdout=out) self.assertIn("Alter field slug on author", out.getvalue()) def test_makemigrations_non_interactive_no_model_rename(self): """ makemigrations adds and removes a possible model rename in non-interactive mode. 
""" class RenamedModel(models.Model): silly_field = models.BooleanField(default=False) class Meta: app_label = "migrations" out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_no_default"): call_command("makemigrations", "migrations", interactive=False, stdout=out) self.assertIn("Delete model SillyModel", out.getvalue()) self.assertIn("Create model RenamedModel", out.getvalue()) def test_makemigrations_non_interactive_no_field_rename(self): """ makemigrations adds and removes a possible field rename in non-interactive mode. """ class SillyModel(models.Model): silly_rename = models.BooleanField(default=False) class Meta: app_label = "migrations" out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_no_default"): call_command("makemigrations", "migrations", interactive=False, stdout=out) self.assertIn("Remove field silly_field from sillymodel", out.getvalue()) self.assertIn("Add field silly_rename to sillymodel", out.getvalue()) def test_makemigrations_handle_merge(self): """ makemigrations properly merges the conflicting migrations with --noinput. """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir: call_command("makemigrations", "migrations", name="merge", merge=True, interactive=False, stdout=out) merge_file = os.path.join(migration_dir, '0003_merge.py') self.assertTrue(os.path.exists(merge_file)) output = out.getvalue() self.assertIn("Merging migrations", output) self.assertIn("Branch 0002_second", output) self.assertIn("Branch 0002_conflicting_second", output) self.assertIn("Created new merge migration", output) def test_makemigration_merge_dry_run(self): """ makemigrations respects --dry-run option when fixing migration conflicts (#24427). """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir: call_command( "makemigrations", "migrations", name="merge", dry_run=True, merge=True, interactive=False, stdout=out, ) merge_file = os.path.join(migration_dir, '0003_merge.py') self.assertFalse(os.path.exists(merge_file)) output = out.getvalue() self.assertIn("Merging migrations", output) self.assertIn("Branch 0002_second", output) self.assertIn("Branch 0002_conflicting_second", output) self.assertNotIn("Created new merge migration", output) def test_makemigration_merge_dry_run_verbosity_3(self): """ `makemigrations --merge --dry-run` writes the merge migration file to stdout with `verbosity == 3` (#24427). 
""" out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir: call_command( "makemigrations", "migrations", name="merge", dry_run=True, merge=True, interactive=False, stdout=out, verbosity=3, ) merge_file = os.path.join(migration_dir, '0003_merge.py') self.assertFalse(os.path.exists(merge_file)) output = out.getvalue() self.assertIn("Merging migrations", output) self.assertIn("Branch 0002_second", output) self.assertIn("Branch 0002_conflicting_second", output) self.assertNotIn("Created new merge migration", output) # Additional output caused by verbosity 3 # The complete merge migration file that would be written self.assertIn("class Migration(migrations.Migration):", output) self.assertIn("dependencies = [", output) self.assertIn("('migrations', '0002_second')", output) self.assertIn("('migrations', '0002_conflicting_second')", output) self.assertIn("operations = [", output) self.assertIn("]", output) def test_makemigrations_dry_run(self): """ `makemigrations --dry-run` should not ask for defaults. """ class SillyModel(models.Model): silly_field = models.BooleanField(default=False) silly_date = models.DateField() # Added field without a default class Meta: app_label = "migrations" out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_no_default"): call_command("makemigrations", "migrations", dry_run=True, stdout=out) # Output the expected changes directly, without asking for defaults self.assertIn("Add field silly_date to sillymodel", out.getvalue()) def test_makemigrations_dry_run_verbosity_3(self): """ Allow `makemigrations --dry-run` to output the migrations file to stdout (with verbosity == 3). """ class SillyModel(models.Model): silly_field = models.BooleanField(default=False) silly_char = models.CharField(default="") class Meta: app_label = "migrations" out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_no_default"): call_command("makemigrations", "migrations", dry_run=True, stdout=out, verbosity=3) # Normal --dry-run output self.assertIn("- Add field silly_char to sillymodel", out.getvalue()) # Additional output caused by verbosity 3 # The complete migrations file that would be written self.assertIn("class Migration(migrations.Migration):", out.getvalue()) self.assertIn("dependencies = [", out.getvalue()) self.assertIn("('migrations', '0001_initial'),", out.getvalue()) self.assertIn("migrations.AddField(", out.getvalue()) self.assertIn("model_name='sillymodel',", out.getvalue()) self.assertIn("name='silly_char',", out.getvalue()) def test_makemigrations_migrations_modules_path_not_exist(self): """ makemigrations creates migrations when specifying a custom location for migration files using MIGRATION_MODULES if the custom path doesn't already exist. """ class SillyModel(models.Model): silly_field = models.BooleanField(default=False) class Meta: app_label = "migrations" out = io.StringIO() migration_module = "migrations.test_migrations_path_doesnt_exist.foo.bar" with self.temporary_migration_module(module=migration_module) as migration_dir: call_command("makemigrations", "migrations", stdout=out) # Migrations file is actually created in the expected path. initial_file = os.path.join(migration_dir, "0001_initial.py") self.assertTrue(os.path.exists(initial_file)) # Command output indicates the migration is created. 
self.assertIn(" - Create model SillyModel", out.getvalue()) @override_settings(MIGRATION_MODULES={'migrations': 'some.nonexistent.path'}) def test_makemigrations_migrations_modules_nonexistent_toplevel_package(self): msg = ( 'Could not locate an appropriate location to create migrations ' 'package some.nonexistent.path. Make sure the toplevel package ' 'exists and can be imported.' ) with self.assertRaisesMessage(ValueError, msg): call_command('makemigrations', 'migrations', empty=True, verbosity=0) def test_makemigrations_interactive_by_default(self): """ The user is prompted to merge by default if there are conflicts and merge is True. Answer negative to differentiate it from behavior when --noinput is specified. """ # Monkeypatch interactive questioner to auto reject out = io.StringIO() with mock.patch('builtins.input', mock.Mock(return_value='N')): with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir: call_command("makemigrations", "migrations", name="merge", merge=True, stdout=out) merge_file = os.path.join(migration_dir, '0003_merge.py') # This will fail if interactive is False by default self.assertFalse(os.path.exists(merge_file)) self.assertNotIn("Created new merge migration", out.getvalue()) @override_settings( INSTALLED_APPS=[ "migrations", "migrations.migrations_test_apps.unspecified_app_with_conflict"]) def test_makemigrations_unspecified_app_with_conflict_no_merge(self): """ makemigrations does not raise a CommandError when an unspecified app has conflicting migrations. """ with self.temporary_migration_module(module="migrations.test_migrations_no_changes"): call_command("makemigrations", "migrations", merge=False, verbosity=0) @override_settings( INSTALLED_APPS=[ "migrations.migrations_test_apps.migrated_app", "migrations.migrations_test_apps.unspecified_app_with_conflict"]) def test_makemigrations_unspecified_app_with_conflict_merge(self): """ makemigrations does not create a merge for an unspecified app even if it has conflicting migrations. """ # Monkeypatch interactive questioner to auto accept with mock.patch('builtins.input', mock.Mock(return_value='y')): out = io.StringIO() with self.temporary_migration_module(app_label="migrated_app") as migration_dir: call_command("makemigrations", "migrated_app", name="merge", merge=True, interactive=True, stdout=out) merge_file = os.path.join(migration_dir, '0003_merge.py') self.assertFalse(os.path.exists(merge_file)) self.assertIn("No conflicts detected to merge.", out.getvalue()) @override_settings( INSTALLED_APPS=[ "migrations.migrations_test_apps.migrated_app", "migrations.migrations_test_apps.conflicting_app_with_dependencies"]) def test_makemigrations_merge_dont_output_dependency_operations(self): """ makemigrations --merge does not output any operations from apps that don't belong to a given app. 
""" # Monkeypatch interactive questioner to auto accept with mock.patch('builtins.input', mock.Mock(return_value='N')): out = io.StringIO() with mock.patch('django.core.management.color.supports_color', lambda *args: False): call_command( "makemigrations", "conflicting_app_with_dependencies", merge=True, interactive=True, stdout=out ) self.assertEqual( out.getvalue().lower(), 'merging conflicting_app_with_dependencies\n' ' branch 0002_conflicting_second\n' ' - create model something\n' ' branch 0002_second\n' ' - delete model tribble\n' ' - remove field silly_field from author\n' ' - add field rating to author\n' ' - create model book\n' ) def test_makemigrations_with_custom_name(self): """ makemigrations --name generate a custom migration name. """ with self.temporary_migration_module() as migration_dir: def cmd(migration_count, migration_name, *args): call_command("makemigrations", "migrations", "--verbosity", "0", "--name", migration_name, *args) migration_file = os.path.join(migration_dir, "%s_%s.py" % (migration_count, migration_name)) # Check for existing migration file in migration folder self.assertTrue(os.path.exists(migration_file)) with open(migration_file, encoding='utf-8') as fp: content = fp.read() content = content.replace(" ", "") return content # generate an initial migration migration_name_0001 = "my_initial_migration" content = cmd("0001", migration_name_0001) self.assertIn("dependencies=[\n]", content) # importlib caches os.listdir() on some platforms like macOS # (#23850). if hasattr(importlib, 'invalidate_caches'): importlib.invalidate_caches() # generate an empty migration migration_name_0002 = "my_custom_migration" content = cmd("0002", migration_name_0002, "--empty") self.assertIn("dependencies=[\n('migrations','0001_%s'),\n]" % migration_name_0001, content) self.assertIn("operations=[\n]", content) def test_makemigrations_with_invalid_custom_name(self): msg = 'The migration name must be a valid Python identifier.' with self.assertRaisesMessage(CommandError, msg): call_command('makemigrations', 'migrations', '--name', 'invalid name', '--empty') def test_makemigrations_check(self): """ makemigrations --check should exit with a non-zero status when there are changes to an app requiring migrations. """ with self.temporary_migration_module(): with self.assertRaises(SystemExit): call_command("makemigrations", "--check", "migrations", verbosity=0) with self.temporary_migration_module(module="migrations.test_migrations_no_changes"): call_command("makemigrations", "--check", "migrations", verbosity=0) def test_makemigrations_migration_path_output(self): """ makemigrations should print the relative paths to the migrations unless they are outside of the current tree, in which case the absolute path should be shown. """ out = io.StringIO() apps.register_model('migrations', UnicodeModel) with self.temporary_migration_module() as migration_dir: call_command("makemigrations", "migrations", stdout=out) self.assertIn(os.path.join(migration_dir, '0001_initial.py'), out.getvalue()) def test_makemigrations_migration_path_output_valueerror(self): """ makemigrations prints the absolute path if os.path.relpath() raises a ValueError when it's impossible to obtain a relative path, e.g. on Windows if Django is installed on a different drive than where the migration files are created. 
""" out = io.StringIO() with self.temporary_migration_module() as migration_dir: with mock.patch('os.path.relpath', side_effect=ValueError): call_command('makemigrations', 'migrations', stdout=out) self.assertIn(os.path.join(migration_dir, '0001_initial.py'), out.getvalue()) def test_makemigrations_inconsistent_history(self): """ makemigrations should raise InconsistentMigrationHistory exception if there are some migrations applied before their dependencies. """ recorder = MigrationRecorder(connection) recorder.record_applied('migrations', '0002_second') msg = "Migration migrations.0002_second is applied before its dependency migrations.0001_initial" with self.temporary_migration_module(module="migrations.test_migrations"): with self.assertRaisesMessage(InconsistentMigrationHistory, msg): call_command("makemigrations") def test_makemigrations_inconsistent_history_db_failure(self): msg = ( "Got an error checking a consistent migration history performed " "for database connection 'default': could not connect to server" ) with mock.patch( 'django.db.migrations.loader.MigrationLoader.check_consistent_history', side_effect=OperationalError('could not connect to server'), ): with self.temporary_migration_module(): with self.assertWarns(RuntimeWarning) as cm: call_command('makemigrations', verbosity=0) self.assertEqual(str(cm.warning), msg) @mock.patch('builtins.input', return_value='1') @mock.patch('django.db.migrations.questioner.sys.stdin', mock.MagicMock(encoding=sys.getdefaultencoding())) def test_makemigrations_auto_now_add_interactive(self, *args): """ makemigrations prompts the user when adding auto_now_add to an existing model. """ class Entry(models.Model): title = models.CharField(max_length=255) creation_date = models.DateTimeField(auto_now_add=True) class Meta: app_label = 'migrations' # Monkeypatch interactive questioner to auto accept with mock.patch('django.db.migrations.questioner.sys.stdout', new_callable=io.StringIO) as prompt_stdout: out = io.StringIO() with self.temporary_migration_module(module='migrations.test_auto_now_add'): call_command('makemigrations', 'migrations', interactive=True, stdout=out) output = out.getvalue() prompt_output = prompt_stdout.getvalue() self.assertIn("You can accept the default 'timezone.now' by pressing 'Enter'", prompt_output) self.assertIn("Add field creation_date to entry", output) class SquashMigrationsTests(MigrationTestBase): """ Tests running the squashmigrations command. """ def test_squashmigrations_squashes(self): """ squashmigrations squashes migrations. """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations") as migration_dir: call_command('squashmigrations', 'migrations', '0002', interactive=False, stdout=out, no_color=True) squashed_migration_file = os.path.join(migration_dir, "0001_squashed_0002_second.py") self.assertTrue(os.path.exists(squashed_migration_file)) self.assertEqual( out.getvalue(), 'Will squash the following migrations:\n' ' - 0001_initial\n' ' - 0002_second\n' 'Optimizing...\n' ' Optimized from 8 operations to 2 operations.\n' 'Created new squashed migration %s\n' ' You should commit this migration but leave the old ones in place;\n' ' the new migration will be used for new installs. 
Once you are sure\n' ' all instances of the codebase have applied the migrations you squashed,\n' ' you can delete them.\n' % squashed_migration_file ) def test_squashmigrations_initial_attribute(self): with self.temporary_migration_module(module="migrations.test_migrations") as migration_dir: call_command("squashmigrations", "migrations", "0002", interactive=False, verbosity=0) squashed_migration_file = os.path.join(migration_dir, "0001_squashed_0002_second.py") with open(squashed_migration_file, encoding='utf-8') as fp: content = fp.read() self.assertIn("initial = True", content) def test_squashmigrations_optimizes(self): """ squashmigrations optimizes operations. """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations"): call_command("squashmigrations", "migrations", "0002", interactive=False, verbosity=1, stdout=out) self.assertIn("Optimized from 8 operations to 2 operations.", out.getvalue()) def test_ticket_23799_squashmigrations_no_optimize(self): """ squashmigrations --no-optimize doesn't optimize operations. """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations"): call_command("squashmigrations", "migrations", "0002", interactive=False, verbosity=1, no_optimize=True, stdout=out) self.assertIn("Skipping optimization", out.getvalue()) def test_squashmigrations_valid_start(self): """ squashmigrations accepts a starting migration. """ out = io.StringIO() with self.temporary_migration_module(module="migrations.test_migrations_no_changes") as migration_dir: call_command("squashmigrations", "migrations", "0002", "0003", interactive=False, verbosity=1, stdout=out) squashed_migration_file = os.path.join(migration_dir, "0002_second_squashed_0003_third.py") with open(squashed_migration_file, encoding='utf-8') as fp: content = fp.read() self.assertIn(" ('migrations', '0001_initial')", content) self.assertNotIn("initial = True", content) out = out.getvalue() self.assertNotIn(" - 0001_initial", out) self.assertIn(" - 0002_second", out) self.assertIn(" - 0003_third", out) def test_squashmigrations_invalid_start(self): """ squashmigrations doesn't accept a starting migration after the ending migration. """ with self.temporary_migration_module(module="migrations.test_migrations_no_changes"): msg = ( "The migration 'migrations.0003_third' cannot be found. 
Maybe " "it comes after the migration 'migrations.0002_second'" ) with self.assertRaisesMessage(CommandError, msg): call_command("squashmigrations", "migrations", "0003", "0002", interactive=False, verbosity=0) def test_squashed_name_with_start_migration_name(self): """--squashed-name specifies the new migration's name.""" squashed_name = 'squashed_name' with self.temporary_migration_module(module='migrations.test_migrations') as migration_dir: call_command( 'squashmigrations', 'migrations', '0001', '0002', squashed_name=squashed_name, interactive=False, verbosity=0, ) squashed_migration_file = os.path.join(migration_dir, '0001_%s.py' % squashed_name) self.assertTrue(os.path.exists(squashed_migration_file)) def test_squashed_name_without_start_migration_name(self): """--squashed-name also works if a start migration is omitted.""" squashed_name = 'squashed_name' with self.temporary_migration_module(module="migrations.test_migrations") as migration_dir: call_command( 'squashmigrations', 'migrations', '0001', squashed_name=squashed_name, interactive=False, verbosity=0, ) squashed_migration_file = os.path.join(migration_dir, '0001_%s.py' % squashed_name) self.assertTrue(os.path.exists(squashed_migration_file)) class AppLabelErrorTests(TestCase): """ This class inherits TestCase because MigrationTestBase uses `available_apps = ['migrations']` which means that it's the only installed app. 'django.contrib.auth' must be in INSTALLED_APPS for some of these tests. """ nonexistent_app_error = "No installed app with label 'nonexistent_app'." did_you_mean_auth_error = ( "No installed app with label 'django.contrib.auth'. Did you mean " "'auth'?" ) def test_makemigrations_nonexistent_app_label(self): err = io.StringIO() with self.assertRaises(SystemExit): call_command('makemigrations', 'nonexistent_app', stderr=err) self.assertIn(self.nonexistent_app_error, err.getvalue()) def test_makemigrations_app_name_specified_as_label(self): err = io.StringIO() with self.assertRaises(SystemExit): call_command('makemigrations', 'django.contrib.auth', stderr=err) self.assertIn(self.did_you_mean_auth_error, err.getvalue()) def test_migrate_nonexistent_app_label(self): with self.assertRaisesMessage(CommandError, self.nonexistent_app_error): call_command('migrate', 'nonexistent_app') def test_migrate_app_name_specified_as_label(self): with self.assertRaisesMessage(CommandError, self.did_you_mean_auth_error): call_command('migrate', 'django.contrib.auth') def test_showmigrations_nonexistent_app_label(self): err = io.StringIO() with self.assertRaises(SystemExit): call_command('showmigrations', 'nonexistent_app', stderr=err) self.assertIn(self.nonexistent_app_error, err.getvalue()) def test_showmigrations_app_name_specified_as_label(self): err = io.StringIO() with self.assertRaises(SystemExit): call_command('showmigrations', 'django.contrib.auth', stderr=err) self.assertIn(self.did_you_mean_auth_error, err.getvalue()) def test_sqlmigrate_nonexistent_app_label(self): with self.assertRaisesMessage(CommandError, self.nonexistent_app_error): call_command('sqlmigrate', 'nonexistent_app', '0002') def test_sqlmigrate_app_name_specified_as_label(self): with self.assertRaisesMessage(CommandError, self.did_you_mean_auth_error): call_command('sqlmigrate', 'django.contrib.auth', '0002') def test_squashmigrations_nonexistent_app_label(self): with self.assertRaisesMessage(CommandError, self.nonexistent_app_error): call_command('squashmigrations', 'nonexistent_app', '0002') def test_squashmigrations_app_name_specified_as_label(self): 
with self.assertRaisesMessage(CommandError, self.did_you_mean_auth_error): call_command('squashmigrations', 'django.contrib.auth', '0002')
import compileall import os from django.db import connection, connections from django.db.migrations.exceptions import ( AmbiguityError, InconsistentMigrationHistory, NodeNotFoundError, ) from django.db.migrations.loader import MigrationLoader from django.db.migrations.recorder import MigrationRecorder from django.test import TestCase, modify_settings, override_settings from .test_base import MigrationTestBase class RecorderTests(TestCase): """ Tests recording migrations as applied or not. """ databases = {'default', 'other'} def test_apply(self): """ Tests marking migrations as applied/unapplied. """ recorder = MigrationRecorder(connection) self.assertEqual( {(x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"}, set(), ) recorder.record_applied("myapp", "0432_ponies") self.assertEqual( {(x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"}, {("myapp", "0432_ponies")}, ) # That should not affect records of another database recorder_other = MigrationRecorder(connections['other']) self.assertEqual( {(x, y) for (x, y) in recorder_other.applied_migrations() if x == "myapp"}, set(), ) recorder.record_unapplied("myapp", "0432_ponies") self.assertEqual( {(x, y) for (x, y) in recorder.applied_migrations() if x == "myapp"}, set(), ) class LoaderTests(TestCase): """ Tests the disk and database loader, and running through migrations in memory. """ @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"}) @modify_settings(INSTALLED_APPS={'append': 'basic'}) def test_load(self): """ Makes sure the loader can load the migrations for the test apps, and then render them out to a new Apps. """ # Load and test the plan migration_loader = MigrationLoader(connection) self.assertEqual( migration_loader.graph.forwards_plan(("migrations", "0002_second")), [ ("migrations", "0001_initial"), ("migrations", "0002_second"), ], ) # Now render it out! project_state = migration_loader.project_state(("migrations", "0002_second")) self.assertEqual(len(project_state.models), 2) author_state = project_state.models["migrations", "author"] self.assertEqual( list(author_state.fields), ["id", "name", "slug", "age", "rating"] ) book_state = project_state.models["migrations", "book"] self.assertEqual(list(book_state.fields), ['id', 'author']) # Ensure we've included unmigrated apps in there too self.assertIn("basic", project_state.real_apps) @override_settings(MIGRATION_MODULES={ 'migrations': 'migrations.test_migrations', 'migrations2': 'migrations2.test_migrations_2', }) @modify_settings(INSTALLED_APPS={'append': 'migrations2'}) def test_plan_handles_repeated_migrations(self): """ _generate_plan() doesn't readd migrations already in the plan (#29180). """ migration_loader = MigrationLoader(connection) nodes = [('migrations', '0002_second'), ('migrations2', '0001_initial')] self.assertEqual( migration_loader.graph._generate_plan(nodes, at_end=True), [('migrations', '0001_initial'), ('migrations', '0002_second'), ('migrations2', '0001_initial')] ) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_unmigdep"}) def test_load_unmigrated_dependency(self): """ Makes sure the loader can load migrations with a dependency on an unmigrated app. """ # Load and test the plan migration_loader = MigrationLoader(connection) self.assertEqual( migration_loader.graph.forwards_plan(("migrations", "0001_initial")), [ ('contenttypes', '0001_initial'), ('auth', '0001_initial'), ("migrations", "0001_initial"), ], ) # Now render it out! 
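        # (MigrationLoader.project_state() renders the in-memory ProjectState
        # at the given graph node, so model state can be inspected without
        # touching the database.)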
project_state = migration_loader.project_state(("migrations", "0001_initial")) self.assertEqual(len([m for a, m in project_state.models if a == "migrations"]), 1) book_state = project_state.models["migrations", "book"] self.assertEqual(list(book_state.fields), ['id', 'user']) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_run_before"}) def test_run_before(self): """ Makes sure the loader uses Migration.run_before. """ # Load and test the plan migration_loader = MigrationLoader(connection) self.assertEqual( migration_loader.graph.forwards_plan(("migrations", "0002_second")), [ ("migrations", "0001_initial"), ("migrations", "0003_third"), ("migrations", "0002_second"), ], ) @override_settings(MIGRATION_MODULES={ "migrations": "migrations.test_migrations_first", "migrations2": "migrations2.test_migrations_2_first", }) @modify_settings(INSTALLED_APPS={'append': 'migrations2'}) def test_first(self): """ Makes sure the '__first__' migrations build correctly. """ migration_loader = MigrationLoader(connection) self.assertEqual( migration_loader.graph.forwards_plan(("migrations", "second")), [ ("migrations", "thefirst"), ("migrations2", "0001_initial"), ("migrations2", "0002_second"), ("migrations", "second"), ], ) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"}) def test_name_match(self): "Tests prefix name matching" migration_loader = MigrationLoader(connection) self.assertEqual( migration_loader.get_migration_by_prefix("migrations", "0001").name, "0001_initial", ) with self.assertRaises(AmbiguityError): migration_loader.get_migration_by_prefix("migrations", "0") with self.assertRaises(KeyError): migration_loader.get_migration_by_prefix("migrations", "blarg") def test_load_import_error(self): with override_settings(MIGRATION_MODULES={"migrations": "import_error_package"}): with self.assertRaises(ImportError): MigrationLoader(connection) def test_load_module_file(self): with override_settings(MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.file"}): loader = MigrationLoader(connection) self.assertIn( "migrations", loader.unmigrated_apps, "App with migrations module file not in unmigrated apps." ) def test_load_empty_dir(self): with override_settings(MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.namespace"}): loader = MigrationLoader(connection) self.assertIn( "migrations", loader.unmigrated_apps, "App missing __init__.py in migrations module not in unmigrated apps." ) @override_settings( INSTALLED_APPS=['migrations.migrations_test_apps.migrated_app'], ) def test_marked_as_migrated(self): """ Undefined MIGRATION_MODULES implies default migration module. """ migration_loader = MigrationLoader(connection) self.assertEqual(migration_loader.migrated_apps, {'migrated_app'}) self.assertEqual(migration_loader.unmigrated_apps, set()) @override_settings( INSTALLED_APPS=['migrations.migrations_test_apps.migrated_app'], MIGRATION_MODULES={"migrated_app": None}, ) def test_marked_as_unmigrated(self): """ MIGRATION_MODULES allows disabling of migrations for a particular app. 
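
        Mapping an app label to a dotted path relocates its migrations;
        mapping it to None, as in the decorator above, disables them entirely.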
""" migration_loader = MigrationLoader(connection) self.assertEqual(migration_loader.migrated_apps, set()) self.assertEqual(migration_loader.unmigrated_apps, {'migrated_app'}) @override_settings( INSTALLED_APPS=['migrations.migrations_test_apps.migrated_app'], MIGRATION_MODULES={'migrated_app': 'missing-module'}, ) def test_explicit_missing_module(self): """ If a MIGRATION_MODULES override points to a missing module, the error raised during the importation attempt should be propagated unless `ignore_no_migrations=True`. """ with self.assertRaisesMessage(ImportError, 'missing-module'): migration_loader = MigrationLoader(connection) migration_loader = MigrationLoader(connection, ignore_no_migrations=True) self.assertEqual(migration_loader.migrated_apps, set()) self.assertEqual(migration_loader.unmigrated_apps, {'migrated_app'}) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"}) def test_loading_squashed(self): "Tests loading a squashed migration" migration_loader = MigrationLoader(connection) recorder = MigrationRecorder(connection) self.addCleanup(recorder.flush) # Loading with nothing applied should just give us the one node self.assertEqual( len([x for x in migration_loader.graph.nodes if x[0] == "migrations"]), 1, ) # However, fake-apply one migration and it should now use the old two recorder.record_applied("migrations", "0001_initial") migration_loader.build_graph() self.assertEqual( len([x for x in migration_loader.graph.nodes if x[0] == "migrations"]), 2, ) @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_complex"}) def test_loading_squashed_complex(self): "Tests loading a complex set of squashed migrations" loader = MigrationLoader(connection) recorder = MigrationRecorder(connection) self.addCleanup(recorder.flush) def num_nodes(): plan = set(loader.graph.forwards_plan(('migrations', '7_auto'))) return len(plan - loader.applied_migrations.keys()) # Empty database: use squashed migration loader.build_graph() self.assertEqual(num_nodes(), 5) # Starting at 1 or 2 should use the squashed migration too recorder.record_applied("migrations", "1_auto") loader.build_graph() self.assertEqual(num_nodes(), 4) recorder.record_applied("migrations", "2_auto") loader.build_graph() self.assertEqual(num_nodes(), 3) # However, starting at 3 to 5 cannot use the squashed migration recorder.record_applied("migrations", "3_auto") loader.build_graph() self.assertEqual(num_nodes(), 4) recorder.record_applied("migrations", "4_auto") loader.build_graph() self.assertEqual(num_nodes(), 3) # Starting at 5 to 7 we are passed the squashed migrations recorder.record_applied("migrations", "5_auto") loader.build_graph() self.assertEqual(num_nodes(), 2) recorder.record_applied("migrations", "6_auto") loader.build_graph() self.assertEqual(num_nodes(), 1) recorder.record_applied("migrations", "7_auto") loader.build_graph() self.assertEqual(num_nodes(), 0) @override_settings(MIGRATION_MODULES={ "app1": "migrations.test_migrations_squashed_complex_multi_apps.app1", "app2": "migrations.test_migrations_squashed_complex_multi_apps.app2", }) @modify_settings(INSTALLED_APPS={'append': [ "migrations.test_migrations_squashed_complex_multi_apps.app1", "migrations.test_migrations_squashed_complex_multi_apps.app2", ]}) def test_loading_squashed_complex_multi_apps(self): loader = MigrationLoader(connection) loader.build_graph() plan = set(loader.graph.forwards_plan(('app1', '4_auto'))) expected_plan = { ('app1', '1_auto'), ('app2', '1_squashed_2'), 
            ('app1', '2_squashed_3'),
            ('app1', '4_auto'),
        }
        self.assertEqual(plan, expected_plan)

    @override_settings(MIGRATION_MODULES={
        "app1": "migrations.test_migrations_squashed_complex_multi_apps.app1",
        "app2": "migrations.test_migrations_squashed_complex_multi_apps.app2",
    })
    @modify_settings(INSTALLED_APPS={'append': [
        "migrations.test_migrations_squashed_complex_multi_apps.app1",
        "migrations.test_migrations_squashed_complex_multi_apps.app2",
    ]})
    def test_loading_squashed_complex_multi_apps_partially_applied(self):
        loader = MigrationLoader(connection)
        recorder = MigrationRecorder(connection)
        recorder.record_applied('app1', '1_auto')
        recorder.record_applied('app1', '2_auto')
        loader.build_graph()

        plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
        plan = plan - loader.applied_migrations.keys()
        expected_plan = {
            ('app2', '1_squashed_2'),
            ('app1', '3_auto'),
            ('app1', '4_auto'),
        }
        self.assertEqual(plan, expected_plan)

    @override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed_erroneous"})
    def test_loading_squashed_erroneous(self):
        "Tests loading a complex but erroneous set of squashed migrations"
        loader = MigrationLoader(connection)
        recorder = MigrationRecorder(connection)
        self.addCleanup(recorder.flush)

        def num_nodes():
            plan = set(loader.graph.forwards_plan(('migrations', '7_auto')))
            return len(plan - loader.applied_migrations.keys())

        # Empty database: use squashed migration
        loader.build_graph()
        self.assertEqual(num_nodes(), 5)

        # Starting at 1 or 2 should use the squashed migration too
        recorder.record_applied("migrations", "1_auto")
        loader.build_graph()
        self.assertEqual(num_nodes(), 4)

        recorder.record_applied("migrations", "2_auto")
        loader.build_graph()
        self.assertEqual(num_nodes(), 3)

        # However, starting at 3 or 4, nonexistent migrations would be needed.
        msg = (
            "Migration migrations.6_auto depends on nonexistent node ('migrations', '5_auto'). "
            "Django tried to replace migration migrations.5_auto with any of "
            "[migrations.3_squashed_5] but wasn't able to because some of the replaced "
            "migrations are already applied."
        )
        recorder.record_applied("migrations", "3_auto")
        with self.assertRaisesMessage(NodeNotFoundError, msg):
            loader.build_graph()

        recorder.record_applied("migrations", "4_auto")
        with self.assertRaisesMessage(NodeNotFoundError, msg):
            loader.build_graph()

        # From 5 to 7 we're past the squashed migrations
        recorder.record_applied("migrations", "5_auto")
        loader.build_graph()
        self.assertEqual(num_nodes(), 2)

        recorder.record_applied("migrations", "6_auto")
        loader.build_graph()
        self.assertEqual(num_nodes(), 1)

        recorder.record_applied("migrations", "7_auto")
        loader.build_graph()
        self.assertEqual(num_nodes(), 0)

    @override_settings(
        MIGRATION_MODULES={'migrations': 'migrations.test_migrations'},
        INSTALLED_APPS=['migrations'],
    )
    def test_check_consistent_history(self):
        loader = MigrationLoader(connection=None)
        loader.check_consistent_history(connection)
        recorder = MigrationRecorder(connection)
        recorder.record_applied('migrations', '0002_second')
        msg = (
            "Migration migrations.0002_second is applied before its dependency "
            "migrations.0001_initial on database 'default'."
        )
        with self.assertRaisesMessage(InconsistentMigrationHistory, msg):
            loader.check_consistent_history(connection)

    @override_settings(
        MIGRATION_MODULES={'migrations': 'migrations.test_migrations_squashed_extra'},
        INSTALLED_APPS=['migrations'],
    )
    def test_check_consistent_history_squashed(self):
        """
        MigrationLoader.check_consistent_history() should ignore unapplied
        squashed migrations that have all of their `replaces` applied.
        """
        loader = MigrationLoader(connection=None)
        recorder = MigrationRecorder(connection)
        recorder.record_applied('migrations', '0001_initial')
        recorder.record_applied('migrations', '0002_second')
        loader.check_consistent_history(connection)
        recorder.record_applied('migrations', '0003_third')
        loader.check_consistent_history(connection)

    @override_settings(MIGRATION_MODULES={
        "app1": "migrations.test_migrations_squashed_ref_squashed.app1",
        "app2": "migrations.test_migrations_squashed_ref_squashed.app2",
    })
    @modify_settings(INSTALLED_APPS={'append': [
        "migrations.test_migrations_squashed_ref_squashed.app1",
        "migrations.test_migrations_squashed_ref_squashed.app2",
    ]})
    def test_loading_squashed_ref_squashed(self):
        "Tests loading a squashed migration with a new migration referencing it"
        r"""
        The sample migrations are structured like this:

        app_1       1 --> 2 ---------------------*--> 3        *--> 4
                     \                          /             /
                      *-------------------*----/--> 2_sq_3 --*
                       \                 /    /
        =============== \ ============= / == / ======================
        app_2            *--> 1_sq_2 --*    /
                          \                /
                           *--> 1 --> 2 --*

        Where 2_sq_3 is a replacing migration for 2 and 3 in app_1,
        as 1_sq_2 is a replacing migration for 1 and 2 in app_2.
        """
        loader = MigrationLoader(connection)
        recorder = MigrationRecorder(connection)
        self.addCleanup(recorder.flush)

        # Load with nothing applied: both migrations squashed.
        loader.build_graph()
        plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
        plan = plan - loader.applied_migrations.keys()
        expected_plan = {
            ('app1', '1_auto'),
            ('app2', '1_squashed_2'),
            ('app1', '2_squashed_3'),
            ('app1', '4_auto'),
        }
        self.assertEqual(plan, expected_plan)

        # Fake-apply a few from app1: unsquashes migration in app1.
        recorder.record_applied('app1', '1_auto')
        recorder.record_applied('app1', '2_auto')
        loader.build_graph()
        plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
        plan = plan - loader.applied_migrations.keys()
        expected_plan = {
            ('app2', '1_squashed_2'),
            ('app1', '3_auto'),
            ('app1', '4_auto'),
        }
        self.assertEqual(plan, expected_plan)

        # Fake-apply one from app2: unsquashes migration in app2 too.
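        # (Only one of 1_squashed_2's replaced migrations is now recorded as
        # applied, so the plan falls back to app2's original 2_auto below.)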
        recorder.record_applied('app2', '1_auto')
        loader.build_graph()
        plan = set(loader.graph.forwards_plan(('app1', '4_auto')))
        plan = plan - loader.applied_migrations.keys()
        expected_plan = {
            ('app2', '2_auto'),
            ('app1', '3_auto'),
            ('app1', '4_auto'),
        }
        self.assertEqual(plan, expected_plan)

    @override_settings(MIGRATION_MODULES={'migrations': 'migrations.test_migrations_private'})
    def test_ignore_files(self):
        """Files prefixed with underscore, tilde, or dot aren't loaded."""
        loader = MigrationLoader(connection)
        loader.load_disk()
        migrations = [name for app, name in loader.disk_migrations if app == 'migrations']
        self.assertEqual(migrations, ['0001_initial'])

    @override_settings(
        MIGRATION_MODULES={'migrations': 'migrations.test_migrations_namespace_package'},
    )
    def test_loading_namespace_package(self):
        """Migration directories without an __init__.py file are ignored."""
        loader = MigrationLoader(connection)
        loader.load_disk()
        migrations = [name for app, name in loader.disk_migrations if app == 'migrations']
        self.assertEqual(migrations, [])


class PycLoaderTests(MigrationTestBase):

    def test_valid(self):
        """
        To support frozen environments, MigrationLoader loads .pyc migrations.
        """
        with self.temporary_migration_module(module='migrations.test_migrations') as migration_dir:
            # Compile .py files to .pyc files and delete .py files.
            compileall.compile_dir(migration_dir, force=True, quiet=1, legacy=True)
            for name in os.listdir(migration_dir):
                if name.endswith('.py'):
                    os.remove(os.path.join(migration_dir, name))
            loader = MigrationLoader(connection)
            self.assertIn(('migrations', '0001_initial'), loader.disk_migrations)

    def test_invalid(self):
        """
        MigrationLoader reraises ImportErrors caused by "bad magic number" pyc
        files with a more helpful message.
        """
        with self.temporary_migration_module(module='migrations.test_migrations_bad_pyc') as migration_dir:
            # The -tpl suffix is to avoid the pyc exclusion in MANIFEST.in.
            os.rename(
                os.path.join(migration_dir, '0001_initial.pyc-tpl'),
                os.path.join(migration_dir, '0001_initial.pyc'),
            )
            msg = (
                r"Couldn't import '\w+.migrations.0001_initial' as it appears "
                "to be a stale .pyc file."
            )
            with self.assertRaisesRegex(ImportError, msg):
                MigrationLoader(connection)