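# Tests for editing model objects inline in the Django admin (admin_inlines app).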
from django.contrib.admin import ModelAdmin, TabularInline
from django.contrib.admin.helpers import InlineAdminForm
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.auth.models import Permission, User
from django.contrib.contenttypes.models import ContentType
from django.test import RequestFactory, TestCase, override_settings
from django.urls import reverse

from .admin import InnerInline, site as admin_site
from .models import (
    Author, BinaryTree, Book, Chapter, Child, ChildModel1, ChildModel2,
    Fashionista, FootNote, Holder, Holder2, Holder3, Holder4, Inner, Inner2,
    Inner3, Inner4Stacked, Inner4Tabular, Novel, OutfitItem, Parent,
    ParentModelWithCustomPk, Person, Poll, Profile, ProfileCollection,
    Question, Sighting, SomeChildModel, SomeParentModel, Teacher,
)

INLINE_CHANGELINK_HTML = 'class="inlinechangelink">Change</a>'


class TestDataMixin:

    @classmethod
    def setUpTestData(cls):
        cls.superuser = User.objects.create_superuser(username='super', email='[email protected]', password='secret')


@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInline(TestDataMixin, TestCase):
    factory = RequestFactory()

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        cls.holder = Holder.objects.create(dummy=13)
        Inner.objects.create(dummy=42, holder=cls.holder)

    def setUp(self):
        self.client.force_login(self.superuser)

    def test_can_delete(self):
        """
        can_delete should be passed to inlineformset factory.
        """
        response = self.client.get(
            reverse('admin:admin_inlines_holder_change', args=(self.holder.id,))
        )
        inner_formset = response.context['inline_admin_formsets'][0].formset
        expected = InnerInline.can_delete
        actual = inner_formset.can_delete
        self.assertEqual(expected, actual, 'can_delete must be equal')

    def test_readonly_stacked_inline_label(self):
        """Bug #13174."""
        holder = Holder.objects.create(dummy=42)
        Inner.objects.create(holder=holder, dummy=42, readonly='')
        response = self.client.get(
            reverse('admin:admin_inlines_holder_change', args=(holder.id,))
        )
        self.assertContains(response, '<label>Inner readonly label:</label>')

    def test_many_to_many_inlines(self):
        "Autogenerated many-to-many inlines are displayed correctly (#13407)"
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        # The heading for the m2m inline block uses the right text
        self.assertContains(response, '<h2>Author-book relationships</h2>')
        # The "add another" label is correct
        self.assertContains(response, 'Add another Author-book relationship')
        # The '+' is dropped from the autogenerated form prefix (Author_books+)
        self.assertContains(response, 'id="id_Author_books-TOTAL_FORMS"')

    def test_inline_primary(self):
        person = Person.objects.create(firstname='Imelda')
        item = OutfitItem.objects.create(name='Shoes')
        # Imelda likes shoes, but can't carry her own bags.
        data = {
            'shoppingweakness_set-TOTAL_FORMS': 1,
            'shoppingweakness_set-INITIAL_FORMS': 0,
            'shoppingweakness_set-MAX_NUM_FORMS': 0,
            '_save': 'Save',
            'person': person.id,
            'max_weight': 0,
            'shoppingweakness_set-0-item': item.id,
        }
        response = self.client.post(reverse('admin:admin_inlines_fashionista_add'), data)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(Fashionista.objects.filter(person__firstname='Imelda')), 1)

    def test_tabular_inline_column_css_class(self):
        """
        Field names are included in the context to output a field-specific
        CSS class name in the column headers.
        """
        response = self.client.get(reverse('admin:admin_inlines_poll_add'))
        text_field, call_me_field = list(response.context['inline_admin_formset'].fields())
        # Editable field.
        self.assertEqual(text_field['name'], 'text')
        self.assertContains(response, '<th class="column-text required">')
        # Read-only field.
        self.assertEqual(call_me_field['name'], 'call_me')
        self.assertContains(response, '<th class="column-call_me">')

    def test_custom_form_tabular_inline_label(self):
        """
        A model form with a form field specified (TitleForm.title1) should
        have its label rendered in the tabular inline.
        """
        response = self.client.get(reverse('admin:admin_inlines_titlecollection_add'))
        self.assertContains(response, '<th class="column-title1 required">Title1</th>', html=True)

    def test_custom_form_tabular_inline_extra_field_label(self):
        response = self.client.get(reverse('admin:admin_inlines_outfititem_add'))
        _, extra_field = list(response.context['inline_admin_formset'].fields())
        self.assertEqual(extra_field['label'], 'Extra field')

    def test_non_editable_custom_form_tabular_inline_extra_field_label(self):
        response = self.client.get(reverse('admin:admin_inlines_chapter_add'))
        _, extra_field = list(response.context['inline_admin_formset'].fields())
        self.assertEqual(extra_field['label'], 'Extra field')

    def test_custom_form_tabular_inline_overridden_label(self):
        """
        SomeChildModelForm.__init__() overrides the label of a form field.
        That label is displayed in the TabularInline.
        """
        response = self.client.get(reverse('admin:admin_inlines_someparentmodel_add'))
        field = list(response.context['inline_admin_formset'].fields())[0]
        self.assertEqual(field['label'], 'new label')
        self.assertContains(response, '<th class="column-name required">New label</th>', html=True)

    def test_tabular_non_field_errors(self):
        """
        non_field_errors are displayed correctly, including the correct value
        for colspan.
        """
        data = {
            'title_set-TOTAL_FORMS': 1,
            'title_set-INITIAL_FORMS': 0,
            'title_set-MAX_NUM_FORMS': 0,
            '_save': 'Save',
            'title_set-0-title1': 'a title',
            'title_set-0-title2': 'a different title',
        }
        response = self.client.post(reverse('admin:admin_inlines_titlecollection_add'), data)
        # Here colspan is "4": two fields (title1 and title2), one hidden
        # field and the delete checkbox.
        self.assertContains(
            response,
            '<tr class="row-form-errors"><td colspan="4"><ul class="errorlist nonfield">'
            '<li>The two titles must be the same</li></ul></td></tr>'
        )

    def test_no_parent_callable_lookup(self):
        """Admin inline `readonly_field` shouldn't invoke parent ModelAdmin callable"""
        # Identically named callable isn't present in the parent ModelAdmin,
        # rendering of the add view shouldn't explode
        response = self.client.get(reverse('admin:admin_inlines_novel_add'))
        # View should have the child inlines section
        self.assertContains(
            response,
            '<div class="js-inline-admin-formset inline-group" id="chapter_set-group"'
        )

    def test_callable_lookup(self):
        """Admin inline should invoke local callable when its name is listed in readonly_fields"""
        response = self.client.get(reverse('admin:admin_inlines_poll_add'))
        # Add parent object view should have the child inlines section
        self.assertContains(
            response,
            '<div class="js-inline-admin-formset inline-group" id="question_set-group"'
        )
        # The right callable should be used for the inline readonly_fields
        # column cells
        self.assertContains(response, '<p>Callable in QuestionInline</p>')

    def test_help_text(self):
        """
        The inlines' model field help texts are displayed when using both the
        stacked and tabular layouts.
        """
        response = self.client.get(reverse('admin:admin_inlines_holder4_add'))
        self.assertContains(response, '<div class="help">Awesome stacked help text is awesome.</div>', 4)
        self.assertContains(
            response,
            '<img src="/static/admin/img/icon-unknown.svg" '
            'class="help help-tooltip" width="10" height="10" '
            'alt="(Awesome tabular help text is awesome.)" '
            'title="Awesome tabular help text is awesome.">',
            1
        )
        # ReadOnly fields
        response = self.client.get(reverse('admin:admin_inlines_capofamiglia_add'))
        self.assertContains(
            response,
            '<img src="/static/admin/img/icon-unknown.svg" '
            'class="help help-tooltip" width="10" height="10" '
            'alt="(Help text for ReadOnlyInline)" '
            'title="Help text for ReadOnlyInline">',
            1
        )

    def test_tabular_model_form_meta_readonly_field(self):
        """
        Tabular inlines use ModelForm.Meta.help_texts and labels for read-only
        fields.
        """
        response = self.client.get(reverse('admin:admin_inlines_someparentmodel_add'))
        self.assertContains(
            response,
            '<img src="/static/admin/img/icon-unknown.svg" '
            'class="help help-tooltip" width="10" height="10" '
            'alt="(Help text from ModelForm.Meta)" '
            'title="Help text from ModelForm.Meta">'
        )
        self.assertContains(response, 'Label from ModelForm.Meta')

    def test_inline_hidden_field_no_column(self):
        """#18263 -- Make sure hidden fields don't get a column in tabular inlines"""
        parent = SomeParentModel.objects.create(name='a')
        SomeChildModel.objects.create(name='b', position='0', parent=parent)
        SomeChildModel.objects.create(name='c', position='1', parent=parent)
        response = self.client.get(reverse('admin:admin_inlines_someparentmodel_change', args=(parent.pk,)))
        self.assertNotContains(response, '<td class="field-position">')
        self.assertInHTML(
            '<input id="id_somechildmodel_set-1-position" '
            'name="somechildmodel_set-1-position" type="hidden" value="1">',
            response.rendered_content,
        )

    def test_non_related_name_inline(self):
        """
        Multiple inlines with related_name='+' have correct form prefixes.
        """
        response = self.client.get(reverse('admin:admin_inlines_capofamiglia_add'))
        self.assertContains(response, '<input type="hidden" name="-1-0-id" id="id_-1-0-id">', html=True)
        self.assertContains(
            response,
            '<input type="hidden" name="-1-0-capo_famiglia" id="id_-1-0-capo_famiglia">',
            html=True
        )
        self.assertContains(
            response,
            '<input id="id_-1-0-name" type="text" class="vTextField" name="-1-0-name" maxlength="100">',
            html=True
        )
        self.assertContains(response, '<input type="hidden" name="-2-0-id" id="id_-2-0-id">', html=True)
        self.assertContains(
            response,
            '<input type="hidden" name="-2-0-capo_famiglia" id="id_-2-0-capo_famiglia">',
            html=True
        )
        self.assertContains(
            response,
            '<input id="id_-2-0-name" type="text" class="vTextField" name="-2-0-name" maxlength="100">',
            html=True
        )

    @override_settings(USE_L10N=True, USE_THOUSAND_SEPARATOR=True)
    def test_localize_pk_shortcut(self):
        """
        The "View on Site" link is correct for locales that use thousand
        separators.
        """
        holder = Holder.objects.create(pk=123456789, dummy=42)
        inner = Inner.objects.create(pk=987654321, holder=holder, dummy=42, readonly='')
        response = self.client.get(reverse('admin:admin_inlines_holder_change', args=(holder.id,)))
        inner_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(inner).pk, inner.pk)
        self.assertContains(response, inner_shortcut)

    def test_custom_pk_shortcut(self):
        """
        The "View on Site" link is correct for models with a custom primary
        key field.
        """
        parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
        child1 = ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent)
        child2 = ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent)
        response = self.client.get(reverse('admin:admin_inlines_parentmodelwithcustompk_change', args=('foo',)))
        child1_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child1).pk, child1.pk)
        child2_shortcut = 'r/%s/%s/' % (ContentType.objects.get_for_model(child2).pk, child2.pk)
        self.assertContains(response, child1_shortcut)
        self.assertContains(response, child2_shortcut)

    def test_create_inlines_on_inherited_model(self):
        """
        An object can be created with inlines when it inherits another class.
        """
        data = {
            'name': 'Martian',
            'sighting_set-TOTAL_FORMS': 1,
            'sighting_set-INITIAL_FORMS': 0,
            'sighting_set-MAX_NUM_FORMS': 0,
            'sighting_set-0-place': 'Zone 51',
            '_save': 'Save',
        }
        response = self.client.post(reverse('admin:admin_inlines_extraterrestrial_add'), data)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Sighting.objects.filter(et__name='Martian').count(), 1)

    def test_custom_get_extra_form(self):
        bt_head = BinaryTree.objects.create(name="Tree Head")
        BinaryTree.objects.create(name="First Child", parent=bt_head)
        # The maximum number of forms should respect 'get_max_num' on the
        # ModelAdmin
        max_forms_input = (
            '<input id="id_binarytree_set-MAX_NUM_FORMS" '
            'name="binarytree_set-MAX_NUM_FORMS" type="hidden" value="%d">'
        )
        # The total number of forms will remain the same in either case
        total_forms_hidden = (
            '<input id="id_binarytree_set-TOTAL_FORMS" '
            'name="binarytree_set-TOTAL_FORMS" type="hidden" value="2">'
        )
        response = self.client.get(reverse('admin:admin_inlines_binarytree_add'))
        self.assertInHTML(max_forms_input % 3, response.rendered_content)
        self.assertInHTML(total_forms_hidden, response.rendered_content)

        response = self.client.get(reverse('admin:admin_inlines_binarytree_change', args=(bt_head.id,)))
        self.assertInHTML(max_forms_input % 2, response.rendered_content)
        self.assertInHTML(total_forms_hidden, response.rendered_content)

    def test_min_num(self):
        """
        min_num and extra determine number of forms.
        """
        class MinNumInline(TabularInline):
            model = BinaryTree
            min_num = 2
            extra = 3

        modeladmin = ModelAdmin(BinaryTree, admin_site)
        modeladmin.inlines = [MinNumInline]

        min_forms = (
            '<input id="id_binarytree_set-MIN_NUM_FORMS" '
            'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="2">'
        )
        total_forms = (
            '<input id="id_binarytree_set-TOTAL_FORMS" '
            'name="binarytree_set-TOTAL_FORMS" type="hidden" value="5">'
        )
        request = self.factory.get(reverse('admin:admin_inlines_binarytree_add'))
        request.user = User(username='super', is_superuser=True)
        response = modeladmin.changeform_view(request)
        self.assertInHTML(min_forms, response.rendered_content)
        self.assertInHTML(total_forms, response.rendered_content)

    def test_custom_min_num(self):
        bt_head = BinaryTree.objects.create(name="Tree Head")
        BinaryTree.objects.create(name="First Child", parent=bt_head)

        class MinNumInline(TabularInline):
            model = BinaryTree
            extra = 3

            def get_min_num(self, request, obj=None, **kwargs):
                if obj:
                    return 5
                return 2

        modeladmin = ModelAdmin(BinaryTree, admin_site)
        modeladmin.inlines = [MinNumInline]

        min_forms = (
            '<input id="id_binarytree_set-MIN_NUM_FORMS" '
            'name="binarytree_set-MIN_NUM_FORMS" type="hidden" value="%d">'
        )
        total_forms = (
            '<input id="id_binarytree_set-TOTAL_FORMS" '
            'name="binarytree_set-TOTAL_FORMS" type="hidden" value="%d">'
        )
        request = self.factory.get(reverse('admin:admin_inlines_binarytree_add'))
        request.user = User(username='super', is_superuser=True)
        response = modeladmin.changeform_view(request)
        self.assertInHTML(min_forms % 2, response.rendered_content)
        self.assertInHTML(total_forms % 5, response.rendered_content)

        request = self.factory.get(reverse('admin:admin_inlines_binarytree_change', args=(bt_head.id,)))
        request.user = User(username='super', is_superuser=True)
        response = modeladmin.changeform_view(request, object_id=str(bt_head.id))
        self.assertInHTML(min_forms % 5, response.rendered_content)
        self.assertInHTML(total_forms % 8, response.rendered_content)

    def test_inline_nonauto_noneditable_pk(self):
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        self.assertContains(
            response,
            '<input id="id_nonautopkbook_set-0-rand_pk" '
            'name="nonautopkbook_set-0-rand_pk" type="hidden">',
            html=True
        )
        self.assertContains(
            response,
            '<input id="id_nonautopkbook_set-2-0-rand_pk" '
            'name="nonautopkbook_set-2-0-rand_pk" type="hidden">',
            html=True
        )

    def test_inline_nonauto_noneditable_inherited_pk(self):
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        self.assertContains(
            response,
            '<input id="id_nonautopkbookchild_set-0-nonautopkbook_ptr" '
            'name="nonautopkbookchild_set-0-nonautopkbook_ptr" type="hidden">',
            html=True
        )
        self.assertContains(
            response,
            '<input id="id_nonautopkbookchild_set-2-nonautopkbook_ptr" '
            'name="nonautopkbookchild_set-2-nonautopkbook_ptr" type="hidden">',
            html=True
        )

    def test_inline_editable_pk(self):
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        self.assertContains(
            response,
            '<input class="vIntegerField" id="id_editablepkbook_set-0-manual_pk" '
            'name="editablepkbook_set-0-manual_pk" type="number">',
            html=True, count=1
        )
        self.assertContains(
            response,
            '<input class="vIntegerField" id="id_editablepkbook_set-2-0-manual_pk" '
            'name="editablepkbook_set-2-0-manual_pk" type="number">',
            html=True, count=1
        )

    def test_stacked_inline_edit_form_contains_has_original_class(self):
        holder = Holder.objects.create(dummy=1)
        holder.inner_set.create(dummy=1)
        response = self.client.get(reverse('admin:admin_inlines_holder_change', args=(holder.pk,)))
        self.assertContains(
            response,
            '<div class="inline-related has_original" id="inner_set-0">',
            count=1
        )
        self.assertContains(
            response,
            '<div class="inline-related" id="inner_set-1">',
            count=1
        )

    def test_inlines_show_change_link_registered(self):
        "Inlines `show_change_link` for registered models when enabled."
        holder = Holder4.objects.create(dummy=1)
        item1 = Inner4Stacked.objects.create(dummy=1, holder=holder)
        item2 = Inner4Tabular.objects.create(dummy=1, holder=holder)
        items = (
            ('inner4stacked', item1.pk),
            ('inner4tabular', item2.pk),
        )
        response = self.client.get(reverse('admin:admin_inlines_holder4_change', args=(holder.pk,)))
        self.assertTrue(response.context['inline_admin_formset'].opts.has_registered_model)
        for model, pk in items:
            url = reverse('admin:admin_inlines_%s_change' % model, args=(pk,))
            self.assertContains(response, '<a href="%s" %s' % (url, INLINE_CHANGELINK_HTML))

    def test_inlines_show_change_link_unregistered(self):
        "Inlines `show_change_link` disabled for unregistered models."
        parent = ParentModelWithCustomPk.objects.create(my_own_pk="foo", name="Foo")
        ChildModel1.objects.create(my_own_pk="bar", name="Bar", parent=parent)
        ChildModel2.objects.create(my_own_pk="baz", name="Baz", parent=parent)
        response = self.client.get(reverse('admin:admin_inlines_parentmodelwithcustompk_change', args=('foo',)))
        self.assertFalse(response.context['inline_admin_formset'].opts.has_registered_model)
        self.assertNotContains(response, INLINE_CHANGELINK_HTML)

    def test_tabular_inline_show_change_link_false_registered(self):
        "Inlines `show_change_link` disabled by default."
        poll = Poll.objects.create(name="New poll")
        Question.objects.create(poll=poll)
        response = self.client.get(reverse('admin:admin_inlines_poll_change', args=(poll.pk,)))
        self.assertTrue(response.context['inline_admin_formset'].opts.has_registered_model)
        self.assertNotContains(response, INLINE_CHANGELINK_HTML)

    def test_noneditable_inline_has_field_inputs(self):
        """Inlines without change permission show field inputs on add form."""
        response = self.client.get(reverse('admin:admin_inlines_novelreadonlychapter_add'))
        self.assertContains(
            response,
            '<input type="text" name="chapter_set-0-name" '
            'class="vTextField" maxlength="40" id="id_chapter_set-0-name">',
            html=True
        )

    def test_inlines_plural_heading_foreign_key(self):
        response = self.client.get(reverse('admin:admin_inlines_holder4_add'))
        self.assertContains(response, '<h2>Inner4 stackeds</h2>', html=True)
        self.assertContains(response, '<h2>Inner4 tabulars</h2>', html=True)

    def test_inlines_singular_heading_one_to_one(self):
        response = self.client.get(reverse('admin:admin_inlines_person_add'))
        self.assertContains(response, '<h2>Author</h2>', html=True)  # Tabular.
        self.assertContains(response, '<h2>Fashionista</h2>', html=True)  # Stacked.


@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInlineMedia(TestDataMixin, TestCase):

    def setUp(self):
        self.client.force_login(self.superuser)

    def test_inline_media_only_base(self):
        holder = Holder(dummy=13)
        holder.save()
        Inner(dummy=42, holder=holder).save()
        change_url = reverse('admin:admin_inlines_holder_change', args=(holder.id,))
        response = self.client.get(change_url)
        self.assertContains(response, 'my_awesome_admin_scripts.js')

    def test_inline_media_only_inline(self):
        holder = Holder3(dummy=13)
        holder.save()
        Inner3(dummy=42, holder=holder).save()
        change_url = reverse('admin:admin_inlines_holder3_change', args=(holder.id,))
        response = self.client.get(change_url)
        self.assertEqual(
            response.context['inline_admin_formsets'][0].media._js,
            [
                'admin/js/vendor/jquery/jquery.min.js',
                'my_awesome_inline_scripts.js',
                'custom_number.js',
                'admin/js/jquery.init.js',
                'admin/js/inlines.js',
            ]
        )
        self.assertContains(response, 'my_awesome_inline_scripts.js')

    def test_all_inline_media(self):
        holder = Holder2(dummy=13)
        holder.save()
        Inner2(dummy=42, holder=holder).save()
        change_url = reverse('admin:admin_inlines_holder2_change', args=(holder.id,))
        response = self.client.get(change_url)
        self.assertContains(response, 'my_awesome_admin_scripts.js')
        self.assertContains(response, 'my_awesome_inline_scripts.js')


@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInlineAdminForm(TestCase):

    def test_immutable_content_type(self):
        """Regression for #9362
        The problem depends only on InlineAdminForm and its "original"
        argument, so we can safely set the other arguments to None/{}. We just
        need to check that the content_type argument of Child isn't altered by
        the internals of the inline form."""
        sally = Teacher.objects.create(name='Sally')
        john = Parent.objects.create(name='John')
        joe = Child.objects.create(name='Joe', teacher=sally, parent=john)

        iaf = InlineAdminForm(None, None, {}, {}, joe)
        parent_ct = ContentType.objects.get_for_model(Parent)
        self.assertEqual(iaf.original.content_type, parent_ct)


@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInlineProtectedOnDelete(TestDataMixin, TestCase):

    def setUp(self):
        self.client.force_login(self.superuser)

    def test_deleting_inline_with_protected_delete_does_not_validate(self):
        lotr = Novel.objects.create(name='Lord of the rings')
        chapter = Chapter.objects.create(novel=lotr, name='Many Meetings')
        foot_note = FootNote.objects.create(chapter=chapter, note='yadda yadda')

        change_url = reverse('admin:admin_inlines_novel_change', args=(lotr.id,))
        response = self.client.get(change_url)
        data = {
            'name': lotr.name,
            'chapter_set-TOTAL_FORMS': 1,
            'chapter_set-INITIAL_FORMS': 1,
            'chapter_set-MAX_NUM_FORMS': 1000,
            '_save': 'Save',
            'chapter_set-0-id': chapter.id,
            'chapter_set-0-name': chapter.name,
            'chapter_set-0-novel': lotr.id,
            'chapter_set-0-DELETE': 'on'
        }
        response = self.client.post(change_url, data)
        self.assertContains(
            response,
            "Deleting chapter %s would require deleting "
            "the following protected related objects: foot note %s"
            % (chapter, foot_note)
        )


@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestInlinePermissions(TestCase):
    """
    Make sure the admin respects permissions for objects that are edited
    inline. Refs #8060.
    """
    @classmethod
    def setUpTestData(cls):
        cls.user = User(username='admin', is_staff=True, is_active=True)
        cls.user.set_password('secret')
        cls.user.save()

        cls.author_ct = ContentType.objects.get_for_model(Author)
        cls.holder_ct = ContentType.objects.get_for_model(Holder2)
        cls.book_ct = ContentType.objects.get_for_model(Book)
        cls.inner_ct = ContentType.objects.get_for_model(Inner2)

        # User always has permissions to add and change Authors, and Holders,
        # the main (parent) models of the inlines. Permissions on the inlines
        # vary per test.
        permission = Permission.objects.get(codename='add_author', content_type=cls.author_ct)
        cls.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='change_author', content_type=cls.author_ct)
        cls.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='add_holder2', content_type=cls.holder_ct)
        cls.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='change_holder2', content_type=cls.holder_ct)
        cls.user.user_permissions.add(permission)

        author = Author.objects.create(pk=1, name='The Author')
        cls.book = author.books.create(name='The inline Book')
        cls.author_change_url = reverse('admin:admin_inlines_author_change', args=(author.id,))
        # Get the ID of the automatically created intermediate model for the
        # Author-Book m2m.
        author_book_auto_m2m_intermediate = Author.books.through.objects.get(author=author, book=cls.book)
        cls.author_book_auto_m2m_intermediate_id = author_book_auto_m2m_intermediate.pk

        cls.holder = Holder2.objects.create(dummy=13)
        cls.inner2 = Inner2.objects.create(dummy=42, holder=cls.holder)

    def setUp(self):
        self.holder_change_url = reverse('admin:admin_inlines_holder2_change', args=(self.holder.id,))
        self.client.force_login(self.user)

    def test_inline_add_m2m_noperm(self):
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        # No change permission on books, so no inline
        self.assertNotContains(response, '<h2>Author-book relationships</h2>')
        self.assertNotContains(response, 'Add another Author-Book Relationship')
        self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')

    def test_inline_add_fk_noperm(self):
        response = self.client.get(reverse('admin:admin_inlines_holder2_add'))
        # No permissions on Inner2s, so no inline
        self.assertNotContains(response, '<h2>Inner2s</h2>')
        self.assertNotContains(response, 'Add another Inner2')
        self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')

    def test_inline_change_m2m_noperm(self):
        response = self.client.get(self.author_change_url)
        # No change permission on books, so no inline
        self.assertNotContains(response, '<h2>Author-book relationships</h2>')
        self.assertNotContains(response, 'Add another Author-Book Relationship')
        self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')

    def test_inline_change_fk_noperm(self):
        response = self.client.get(self.holder_change_url)
        # No permissions on Inner2s, so no inline
        self.assertNotContains(response, '<h2>Inner2s</h2>')
        self.assertNotContains(response, 'Add another Inner2')
        self.assertNotContains(response, 'id="id_inner2_set-TOTAL_FORMS"')

    def test_inline_add_m2m_view_only_perm(self):
        permission = Permission.objects.get(codename='view_book', content_type=self.book_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        # View-only inlines. (It could be nicer to hide the empty,
        # non-editable inlines on the add page.)
        self.assertIs(response.context['inline_admin_formset'].has_view_permission, True)
        self.assertIs(response.context['inline_admin_formset'].has_add_permission, False)
        self.assertIs(response.context['inline_admin_formset'].has_change_permission, False)
        self.assertIs(response.context['inline_admin_formset'].has_delete_permission, False)
        self.assertContains(response, '<h2>Author-book relationships</h2>')
        self.assertContains(
            response,
            '<input type="hidden" name="Author_books-TOTAL_FORMS" value="0" '
            'id="id_Author_books-TOTAL_FORMS">',
            html=True,
        )
        self.assertNotContains(response, 'Add another Author-Book Relationship')

    def test_inline_add_m2m_add_perm(self):
        permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(reverse('admin:admin_inlines_author_add'))
        # No change permission on Books, so no inline
        self.assertNotContains(response, '<h2>Author-book relationships</h2>')
        self.assertNotContains(response, 'Add another Author-Book Relationship')
        self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')

    def test_inline_add_fk_add_perm(self):
        permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(reverse('admin:admin_inlines_holder2_add'))
        # Add permission on inner2s, so we get the inline
        self.assertContains(response, '<h2>Inner2s</h2>')
        self.assertContains(response, 'Add another Inner2')
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" '
            'value="3" name="inner2_set-TOTAL_FORMS">',
            html=True
        )

    def test_inline_change_m2m_add_perm(self):
        permission = Permission.objects.get(codename='add_book', content_type=self.book_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.author_change_url)
        # No change permission on books, so no inline
        self.assertNotContains(response, '<h2>Author-book relationships</h2>')
        self.assertNotContains(response, 'Add another Author-Book Relationship')
        self.assertNotContains(response, 'id="id_Author_books-TOTAL_FORMS"')
        self.assertNotContains(response, 'id="id_Author_books-0-DELETE"')

    def test_inline_change_m2m_view_only_perm(self):
        permission = Permission.objects.get(codename='view_book', content_type=self.book_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.author_change_url)
        # View-only inlines.
        self.assertIs(response.context['inline_admin_formset'].has_view_permission, True)
        self.assertIs(response.context['inline_admin_formset'].has_add_permission, False)
        self.assertIs(response.context['inline_admin_formset'].has_change_permission, False)
        self.assertIs(response.context['inline_admin_formset'].has_delete_permission, False)
        self.assertContains(response, '<h2>Author-book relationships</h2>')
        self.assertContains(
            response,
            '<input type="hidden" name="Author_books-TOTAL_FORMS" value="1" '
            'id="id_Author_books-TOTAL_FORMS">',
            html=True,
        )
        # The field in the inline is read-only.
        self.assertContains(response, '<p>%s</p>' % self.book)
        self.assertNotContains(
            response,
            '<input type="checkbox" name="Author_books-0-DELETE" id="id_Author_books-0-DELETE">',
            html=True,
        )

    def test_inline_change_m2m_change_perm(self):
        permission = Permission.objects.get(codename='change_book', content_type=self.book_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.author_change_url)
        # We have change perm on books, so we can add/change/delete inlines
        self.assertIs(response.context['inline_admin_formset'].has_view_permission, True)
        self.assertIs(response.context['inline_admin_formset'].has_add_permission, True)
        self.assertIs(response.context['inline_admin_formset'].has_change_permission, True)
        self.assertIs(response.context['inline_admin_formset'].has_delete_permission, True)
        self.assertContains(response, '<h2>Author-book relationships</h2>')
        self.assertContains(response, 'Add another Author-book relationship')
        self.assertContains(
            response,
            '<input type="hidden" id="id_Author_books-TOTAL_FORMS" '
            'value="4" name="Author_books-TOTAL_FORMS">',
            html=True
        )
        self.assertContains(
            response,
            '<input type="hidden" id="id_Author_books-0-id" value="%i" '
            'name="Author_books-0-id">' % self.author_book_auto_m2m_intermediate_id,
            html=True
        )
        self.assertContains(response, 'id="id_Author_books-0-DELETE"')

    def test_inline_change_fk_add_perm(self):
        permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # Add permission on inner2s, so we can add but not modify existing
        self.assertContains(response, '<h2>Inner2s</h2>')
        self.assertContains(response, 'Add another Inner2')
        # 3 extra forms only, not the existing instance form
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="3" '
            'name="inner2_set-TOTAL_FORMS">',
            html=True
        )
        self.assertNotContains(
            response,
            '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id">' % self.inner2.id,
            html=True
        )

    def test_inline_change_fk_change_perm(self):
        permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # Change permission on inner2s, so we can change existing but not add new
        self.assertContains(response, '<h2>Inner2s</h2>', count=2)
        # Just the one form for existing instances
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="1" name="inner2_set-TOTAL_FORMS">',
            html=True
        )
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id">' % self.inner2.id,
            html=True
        )
        # max-num 0 means we can't add new ones
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-MAX_NUM_FORMS" value="0" name="inner2_set-MAX_NUM_FORMS">',
            html=True
        )
        # TabularInline
        self.assertContains(response, '<th class="column-dummy required">Dummy</th>', html=True)
        self.assertContains(
            response,
            '<input type="number" name="inner2_set-2-0-dummy" value="%s" '
            'class="vIntegerField" id="id_inner2_set-2-0-dummy">' % self.inner2.dummy,
            html=True,
        )

    def test_inline_change_fk_add_change_perm(self):
        permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # Add/change perm, so we can add new and change existing
        self.assertContains(response, '<h2>Inner2s</h2>')
        # One form for existing instance and three extra for new
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="4" name="inner2_set-TOTAL_FORMS">',
            html=True
        )
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id">' % self.inner2.id,
            html=True
        )

    def test_inline_change_fk_change_del_perm(self):
        permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # Change/delete perm on inner2s, so we can change/delete existing
        self.assertContains(response, '<h2>Inner2s</h2>')
        # One form for existing instance only, no new
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="1" name="inner2_set-TOTAL_FORMS">',
            html=True
        )
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id">' % self.inner2.id,
            html=True
        )
        self.assertContains(response, 'id="id_inner2_set-0-DELETE"')

    def test_inline_change_fk_all_perms(self):
        permission = Permission.objects.get(codename='add_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='change_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        permission = Permission.objects.get(codename='delete_inner2', content_type=self.inner_ct)
        self.user.user_permissions.add(permission)
        response = self.client.get(self.holder_change_url)
        # All perms on inner2s, so we can add/change/delete
        self.assertContains(response, '<h2>Inner2s</h2>', count=2)
        # One form for existing instance only, three for new
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-TOTAL_FORMS" value="4" name="inner2_set-TOTAL_FORMS">',
            html=True
        )
        self.assertContains(
            response,
            '<input type="hidden" id="id_inner2_set-0-id" value="%i" name="inner2_set-0-id">' % self.inner2.id,
            html=True
        )
        self.assertContains(response, 'id="id_inner2_set-0-DELETE"')
        # TabularInline
        self.assertContains(response, '<th class="column-dummy required">Dummy</th>', html=True)
        self.assertContains(
            response,
            '<input type="number" name="inner2_set-2-0-dummy" value="%s" '
            'class="vIntegerField" id="id_inner2_set-2-0-dummy">' % self.inner2.dummy,
            html=True,
        )


@override_settings(ROOT_URLCONF='admin_inlines.urls')
class TestReadOnlyChangeViewInlinePermissions(TestCase):

    @classmethod
    def setUpTestData(cls):
        cls.user = User.objects.create_user('testing', password='password', is_staff=True)
        cls.user.user_permissions.add(
            Permission.objects.get(codename='view_poll', content_type=ContentType.objects.get_for_model(Poll))
        )
        cls.user.user_permissions.add(
            *Permission.objects.filter(
                codename__endswith="question",
                content_type=ContentType.objects.get_for_model(Question)
            ).values_list('pk', flat=True)
        )

        cls.poll = Poll.objects.create(name="Survey")
        cls.add_url = reverse('admin:admin_inlines_poll_add')
        cls.change_url = reverse('admin:admin_inlines_poll_change', args=(cls.poll.id,))

    def setUp(self):
        self.client.force_login(self.user)

    def test_add_url_not_allowed(self):
        response = self.client.get(self.add_url)
        self.assertEqual(response.status_code, 403)

        response = self.client.post(self.add_url, {})
        self.assertEqual(response.status_code, 403)

    def test_post_to_change_url_not_allowed(self):
        response = self.client.post(self.change_url, {})
        self.assertEqual(response.status_code, 403)

    def test_get_to_change_url_is_allowed(self):
        response = self.client.get(self.change_url)
        self.assertEqual(response.status_code, 200)

    def test_main_model_is_rendered_as_read_only(self):
        response = self.client.get(self.change_url)
        self.assertContains(
            response,
            '<div class="readonly">%s</div>' % self.poll.name,
            html=True
        )
        input = '<input type="text" name="name" value="%s" class="vTextField" maxlength="40" required id="id_name">'
        self.assertNotContains(
            response,
            input % self.poll.name,
            html=True
        )

    def test_inlines_are_rendered_as_read_only(self):
        question = Question.objects.create(text="How will this be rendered?", poll=self.poll)
        response = self.client.get(self.change_url)
        self.assertContains(
            response,
            '<td class="field-text"><p>%s</p></td>' % question.text,
            html=True
        )
        self.assertNotContains(response, 'id="id_question_set-0-text"')
        self.assertNotContains(response, 'id="id_related_objs-0-DELETE"')

    def test_submit_line_shows_only_close_button(self):
        response = self.client.get(self.change_url)
        self.assertContains(
            response,
            '<a href="/admin/admin_inlines/poll/" class="closelink">Close</a>',
            html=True
        )
        delete_link = '<p class="deletelink-box"><a href="/admin/admin_inlines/poll/%s/delete/" class="deletelink">Delete</a></p>'  # noqa
        self.assertNotContains(
            response,
            delete_link % self.poll.id,
            html=True
        )
        self.assertNotContains(response, '<input type="submit" value="Save and add another" name="_addanother">')
        self.assertNotContains(response, '<input type="submit" value="Save and continue editing" name="_continue">')

    def test_inline_delete_buttons_are_not_shown(self):
        Question.objects.create(text="How will this be rendered?", poll=self.poll)
        response = self.client.get(self.change_url)
        self.assertNotContains(
            response,
            '<input type="checkbox" name="question_set-0-DELETE" id="id_question_set-0-DELETE">',
            html=True
        )

    def test_extra_inlines_are_not_shown(self):
        response = self.client.get(self.change_url)
        self.assertNotContains(response, 'id="id_question_set-0-text"')


@override_settings(ROOT_URLCONF='admin_inlines.urls')
class SeleniumTests(AdminSeleniumTestCase):

    available_apps = ['admin_inlines'] + AdminSeleniumTestCase.available_apps

    def setUp(self):
        User.objects.create_superuser(username='super', password='secret', email='[email protected]')

    def test_add_stackeds(self):
        """
        The "Add another XXX" link correctly adds items to the stacked
        formset.
        """
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder4_add'))

        inline_id = '#inner4stacked_set-group'

        def rows_length():
            return len(self.selenium.find_elements_by_css_selector('%s .dynamic-inner4stacked_set' % inline_id))
        self.assertEqual(rows_length(), 3)

        add_button = self.selenium.find_element_by_link_text('Add another Inner4 stacked')
        add_button.click()

        self.assertEqual(rows_length(), 4)

    def test_delete_stackeds(self):
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder4_add'))

        inline_id = '#inner4stacked_set-group'

        def rows_length():
            return len(self.selenium.find_elements_by_css_selector('%s .dynamic-inner4stacked_set' % inline_id))
        self.assertEqual(rows_length(), 3)

        add_button = self.selenium.find_element_by_link_text('Add another Inner4 stacked')
        add_button.click()
        add_button.click()
        self.assertEqual(rows_length(), 5, msg="sanity check")

        for delete_link in self.selenium.find_elements_by_css_selector('%s .inline-deletelink' % inline_id):
            delete_link.click()
        with self.disable_implicit_wait():
            self.assertEqual(rows_length(), 0)

    def test_delete_invalid_stacked_inlines(self):
        from selenium.common.exceptions import NoSuchElementException
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder4_add'))

        inline_id = '#inner4stacked_set-group'

        def rows_length():
            return len(self.selenium.find_elements_by_css_selector('%s .dynamic-inner4stacked_set' % inline_id))
        self.assertEqual(rows_length(), 3)

        add_button = self.selenium.find_element_by_link_text('Add another Inner4 stacked')
        add_button.click()
        add_button.click()
        self.assertEqual(len(self.selenium.find_elements_by_css_selector('#id_inner4stacked_set-4-dummy')), 1)

        # Enter some data and click 'Save'.
        self.selenium.find_element_by_name('dummy').send_keys('1')
        self.selenium.find_element_by_name('inner4stacked_set-0-dummy').send_keys('100')
        self.selenium.find_element_by_name('inner4stacked_set-1-dummy').send_keys('101')
        self.selenium.find_element_by_name('inner4stacked_set-2-dummy').send_keys('222')
        self.selenium.find_element_by_name('inner4stacked_set-3-dummy').send_keys('103')
        self.selenium.find_element_by_name('inner4stacked_set-4-dummy').send_keys('222')
        with self.wait_page_loaded():
            self.selenium.find_element_by_xpath('//input[@value="Save"]').click()

        self.assertEqual(rows_length(), 5, msg="sanity check")
        errorlist = self.selenium.find_element_by_css_selector(
            '%s .dynamic-inner4stacked_set .errorlist li' % inline_id
        )
        self.assertEqual('Please correct the duplicate values below.', errorlist.text)
        delete_link = self.selenium.find_element_by_css_selector('#inner4stacked_set-4 .inline-deletelink')
        delete_link.click()
        self.assertEqual(rows_length(), 4)
        with self.disable_implicit_wait(), self.assertRaises(NoSuchElementException):
            self.selenium.find_element_by_css_selector('%s .dynamic-inner4stacked_set .errorlist li' % inline_id)

        with self.wait_page_loaded():
            self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
        # The objects have been created in the database.
        self.assertEqual(Inner4Stacked.objects.all().count(), 4)

    def test_delete_invalid_tabular_inlines(self):
        from selenium.common.exceptions import NoSuchElementException
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder4_add'))

        inline_id = '#inner4tabular_set-group'

        def rows_length():
            return len(self.selenium.find_elements_by_css_selector('%s .dynamic-inner4tabular_set' % inline_id))
        self.assertEqual(rows_length(), 3)

        add_button = self.selenium.find_element_by_link_text('Add another Inner4 tabular')
        add_button.click()
        add_button.click()
        self.assertEqual(len(self.selenium.find_elements_by_css_selector('#id_inner4tabular_set-4-dummy')), 1)

        # Enter some data and click 'Save'.
        self.selenium.find_element_by_name('dummy').send_keys('1')
        self.selenium.find_element_by_name('inner4tabular_set-0-dummy').send_keys('100')
        self.selenium.find_element_by_name('inner4tabular_set-1-dummy').send_keys('101')
        self.selenium.find_element_by_name('inner4tabular_set-2-dummy').send_keys('222')
        self.selenium.find_element_by_name('inner4tabular_set-3-dummy').send_keys('103')
        self.selenium.find_element_by_name('inner4tabular_set-4-dummy').send_keys('222')
        with self.wait_page_loaded():
            self.selenium.find_element_by_xpath('//input[@value="Save"]').click()

        self.assertEqual(rows_length(), 5, msg="sanity check")

        # Non-field errorlist is in its own <tr> just before
        # tr#inner4tabular_set-3:
        errorlist = self.selenium.find_element_by_css_selector(
            '%s #inner4tabular_set-3 + .row-form-errors .errorlist li' % inline_id
        )
        self.assertEqual('Please correct the duplicate values below.', errorlist.text)
        delete_link = self.selenium.find_element_by_css_selector('#inner4tabular_set-4 .inline-deletelink')
        delete_link.click()
        self.assertEqual(rows_length(), 4)
        with self.disable_implicit_wait(), self.assertRaises(NoSuchElementException):
            self.selenium.find_element_by_css_selector('%s .dynamic-inner4tabular_set .errorlist li' % inline_id)

        with self.wait_page_loaded():
            self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
        # The objects have been created in the database.
        self.assertEqual(Inner4Tabular.objects.all().count(), 4)

    def test_add_inlines(self):
        """
        The "Add another XXX" link correctly adds items to the inline form.
        """
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_profilecollection_add'))

        # There's only one inline to start with and it has the correct ID.
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set')), 1)
        self.assertEqual(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set')[0].get_attribute('id'), 'profile_set-0')
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-0 input[name=profile_set-0-first_name]')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-0 input[name=profile_set-0-last_name]')), 1)

        # Add an inline
        self.selenium.find_element_by_link_text('Add another Profile').click()

        # The inline has been added, it has the right id, and it contains the
        # correct fields.
        self.assertEqual(len(self.selenium.find_elements_by_css_selector('.dynamic-profile_set')), 2)
        self.assertEqual(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set')[1].get_attribute('id'), 'profile_set-1')
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-1 input[name=profile_set-1-first_name]')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-1 input[name=profile_set-1-last_name]')), 1)

        # Let's add another one to be sure
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.assertEqual(len(self.selenium.find_elements_by_css_selector('.dynamic-profile_set')), 3)
        self.assertEqual(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set')[2].get_attribute('id'), 'profile_set-2')
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-2 input[name=profile_set-2-first_name]')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '.dynamic-profile_set#profile_set-2 input[name=profile_set-2-last_name]')), 1)

        # Enter some data and click 'Save'
        self.selenium.find_element_by_name('profile_set-0-first_name').send_keys('0 first name 1')
        self.selenium.find_element_by_name('profile_set-0-last_name').send_keys('0 last name 2')
        self.selenium.find_element_by_name('profile_set-1-first_name').send_keys('1 first name 1')
        self.selenium.find_element_by_name('profile_set-1-last_name').send_keys('1 last name 2')
        self.selenium.find_element_by_name('profile_set-2-first_name').send_keys('2 first name 1')
        self.selenium.find_element_by_name('profile_set-2-last_name').send_keys('2 last name 2')

        with self.wait_page_loaded():
            self.selenium.find_element_by_xpath('//input[@value="Save"]').click()

        # The objects have been created in the database
        self.assertEqual(ProfileCollection.objects.all().count(), 1)
        self.assertEqual(Profile.objects.all().count(), 3)

    def test_add_inline_link_absent_for_view_only_parent_model(self):
        from selenium.common.exceptions import NoSuchElementException
        user = User.objects.create_user('testing', password='password', is_staff=True)
        user.user_permissions.add(
            Permission.objects.get(codename='view_poll', content_type=ContentType.objects.get_for_model(Poll))
        )
        user.user_permissions.add(
            *Permission.objects.filter(
                codename__endswith="question",
                content_type=ContentType.objects.get_for_model(Question)
            ).values_list('pk', flat=True)
        )
        self.admin_login(username='testing', password='password')
        poll = Poll.objects.create(name="Survey")
        change_url = reverse('admin:admin_inlines_poll_change', args=(poll.id,))
        self.selenium.get(self.live_server_url + change_url)

        with self.disable_implicit_wait():
            with self.assertRaises(NoSuchElementException):
                self.selenium.find_element_by_link_text('Add another Question')

    def test_delete_inlines(self):
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_profilecollection_add'))

        # Add a few inlines
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.selenium.find_element_by_link_text('Add another Profile').click()
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '#profile_set-group table tr.dynamic-profile_set')), 5)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-3')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-4')), 1)

        # Click on a few delete buttons
        self.selenium.find_element_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-1 td.delete a').click()
        self.selenium.find_element_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-2 td.delete a').click()
        # The rows are gone and the IDs have been re-sequenced
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            '#profile_set-group table tr.dynamic-profile_set')), 3)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-0')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-1')), 1)
        self.assertEqual(len(self.selenium.find_elements_by_css_selector(
            'form#profilecollection_form tr.dynamic-profile_set#profile_set-2')), 1)

    def test_collapsed_inlines(self):
        # Collapsed inlines have SHOW/HIDE links.
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_author_add'))
        # One field is in a stacked inline, the other in a tabular one.
        test_fields = ['#id_nonautopkbook_set-0-title', '#id_nonautopkbook_set-2-0-title']
        show_links = self.selenium.find_elements_by_link_text('SHOW')
        self.assertEqual(len(show_links), 3)
        for show_index, field_name in enumerate(test_fields, 0):
            self.wait_until_invisible(field_name)
            show_links[show_index].click()
            self.wait_until_visible(field_name)
        hide_links = self.selenium.find_elements_by_link_text('HIDE')
        self.assertEqual(len(hide_links), 2)
        for hide_index, field_name in enumerate(test_fields, 0):
            self.wait_until_visible(field_name)
            hide_links[hide_index].click()
            self.wait_until_invisible(field_name)

    def test_added_stacked_inline_with_collapsed_fields(self):
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_teacher_add'))
        self.selenium.find_element_by_link_text('Add another Child').click()
        test_fields = ['#id_child_set-0-name', '#id_child_set-1-name']
        show_links = self.selenium.find_elements_by_link_text('SHOW')
        self.assertEqual(len(show_links), 2)
        for show_index, field_name in enumerate(test_fields, 0):
            self.wait_until_invisible(field_name)
            show_links[show_index].click()
            self.wait_until_visible(field_name)
        hide_links = self.selenium.find_elements_by_link_text('HIDE')
        self.assertEqual(len(hide_links), 2)
        for hide_index, field_name in enumerate(test_fields, 0):
            self.wait_until_visible(field_name)
            hide_links[hide_index].click()
            self.wait_until_invisible(field_name)

    def assertBorder(self, element, border):
        width, style, color = border.split(' ')
        border_properties = [
            'border-bottom-%s',
            'border-left-%s',
            'border-right-%s',
            'border-top-%s',
        ]
        for prop in border_properties:
            prop = prop % 'width'
            self.assertEqual(element.value_of_css_property(prop), width)
        for prop in border_properties:
            prop = prop % 'style'
            self.assertEqual(element.value_of_css_property(prop), style)
        # Convert hex color to rgb.
        self.assertRegex(color, '#[0-9a-f]{6}')
        r, g, b = int(color[1:3], 16), int(color[3:5], 16), int(color[5:], 16)
        # The value may be expressed as either rgb() or rgba() depending on the
        # browser.
        colors = [
            'rgb(%d, %d, %d)' % (r, g, b),
            'rgba(%d, %d, %d, 1)' % (r, g, b),
        ]
        for prop in border_properties:
            prop = prop % 'color'
            self.assertIn(element.value_of_css_property(prop), colors)

    def test_inline_formset_error_input_border(self):
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder5_add'))
        self.wait_until_visible('#id_dummy')
        self.selenium.find_element_by_id('id_dummy').send_keys(1)
        fields = ['id_inner5stacked_set-0-dummy', 'id_inner5tabular_set-0-dummy']
        show_links = self.selenium.find_elements_by_link_text('SHOW')
        for show_index, field_name in enumerate(fields):
            show_links[show_index].click()
            self.wait_until_visible('#' + field_name)
            self.selenium.find_element_by_id(field_name).send_keys(1)

        # Before save all inputs have default border
        for inline in ('stacked', 'tabular'):
            for field_name in ('name', 'select', 'text'):
                element_id = 'id_inner5%s_set-0-%s' % (inline, field_name)
                self.assertBorder(
                    self.selenium.find_element_by_id(element_id),
                    '1px solid #cccccc',
                )
        self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
        # Test the red border around inputs by css selectors
        stacked_selectors = ['.errors input', '.errors select', '.errors textarea']
        for selector in stacked_selectors:
            self.assertBorder(
                self.selenium.find_element_by_css_selector(selector),
                '1px solid #ba2121',
            )
        tabular_selectors = [
            'td ul.errorlist + input', 'td ul.errorlist + select', 'td ul.errorlist + textarea'
        ]
        for selector in tabular_selectors:
            self.assertBorder(
                self.selenium.find_element_by_css_selector(selector),
                '1px solid #ba2121',
            )

    def test_inline_formset_error(self):
        self.admin_login(username='super', password='secret')
        self.selenium.get(self.live_server_url + reverse('admin:admin_inlines_holder5_add'))
        stacked_inline_formset_selector = 'div#inner5stacked_set-group fieldset.module.collapse'
        tabular_inline_formset_selector = 'div#inner5tabular_set-group fieldset.module.collapse'
        # Inlines without errors, both inlines collapsed
        self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
        self.assertEqual(
            len(self.selenium.find_elements_by_css_selector(stacked_inline_formset_selector + '.collapsed')), 1
        )
        self.assertEqual(
            len(self.selenium.find_elements_by_css_selector(tabular_inline_formset_selector + '.collapsed')), 1
        )
        show_links = self.selenium.find_elements_by_link_text('SHOW')
        self.assertEqual(len(show_links), 2)
        # Inlines with errors, both inlines expanded
        test_fields = ['#id_inner5stacked_set-0-dummy', '#id_inner5tabular_set-0-dummy']
        for show_index, field_name in enumerate(test_fields):
            show_links[show_index].click()
            self.wait_until_visible(field_name)
            self.selenium.find_element_by_id(field_name[1:]).send_keys(1)
        hide_links = self.selenium.find_elements_by_link_text('HIDE')
        self.assertEqual(len(hide_links), 2)
        for hide_index, field_name in enumerate(test_fields):
            hide_link = hide_links[hide_index]
            self.selenium.execute_script('window.scrollTo(0, %s);' % hide_link.location['y'])
            hide_link.click()
            self.wait_until_invisible(field_name)
        self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
        self.assertEqual(
            len(self.selenium.find_elements_by_css_selector(stacked_inline_formset_selector + '.collapsed')), 0
        )
        self.assertEqual(
            len(self.selenium.find_elements_by_css_selector(tabular_inline_formset_selector + '.collapsed')), 0
        )
        self.assertEqual(
            len(self.selenium.find_elements_by_css_selector(stacked_inline_formset_selector)), 1
        )
        self.assertEqual(
            len(self.selenium.find_elements_by_css_selector(tabular_inline_formset_selector)), 1
        )

    def test_inlines_verbose_name(self):
        """
        The item added by the "Add another XXX" link must use the correct
        verbose_name in the inline form.
        """
        self.admin_login(username='super', password='secret')
        # Each combination of horizontal/vertical filter with stacked/tabular
        # inlines.
        tests = [
            'admin:admin_inlines_course_add',
            'admin:admin_inlines_courseproxy_add',
            'admin:admin_inlines_courseproxy1_add',
            'admin:admin_inlines_courseproxy2_add',
        ]
        css_selector = '.dynamic-class_set#class_set-%s h2'
        for url_name in tests:
            with self.subTest(url=url_name):
                self.selenium.get(self.live_server_url + reverse(url_name))
                # First inline shows the verbose_name.
                available, chosen = self.selenium.find_elements_by_css_selector(css_selector % 0)
                self.assertEqual(available.text, 'AVAILABLE ATTENDANT')
                self.assertEqual(chosen.text, 'CHOSEN ATTENDANT')
                # Added inline should also have the correct verbose_name.
                self.selenium.find_element_by_link_text('Add another Class').click()
                available, chosen = self.selenium.find_elements_by_css_selector(css_selector % 1)
                self.assertEqual(available.text, 'AVAILABLE ATTENDANT')
                self.assertEqual(chosen.text, 'CHOSEN ATTENDANT')
                # Third inline should also have the correct verbose_name.
                self.selenium.find_element_by_link_text('Add another Class').click()
                available, chosen = self.selenium.find_elements_by_css_selector(css_selector % 2)
                self.assertEqual(available.text, 'AVAILABLE ATTENDANT')
                self.assertEqual(chosen.text, 'CHOSEN ATTENDANT')
import operator import uuid from unittest import mock from django import forms from django.core import serializers from django.core.exceptions import ValidationError from django.core.serializers.json import DjangoJSONEncoder from django.db import ( DataError, IntegrityError, NotSupportedError, OperationalError, connection, models, ) from django.db.models import ( Count, ExpressionWrapper, F, IntegerField, OuterRef, Q, Subquery, Transform, Value, ) from django.db.models.expressions import RawSQL from django.db.models.fields.json import ( KeyTextTransform, KeyTransform, KeyTransformFactory, KeyTransformTextLookupMixin, ) from django.db.models.functions import Cast from django.test import ( SimpleTestCase, TestCase, skipIfDBFeature, skipUnlessDBFeature, ) from django.test.utils import CaptureQueriesContext from .models import ( CustomJSONDecoder, JSONModel, NullableJSONModel, RelatedJSONModel, ) @skipUnlessDBFeature('supports_json_field') class JSONFieldTests(TestCase): def test_invalid_value(self): msg = 'is not JSON serializable' with self.assertRaisesMessage(TypeError, msg): NullableJSONModel.objects.create(value={ 'uuid': uuid.UUID('d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475'), }) def test_custom_encoder_decoder(self): value = {'uuid': uuid.UUID('{d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475}')} obj = NullableJSONModel(value_custom=value) obj.clean_fields() obj.save() obj.refresh_from_db() self.assertEqual(obj.value_custom, value) def test_db_check_constraints(self): value = '{@!invalid json value 123 $!@#' with mock.patch.object(DjangoJSONEncoder, 'encode', return_value=value): with self.assertRaises((IntegrityError, DataError, OperationalError)): NullableJSONModel.objects.create(value_custom=value) class TestMethods(SimpleTestCase): def test_deconstruct(self): field = models.JSONField() name, path, args, kwargs = field.deconstruct() self.assertEqual(path, 'django.db.models.JSONField') self.assertEqual(args, []) self.assertEqual(kwargs, {}) def test_deconstruct_custom_encoder_decoder(self): field = models.JSONField(encoder=DjangoJSONEncoder, decoder=CustomJSONDecoder) name, path, args, kwargs = field.deconstruct() self.assertEqual(kwargs['encoder'], DjangoJSONEncoder) self.assertEqual(kwargs['decoder'], CustomJSONDecoder) def test_get_transforms(self): @models.JSONField.register_lookup class MyTransform(Transform): lookup_name = 'my_transform' field = models.JSONField() transform = field.get_transform('my_transform') self.assertIs(transform, MyTransform) models.JSONField._unregister_lookup(MyTransform) models.JSONField._clear_cached_lookups() transform = field.get_transform('my_transform') self.assertIsInstance(transform, KeyTransformFactory) def test_key_transform_text_lookup_mixin_non_key_transform(self): transform = Transform('test') msg = ( 'Transform should be an instance of KeyTransform in order to use ' 'this lookup.' ) with self.assertRaisesMessage(TypeError, msg): KeyTransformTextLookupMixin(transform) class TestValidation(SimpleTestCase): def test_invalid_encoder(self): msg = 'The encoder parameter must be a callable object.' with self.assertRaisesMessage(ValueError, msg): models.JSONField(encoder=DjangoJSONEncoder()) def test_invalid_decoder(self): msg = 'The decoder parameter must be a callable object.' with self.assertRaisesMessage(ValueError, msg): models.JSONField(decoder=CustomJSONDecoder()) def test_validation_error(self): field = models.JSONField() msg = 'Value must be valid JSON.' 
value = uuid.UUID('{d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475}') with self.assertRaisesMessage(ValidationError, msg): field.clean({'uuid': value}, None) def test_custom_encoder(self): field = models.JSONField(encoder=DjangoJSONEncoder) value = uuid.UUID('{d85e2076-b67c-4ee7-8c3a-2bf5a2cc2475}') field.clean({'uuid': value}, None) class TestFormField(SimpleTestCase): def test_formfield(self): model_field = models.JSONField() form_field = model_field.formfield() self.assertIsInstance(form_field, forms.JSONField) def test_formfield_custom_encoder_decoder(self): model_field = models.JSONField(encoder=DjangoJSONEncoder, decoder=CustomJSONDecoder) form_field = model_field.formfield() self.assertIs(form_field.encoder, DjangoJSONEncoder) self.assertIs(form_field.decoder, CustomJSONDecoder) class TestSerialization(SimpleTestCase): test_data = ( '[{"fields": {"value": %s}, ' '"model": "model_fields.jsonmodel", "pk": null}]' ) test_values = ( # (Python value, serialized value), ({'a': 'b', 'c': None}, '{"a": "b", "c": null}'), ('abc', '"abc"'), ('{"a": "a"}', '"{\\"a\\": \\"a\\"}"'), ) def test_dumping(self): for value, serialized in self.test_values: with self.subTest(value=value): instance = JSONModel(value=value) data = serializers.serialize('json', [instance]) self.assertJSONEqual(data, self.test_data % serialized) def test_loading(self): for value, serialized in self.test_values: with self.subTest(value=value): instance = list( serializers.deserialize('json', self.test_data % serialized) )[0].object self.assertEqual(instance.value, value) def test_xml_serialization(self): test_xml_data = ( '<django-objects version="1.0">' '<object model="model_fields.nullablejsonmodel">' '<field name="value" type="JSONField">%s' '</field></object></django-objects>' ) for value, serialized in self.test_values: with self.subTest(value=value): instance = NullableJSONModel(value=value) data = serializers.serialize('xml', [instance], fields=['value']) self.assertXMLEqual(data, test_xml_data % serialized) new_instance = list(serializers.deserialize('xml', data))[0].object self.assertEqual(new_instance.value, instance.value) @skipUnlessDBFeature('supports_json_field') class TestSaveLoad(TestCase): def test_null(self): obj = NullableJSONModel(value=None) obj.save() obj.refresh_from_db() self.assertIsNone(obj.value) @skipUnlessDBFeature('supports_primitives_in_json_field') def test_json_null_different_from_sql_null(self): json_null = NullableJSONModel.objects.create(value=Value('null')) json_null.refresh_from_db() sql_null = NullableJSONModel.objects.create(value=None) sql_null.refresh_from_db() # 'null' is not equal to NULL in the database. self.assertSequenceEqual( NullableJSONModel.objects.filter(value=Value('null')), [json_null], ) self.assertSequenceEqual( NullableJSONModel.objects.filter(value=None), [json_null], ) self.assertSequenceEqual( NullableJSONModel.objects.filter(value__isnull=True), [sql_null], ) # 'null' is equal to NULL in Python (None). 
self.assertEqual(json_null.value, sql_null.value) @skipUnlessDBFeature('supports_primitives_in_json_field') def test_primitives(self): values = [ True, 1, 1.45, 'String', '', ] for value in values: with self.subTest(value=value): obj = JSONModel(value=value) obj.save() obj.refresh_from_db() self.assertEqual(obj.value, value) def test_dict(self): values = [ {}, {'name': 'John', 'age': 20, 'height': 180.3}, {'a': True, 'b': {'b1': False, 'b2': None}}, ] for value in values: with self.subTest(value=value): obj = JSONModel.objects.create(value=value) obj.refresh_from_db() self.assertEqual(obj.value, value) def test_list(self): values = [ [], ['John', 20, 180.3], [True, [False, None]], ] for value in values: with self.subTest(value=value): obj = JSONModel.objects.create(value=value) obj.refresh_from_db() self.assertEqual(obj.value, value) def test_realistic_object(self): value = { 'name': 'John', 'age': 20, 'pets': [ {'name': 'Kit', 'type': 'cat', 'age': 2}, {'name': 'Max', 'type': 'dog', 'age': 1}, ], 'courses': [ ['A1', 'A2', 'A3'], ['B1', 'B2'], ['C1'], ], } obj = JSONModel.objects.create(value=value) obj.refresh_from_db() self.assertEqual(obj.value, value) @skipUnlessDBFeature('supports_json_field') class TestQuerying(TestCase): @classmethod def setUpTestData(cls): cls.primitives = [True, False, 'yes', 7, 9.6] values = [ None, [], {}, {'a': 'b', 'c': 14}, { 'a': 'b', 'c': 14, 'd': ['e', {'f': 'g'}], 'h': True, 'i': False, 'j': None, 'k': {'l': 'm'}, 'n': [None], 'o': '"quoted"', 'p': 4.2, }, [1, [2]], {'k': True, 'l': False, 'foo': 'bax'}, { 'foo': 'bar', 'baz': {'a': 'b', 'c': 'd'}, 'bar': ['foo', 'bar'], 'bax': {'foo': 'bar'}, }, ] cls.objs = [ NullableJSONModel.objects.create(value=value) for value in values ] if connection.features.supports_primitives_in_json_field: cls.objs.extend([ NullableJSONModel.objects.create(value=value) for value in cls.primitives ]) cls.raw_sql = '%s::jsonb' if connection.vendor == 'postgresql' else '%s' def test_exact(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__exact={}), [self.objs[2]], ) def test_exact_complex(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__exact={'a': 'b', 'c': 14}), [self.objs[3]], ) def test_icontains(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__icontains='BaX'), self.objs[6:8], ) def test_isnull(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__isnull=True), [self.objs[0]], ) def test_ordering_by_transform(self): mariadb = connection.vendor == 'mysql' and connection.mysql_is_mariadb values = [ {'ord': 93, 'name': 'bar'}, {'ord': 22.1, 'name': 'foo'}, {'ord': -1, 'name': 'baz'}, {'ord': 21.931902, 'name': 'spam'}, {'ord': -100291029, 'name': 'eggs'}, ] for field_name in ['value', 'value_custom']: with self.subTest(field=field_name): objs = [ NullableJSONModel.objects.create(**{field_name: value}) for value in values ] query = NullableJSONModel.objects.filter( **{'%s__name__isnull' % field_name: False}, ).order_by('%s__ord' % field_name) expected = [objs[4], objs[2], objs[3], objs[1], objs[0]] if mariadb or connection.vendor == 'oracle': # MariaDB and Oracle return JSON values as strings. 
expected = [objs[2], objs[4], objs[3], objs[1], objs[0]] self.assertSequenceEqual(query, expected) def test_ordering_grouping_by_key_transform(self): base_qs = NullableJSONModel.objects.filter(value__d__0__isnull=False) for qs in ( base_qs.order_by('value__d__0'), base_qs.annotate(key=KeyTransform('0', KeyTransform('d', 'value'))).order_by('key'), ): self.assertSequenceEqual(qs, [self.objs[4]]) qs = NullableJSONModel.objects.filter(value__isnull=False) self.assertQuerysetEqual( qs.filter(value__isnull=False).annotate( key=KeyTextTransform('f', KeyTransform('1', KeyTransform('d', 'value'))), ).values('key').annotate(count=Count('key')).order_by('count'), [(None, 0), ('g', 1)], operator.itemgetter('key', 'count'), ) def test_ordering_grouping_by_count(self): qs = NullableJSONModel.objects.filter( value__isnull=False, ).values('value__d__0').annotate(count=Count('value__d__0')).order_by('count') self.assertQuerysetEqual(qs, [0, 1], operator.itemgetter('count')) def test_order_grouping_custom_decoder(self): NullableJSONModel.objects.create(value_custom={'a': 'b'}) qs = NullableJSONModel.objects.filter(value_custom__isnull=False) self.assertSequenceEqual( qs.values( 'value_custom__a', ).annotate( count=Count('id'), ).order_by('value_custom__a'), [{'value_custom__a': 'b', 'count': 1}], ) def test_key_transform_raw_expression(self): expr = RawSQL(self.raw_sql, ['{"x": "bar"}']) self.assertSequenceEqual( NullableJSONModel.objects.filter(value__foo=KeyTransform('x', expr)), [self.objs[7]], ) def test_nested_key_transform_raw_expression(self): expr = RawSQL(self.raw_sql, ['{"x": {"y": "bar"}}']) self.assertSequenceEqual( NullableJSONModel.objects.filter(value__foo=KeyTransform('y', KeyTransform('x', expr))), [self.objs[7]], ) def test_key_transform_expression(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__d__0__isnull=False).annotate( key=KeyTransform('d', 'value'), chain=KeyTransform('0', 'key'), expr=KeyTransform('0', Cast('key', models.JSONField())), ).filter(chain=F('expr')), [self.objs[4]], ) def test_key_transform_annotation_expression(self): obj = NullableJSONModel.objects.create(value={'d': ['e', 'e']}) self.assertSequenceEqual( NullableJSONModel.objects.filter(value__d__0__isnull=False).annotate( key=F('value__d'), chain=F('key__0'), expr=Cast('key', models.JSONField()), ).filter(chain=F('expr__1')), [obj], ) def test_nested_key_transform_expression(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__d__0__isnull=False).annotate( key=KeyTransform('d', 'value'), chain=KeyTransform('f', KeyTransform('1', 'key')), expr=KeyTransform('f', KeyTransform('1', Cast('key', models.JSONField()))), ).filter(chain=F('expr')), [self.objs[4]], ) def test_nested_key_transform_annotation_expression(self): obj = NullableJSONModel.objects.create( value={'d': ['e', {'f': 'g'}, {'f': 'g'}]}, ) self.assertSequenceEqual( NullableJSONModel.objects.filter(value__d__0__isnull=False).annotate( key=F('value__d'), chain=F('key__1__f'), expr=Cast('key', models.JSONField()), ).filter(chain=F('expr__2__f')), [obj], ) def test_nested_key_transform_on_subquery(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__d__0__isnull=False).annotate( subquery_value=Subquery( NullableJSONModel.objects.filter(pk=OuterRef('pk')).values('value') ), key=KeyTransform('d', 'subquery_value'), chain=KeyTransform('f', KeyTransform('1', 'key')), ).filter(chain='g'), [self.objs[4]], ) def test_expression_wrapper_key_transform(self): self.assertSequenceEqual( 
NullableJSONModel.objects.annotate( expr=ExpressionWrapper( KeyTransform('c', 'value'), output_field=IntegerField(), ), ).filter(expr__isnull=False), self.objs[3:5], ) def test_has_key(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__has_key='a'), [self.objs[3], self.objs[4]], ) def test_has_key_null_value(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__has_key='j'), [self.objs[4]], ) def test_has_key_deep(self): tests = [ (Q(value__baz__has_key='a'), self.objs[7]), (Q(value__has_key=KeyTransform('a', KeyTransform('baz', 'value'))), self.objs[7]), (Q(value__has_key=F('value__baz__a')), self.objs[7]), (Q(value__has_key=KeyTransform('c', KeyTransform('baz', 'value'))), self.objs[7]), (Q(value__has_key=F('value__baz__c')), self.objs[7]), (Q(value__d__1__has_key='f'), self.objs[4]), ( Q(value__has_key=KeyTransform('f', KeyTransform('1', KeyTransform('d', 'value')))), self.objs[4], ), (Q(value__has_key=F('value__d__1__f')), self.objs[4]), ] for condition, expected in tests: with self.subTest(condition=condition): self.assertSequenceEqual( NullableJSONModel.objects.filter(condition), [expected], ) def test_has_key_list(self): obj = NullableJSONModel.objects.create(value=[{'a': 1}, {'b': 'x'}]) tests = [ Q(value__1__has_key='b'), Q(value__has_key=KeyTransform('b', KeyTransform(1, 'value'))), Q(value__has_key=KeyTransform('b', KeyTransform('1', 'value'))), Q(value__has_key=F('value__1__b')), ] for condition in tests: with self.subTest(condition=condition): self.assertSequenceEqual( NullableJSONModel.objects.filter(condition), [obj], ) def test_has_keys(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__has_keys=['a', 'c', 'h']), [self.objs[4]], ) def test_has_any_keys(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__has_any_keys=['c', 'l']), [self.objs[3], self.objs[4], self.objs[6]], ) @skipUnlessDBFeature('supports_json_field_contains') def test_contains(self): tests = [ ({}, self.objs[2:5] + self.objs[6:8]), ({'baz': {'a': 'b', 'c': 'd'}}, [self.objs[7]]), ({'baz': {'a': 'b'}}, [self.objs[7]]), ({'baz': {'c': 'd'}}, [self.objs[7]]), ({'k': True, 'l': False}, [self.objs[6]]), ({'d': ['e', {'f': 'g'}]}, [self.objs[4]]), ({'d': ['e']}, [self.objs[4]]), ({'d': [{'f': 'g'}]}, [self.objs[4]]), ([1, [2]], [self.objs[5]]), ([1], [self.objs[5]]), ([[2]], [self.objs[5]]), ({'n': [None]}, [self.objs[4]]), ({'j': None}, [self.objs[4]]), ] for value, expected in tests: with self.subTest(value=value): qs = NullableJSONModel.objects.filter(value__contains=value) self.assertSequenceEqual(qs, expected) @skipIfDBFeature('supports_json_field_contains') def test_contains_unsupported(self): msg = 'contains lookup is not supported on this database backend.' 
with self.assertRaisesMessage(NotSupportedError, msg): NullableJSONModel.objects.filter( value__contains={'baz': {'a': 'b', 'c': 'd'}}, ).get() @skipUnlessDBFeature( 'supports_primitives_in_json_field', 'supports_json_field_contains', ) def test_contains_primitives(self): for value in self.primitives: with self.subTest(value=value): qs = NullableJSONModel.objects.filter(value__contains=value) self.assertIs(qs.exists(), True) @skipUnlessDBFeature('supports_json_field_contains') def test_contained_by(self): qs = NullableJSONModel.objects.filter(value__contained_by={'a': 'b', 'c': 14, 'h': True}) self.assertSequenceEqual(qs, self.objs[2:4]) @skipIfDBFeature('supports_json_field_contains') def test_contained_by_unsupported(self): msg = 'contained_by lookup is not supported on this database backend.' with self.assertRaisesMessage(NotSupportedError, msg): NullableJSONModel.objects.filter(value__contained_by={'a': 'b'}).get() def test_deep_values(self): qs = NullableJSONModel.objects.values_list('value__k__l') expected_objs = [(None,)] * len(self.objs) expected_objs[4] = ('m',) self.assertSequenceEqual(qs, expected_objs) @skipUnlessDBFeature('can_distinct_on_fields') def test_deep_distinct(self): query = NullableJSONModel.objects.distinct('value__k__l').values_list('value__k__l') self.assertSequenceEqual(query, [('m',), (None,)]) def test_isnull_key(self): # key__isnull=False works the same as has_key='key'. self.assertSequenceEqual( NullableJSONModel.objects.filter(value__a__isnull=True), self.objs[:3] + self.objs[5:], ) self.assertSequenceEqual( NullableJSONModel.objects.filter(value__j__isnull=True), self.objs[:4] + self.objs[5:], ) self.assertSequenceEqual( NullableJSONModel.objects.filter(value__a__isnull=False), [self.objs[3], self.objs[4]], ) self.assertSequenceEqual( NullableJSONModel.objects.filter(value__j__isnull=False), [self.objs[4]], ) def test_isnull_key_or_none(self): obj = NullableJSONModel.objects.create(value={'a': None}) self.assertSequenceEqual( NullableJSONModel.objects.filter(Q(value__a__isnull=True) | Q(value__a=None)), self.objs[:3] + self.objs[5:] + [obj], ) def test_none_key(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__j=None), [self.objs[4]], ) def test_none_key_exclude(self): obj = NullableJSONModel.objects.create(value={'j': 1}) if connection.vendor == 'oracle': # Oracle supports filtering JSON objects with NULL keys, but the # current implementation doesn't support it. 
self.assertSequenceEqual( NullableJSONModel.objects.exclude(value__j=None), self.objs[1:4] + self.objs[5:] + [obj], ) else: self.assertSequenceEqual(NullableJSONModel.objects.exclude(value__j=None), [obj]) def test_shallow_list_lookup(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__0=1), [self.objs[5]], ) def test_shallow_obj_lookup(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__a='b'), [self.objs[3], self.objs[4]], ) def test_obj_subquery_lookup(self): qs = NullableJSONModel.objects.annotate( field=Subquery(NullableJSONModel.objects.filter(pk=OuterRef('pk')).values('value')), ).filter(field__a='b') self.assertSequenceEqual(qs, [self.objs[3], self.objs[4]]) def test_deep_lookup_objs(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__k__l='m'), [self.objs[4]], ) def test_shallow_lookup_obj_target(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__k={'l': 'm'}), [self.objs[4]], ) def test_deep_lookup_array(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__1__0=2), [self.objs[5]], ) def test_deep_lookup_mixed(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__d__1__f='g'), [self.objs[4]], ) def test_deep_lookup_transform(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__c__gt=2), [self.objs[3], self.objs[4]], ) self.assertSequenceEqual( NullableJSONModel.objects.filter(value__c__gt=2.33), [self.objs[3], self.objs[4]], ) self.assertIs(NullableJSONModel.objects.filter(value__c__lt=5).exists(), False) def test_lookup_exclude(self): tests = [ (Q(value__a='b'), [self.objs[0]]), (Q(value__foo='bax'), [self.objs[0], self.objs[7]]), ] for condition, expected in tests: self.assertSequenceEqual( NullableJSONModel.objects.exclude(condition), expected, ) self.assertSequenceEqual( NullableJSONModel.objects.filter(~condition), expected, ) def test_lookup_exclude_nonexistent_key(self): # Values without the key are ignored. condition = Q(value__foo='bax') objs_with_value = [self.objs[6]] objs_with_different_value = [self.objs[0], self.objs[7]] self.assertSequenceEqual( NullableJSONModel.objects.exclude(condition), objs_with_different_value, ) self.assertSequenceEqual( NullableJSONModel.objects.exclude(~condition), objs_with_value, ) self.assertCountEqual( NullableJSONModel.objects.filter(condition | ~condition), objs_with_value + objs_with_different_value, ) self.assertCountEqual( NullableJSONModel.objects.exclude(condition & ~condition), objs_with_value + objs_with_different_value, ) # Add the __isnull lookup to get an exhaustive set. 
self.assertSequenceEqual( NullableJSONModel.objects.exclude(condition & Q(value__foo__isnull=False)), self.objs[0:6] + self.objs[7:], ) self.assertSequenceEqual( NullableJSONModel.objects.filter(condition & Q(value__foo__isnull=False)), objs_with_value, ) def test_usage_in_subquery(self): self.assertSequenceEqual( NullableJSONModel.objects.filter( id__in=NullableJSONModel.objects.filter(value__c=14), ), self.objs[3:5], ) @skipUnlessDBFeature('supports_json_field_contains') def test_array_key_contains(self): tests = [ ([], [self.objs[7]]), ('bar', [self.objs[7]]), (['bar'], [self.objs[7]]), ('ar', []), ] for value, expected in tests: with self.subTest(value=value): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__bar__contains=value), expected, ) def test_key_iexact(self): self.assertIs(NullableJSONModel.objects.filter(value__foo__iexact='BaR').exists(), True) self.assertIs(NullableJSONModel.objects.filter(value__foo__iexact='"BaR"').exists(), False) def test_key_in(self): tests = [ ('value__c__in', [14], self.objs[3:5]), ('value__c__in', [14, 15], self.objs[3:5]), ('value__0__in', [1], [self.objs[5]]), ('value__0__in', [1, 3], [self.objs[5]]), ('value__foo__in', ['bar'], [self.objs[7]]), ( 'value__foo__in', [KeyTransform('foo', KeyTransform('bax', 'value'))], [self.objs[7]], ), ('value__foo__in', [F('value__bax__foo')], [self.objs[7]]), ( 'value__foo__in', [KeyTransform('foo', KeyTransform('bax', 'value')), 'baz'], [self.objs[7]], ), ('value__foo__in', [F('value__bax__foo'), 'baz'], [self.objs[7]]), ('value__foo__in', ['bar', 'baz'], [self.objs[7]]), ('value__bar__in', [['foo', 'bar']], [self.objs[7]]), ('value__bar__in', [['foo', 'bar'], ['a']], [self.objs[7]]), ('value__bax__in', [{'foo': 'bar'}, {'a': 'b'}], [self.objs[7]]), ] for lookup, value, expected in tests: with self.subTest(lookup=lookup, value=value): self.assertSequenceEqual( NullableJSONModel.objects.filter(**{lookup: value}), expected, ) def test_key_values(self): qs = NullableJSONModel.objects.filter(value__h=True) tests = [ ('value__a', 'b'), ('value__c', 14), ('value__d', ['e', {'f': 'g'}]), ('value__h', True), ('value__i', False), ('value__j', None), ('value__k', {'l': 'm'}), ('value__n', [None]), ('value__p', 4.2), ] for lookup, expected in tests: with self.subTest(lookup=lookup): self.assertEqual(qs.values_list(lookup, flat=True).get(), expected) @skipUnlessDBFeature('supports_json_field_contains') def test_key_contains(self): self.assertIs(NullableJSONModel.objects.filter(value__foo__contains='ar').exists(), False) self.assertIs(NullableJSONModel.objects.filter(value__foo__contains='bar').exists(), True) def test_key_icontains(self): self.assertIs(NullableJSONModel.objects.filter(value__foo__icontains='Ar').exists(), True) def test_key_startswith(self): self.assertIs(NullableJSONModel.objects.filter(value__foo__startswith='b').exists(), True) def test_key_istartswith(self): self.assertIs(NullableJSONModel.objects.filter(value__foo__istartswith='B').exists(), True) def test_key_endswith(self): self.assertIs(NullableJSONModel.objects.filter(value__foo__endswith='r').exists(), True) def test_key_iendswith(self): self.assertIs(NullableJSONModel.objects.filter(value__foo__iendswith='R').exists(), True) def test_key_regex(self): self.assertIs(NullableJSONModel.objects.filter(value__foo__regex=r'^bar$').exists(), True) def test_key_iregex(self): self.assertIs(NullableJSONModel.objects.filter(value__foo__iregex=r'^bAr$').exists(), True) def test_key_quoted_string(self): self.assertEqual( 
NullableJSONModel.objects.filter(value__o='"quoted"').get(), self.objs[4], ) @skipUnlessDBFeature('has_json_operators') def test_key_sql_injection(self): with CaptureQueriesContext(connection) as queries: self.assertIs( NullableJSONModel.objects.filter(**{ """value__test' = '"a"') OR 1 = 1 OR ('d""": 'x', }).exists(), False, ) self.assertIn( """."value" -> 'test'' = ''"a"'') OR 1 = 1 OR (''d') = '"x"' """, queries[0]['sql'], ) @skipIfDBFeature('has_json_operators') def test_key_sql_injection_escape(self): query = str(JSONModel.objects.filter(**{ """value__test") = '"a"' OR 1 = 1 OR ("d""": 'x', }).query) self.assertIn('"test\\"', query) self.assertIn('\\"d', query) def test_key_escape(self): obj = NullableJSONModel.objects.create(value={'%total': 10}) self.assertEqual(NullableJSONModel.objects.filter(**{'value__%total': 10}).get(), obj) def test_none_key_and_exact_lookup(self): self.assertSequenceEqual( NullableJSONModel.objects.filter(value__a='b', value__j=None), [self.objs[4]], ) def test_lookups_with_key_transform(self): tests = ( ('value__baz__has_key', 'c'), ('value__baz__has_keys', ['a', 'c']), ('value__baz__has_any_keys', ['a', 'x']), ('value__has_key', KeyTextTransform('foo', 'value')), ) for lookup, value in tests: with self.subTest(lookup=lookup): self.assertIs(NullableJSONModel.objects.filter( **{lookup: value}, ).exists(), True) @skipUnlessDBFeature('supports_json_field_contains') def test_contains_contained_by_with_key_transform(self): tests = [ ('value__d__contains', 'e'), ('value__d__contains', [{'f': 'g'}]), ('value__contains', KeyTransform('bax', 'value')), ('value__contains', F('value__bax')), ('value__baz__contains', {'a': 'b'}), ('value__baz__contained_by', {'a': 'b', 'c': 'd', 'e': 'f'}), ( 'value__contained_by', KeyTransform('x', RawSQL( self.raw_sql, ['{"x": {"a": "b", "c": 1, "d": "e"}}'], )), ), ] # For databases where {'f': 'g'} (without surrounding []) matches # [{'f': 'g'}]. if not connection.features.json_key_contains_list_matching_requires_list: tests.append(('value__d__contains', {'f': 'g'})) for lookup, value in tests: with self.subTest(lookup=lookup, value=value): self.assertIs(NullableJSONModel.objects.filter( **{lookup: value}, ).exists(), True) def test_join_key_transform_annotation_expression(self): related_obj = RelatedJSONModel.objects.create( value={'d': ['f', 'e']}, json_model=self.objs[4], ) RelatedJSONModel.objects.create( value={'d': ['e', 'f']}, json_model=self.objs[4], ) self.assertSequenceEqual( RelatedJSONModel.objects.annotate( key=F('value__d'), related_key=F('json_model__value__d'), chain=F('key__1'), expr=Cast('key', models.JSONField()), ).filter(chain=F('related_key__0')), [related_obj], )
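# A minimal usage sketch of the lookup styles exercised by TestQuerying above,
# written against a hypothetical model (the model name and field are
# assumptions, not part of this suite). Key transforms chain with '__', the
# same path syntax indexes into arrays, and containment lookups are only
# available on backends with supports_json_field_contains.
#
#     from django.db import models
#
#     class Document(models.Model):
#         data = models.JSONField(null=True)
#
#     Document.objects.create(data={'a': 'b', 'd': ['e', {'f': 'g'}]})
#     Document.objects.filter(data__a='b')                # top-level key
#     Document.objects.filter(data__d__1__f='g')          # nested object/array path
#     Document.objects.filter(data__has_key='a')          # key existence
#     Document.objects.filter(data__contains={'a': 'b'})  # containment, where supported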
import os import sys from distutils.sysconfig import get_python_lib from setuptools import setup # Warn if we are installing over top of an existing installation. This can # cause issues where files that were deleted from a more recent Django are # still present in site-packages. See #18115. overlay_warning = False if "install" in sys.argv: lib_paths = [get_python_lib()] if lib_paths[0].startswith("/usr/lib/"): # We have to try also with an explicit prefix of /usr/local in order to # catch Debian's custom user site-packages directory. lib_paths.append(get_python_lib(prefix="/usr/local")) for lib_path in lib_paths: existing_path = os.path.abspath(os.path.join(lib_path, "django")) if os.path.exists(existing_path): # We note the need for the warning here, but present it after the # command is run, so it's more likely to be seen. overlay_warning = True break setup() if overlay_warning: sys.stderr.write(""" ======== WARNING! ======== You have just installed Django over top of an existing installation, without removing it first. Because of this, your install may now include extraneous files from a previous version that have since been removed from Django. This is known to cause a variety of problems. You should manually remove the %(existing_path)s directory and re-install Django. """ % {"existing_path": existing_path})
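# A standalone sketch of the same overlay check performed above; running it
# prints where an existing "django" package would be detected. Paths are
# platform-dependent, and the /usr/local prefix mirrors the Debian special
# case handled by setup. Kept as a comment so it doesn't execute at install
# time.
#
#     import os
#     from distutils.sysconfig import get_python_lib
#
#     for lib_path in {get_python_lib(), get_python_lib(prefix='/usr/local')}:
#         candidate = os.path.abspath(os.path.join(lib_path, 'django'))
#         print(candidate, 'exists' if os.path.exists(candidate) else 'absent')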
#!/usr/bin/env python import argparse import atexit import copy import gc import os import shutil import socket import subprocess import sys import tempfile import warnings from pathlib import Path try: import django except ImportError as e: raise RuntimeError( 'Django module not found, reference tests/README.rst for instructions.' ) from e else: from django.apps import apps from django.conf import settings from django.db import connection, connections from django.test import TestCase, TransactionTestCase from django.test.runner import parallel_type from django.test.selenium import SeleniumTestCaseBase from django.test.utils import NullTimeKeeper, TimeKeeper, get_runner from django.utils.deprecation import ( RemovedInDjango41Warning, RemovedInDjango50Warning, ) from django.utils.log import DEFAULT_LOGGING try: import MySQLdb except ImportError: pass else: # Ignore informational warnings from QuerySet.explain(). warnings.filterwarnings('ignore', r'\(1003, *', category=MySQLdb.Warning) # Make deprecation warnings errors to ensure no usage of deprecated features. warnings.simplefilter('error', RemovedInDjango50Warning) warnings.simplefilter('error', RemovedInDjango41Warning) # Make resource and runtime warning errors to ensure no usage of error prone # patterns. warnings.simplefilter("error", ResourceWarning) warnings.simplefilter("error", RuntimeWarning) # Ignore known warnings in test dependencies. warnings.filterwarnings("ignore", "'U' mode is deprecated", DeprecationWarning, module='docutils.io') # RemovedInDjango41Warning: Ignore MemcachedCache deprecation warning. warnings.filterwarnings( 'ignore', 'MemcachedCache is deprecated', category=RemovedInDjango41Warning, ) # Reduce garbage collection frequency to improve performance. Since CPython # uses refcounting, garbage collection only collects objects with cyclic # references, which are a minority, so the garbage collection threshold can be # larger than the default threshold of 700 allocations + deallocations without # much increase in memory usage. gc.set_threshold(100_000) RUNTESTS_DIR = os.path.abspath(os.path.dirname(__file__)) TEMPLATE_DIR = os.path.join(RUNTESTS_DIR, 'templates') # Create a specific subdirectory for the duration of the test suite. TMPDIR = tempfile.mkdtemp(prefix='django_') # Set the TMPDIR environment variable in addition to tempfile.tempdir # so that children processes inherit it. tempfile.tempdir = os.environ['TMPDIR'] = TMPDIR # Removing the temporary TMPDIR. atexit.register(shutil.rmtree, TMPDIR) # This is a dict mapping RUNTESTS_DIR subdirectory to subdirectories of that # directory to skip when searching for test modules. SUBDIRS_TO_SKIP = { '': {'import_error_package', 'test_runner_apps'}, 'gis_tests': {'data'}, } ALWAYS_INSTALLED_APPS = [ 'django.contrib.contenttypes', 'django.contrib.auth', 'django.contrib.sites', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.admin.apps.SimpleAdminConfig', 'django.contrib.staticfiles', ] ALWAYS_MIDDLEWARE = [ 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', ] # Need to add the associated contrib app to INSTALLED_APPS in some cases to # avoid "RuntimeError: Model class X doesn't declare an explicit app_label # and isn't in an application in INSTALLED_APPS." 
CONTRIB_TESTS_TO_APPS = { 'deprecation': ['django.contrib.flatpages', 'django.contrib.redirects'], 'flatpages_tests': ['django.contrib.flatpages'], 'redirects_tests': ['django.contrib.redirects'], } def get_test_modules(gis_enabled): """ Scan the tests directory and yield the names of all test modules. The yielded names have either one dotted part like "test_runner" or, in the case of GIS tests, two dotted parts like "gis_tests.gdal_tests". """ discovery_dirs = [''] if gis_enabled: # GIS tests are in nested apps discovery_dirs.append('gis_tests') else: SUBDIRS_TO_SKIP[''].add('gis_tests') for dirname in discovery_dirs: dirpath = os.path.join(RUNTESTS_DIR, dirname) subdirs_to_skip = SUBDIRS_TO_SKIP[dirname] with os.scandir(dirpath) as entries: for f in entries: if ( '.' in f.name or os.path.basename(f.name) in subdirs_to_skip or f.is_file() or not os.path.exists(os.path.join(f.path, '__init__.py')) ): continue test_module = f.name if dirname: test_module = dirname + '.' + test_module yield test_module def get_label_module(label): """Return the top-level module part for a test label.""" path = Path(label) if len(path.parts) == 1: # Interpret the label as a dotted module name. return label.split('.')[0] # Otherwise, interpret the label as a path. Check existence first to # provide a better error message than relative_to() if it doesn't exist. if not path.exists(): raise RuntimeError(f'Test label path {label} does not exist') path = path.resolve() rel_path = path.relative_to(RUNTESTS_DIR) return rel_path.parts[0] def get_filtered_test_modules(start_at, start_after, gis_enabled, test_labels=None): if test_labels is None: test_labels = [] # Reduce each test label to just the top-level module part. label_modules = set() for label in test_labels: test_module = get_label_module(label) label_modules.add(test_module) # It would be nice to put this validation earlier but it must come after # django.setup() so that connection.features.gis_enabled can be accessed. if 'gis_tests' in label_modules and not gis_enabled: print('Aborting: A GIS database backend is required to run gis_tests.') sys.exit(1) def _module_match_label(module_name, label): # Exact or ancestor match. return module_name == label or module_name.startswith(label + '.') start_label = start_at or start_after for test_module in get_test_modules(gis_enabled): if start_label: if not _module_match_label(test_module, start_label): continue start_label = '' if not start_at: assert start_after # Skip the current one before starting. continue # If the module (or an ancestor) was named on the command line, or # no modules were named (i.e., run all), include the test module. if not test_labels or any( _module_match_label(test_module, label_module) for label_module in label_modules ): yield test_module def setup_collect_tests(start_at, start_after, test_labels=None): state = { 'INSTALLED_APPS': settings.INSTALLED_APPS, 'ROOT_URLCONF': getattr(settings, "ROOT_URLCONF", ""), 'TEMPLATES': settings.TEMPLATES, 'LANGUAGE_CODE': settings.LANGUAGE_CODE, 'STATIC_URL': settings.STATIC_URL, 'STATIC_ROOT': settings.STATIC_ROOT, 'MIDDLEWARE': settings.MIDDLEWARE, } # Redirect some settings for the duration of these tests. 
settings.INSTALLED_APPS = ALWAYS_INSTALLED_APPS settings.ROOT_URLCONF = 'urls' settings.STATIC_URL = 'static/' settings.STATIC_ROOT = os.path.join(TMPDIR, 'static') settings.TEMPLATES = [{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [TEMPLATE_DIR], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }] settings.LANGUAGE_CODE = 'en' settings.SITE_ID = 1 settings.MIDDLEWARE = ALWAYS_MIDDLEWARE settings.MIGRATION_MODULES = { # This lets us skip creating migrations for the test models as many of # them depend on one of the following contrib applications. 'auth': None, 'contenttypes': None, 'sessions': None, } log_config = copy.deepcopy(DEFAULT_LOGGING) # Filter out non-error logging so we don't have to capture it in lots of # tests. log_config['loggers']['django']['level'] = 'ERROR' settings.LOGGING = log_config settings.SILENCED_SYSTEM_CHECKS = [ 'fields.W342', # ForeignKey(unique=True) -> OneToOneField ] # Load all the ALWAYS_INSTALLED_APPS. django.setup() # This flag must be evaluated after django.setup() because otherwise it can # raise AppRegistryNotReady when running gis_tests in isolation on some # backends (e.g. PostGIS). gis_enabled = connection.features.gis_enabled test_modules = list(get_filtered_test_modules( start_at, start_after, gis_enabled, test_labels=test_labels, )) return test_modules, state def teardown_collect_tests(state): # Restore the old settings. for key, value in state.items(): setattr(settings, key, value) def get_installed(): return [app_config.name for app_config in apps.get_app_configs()] # This function should be called only after calling django.setup(), # since it calls connection.features.gis_enabled. def get_apps_to_install(test_modules): for test_module in test_modules: if test_module in CONTRIB_TESTS_TO_APPS: yield from CONTRIB_TESTS_TO_APPS[test_module] yield test_module # Add contrib.gis to INSTALLED_APPS if needed (rather than requiring # @override_settings(INSTALLED_APPS=...) on all test cases. if connection.features.gis_enabled: yield 'django.contrib.gis' def setup_run_tests(verbosity, start_at, start_after, test_labels=None): test_modules, state = setup_collect_tests(start_at, start_after, test_labels=test_labels) installed_apps = set(get_installed()) for app in get_apps_to_install(test_modules): if app in installed_apps: continue if verbosity >= 2: print(f'Importing application {app}') settings.INSTALLED_APPS.append(app) installed_apps.add(app) apps.set_installed_apps(settings.INSTALLED_APPS) # Force declaring available_apps in TransactionTestCase for faster tests. def no_available_apps(self): raise Exception( 'Please define available_apps in TransactionTestCase and its ' 'subclasses.' ) TransactionTestCase.available_apps = property(no_available_apps) TestCase.available_apps = None # Set an environment variable that other code may consult to see if # Django's own test suite is running. os.environ['RUNNING_DJANGOS_TEST_SUITE'] = 'true' test_labels = test_labels or test_modules return test_labels, state def teardown_run_tests(state): teardown_collect_tests(state) # Discard the multiprocessing.util finalizer that tries to remove a # temporary directory that's already removed by this script's # atexit.register(shutil.rmtree, TMPDIR) handler. Prevents # FileNotFoundError at the end of a test run (#27890). 
from multiprocessing.util import _finalizer_registry _finalizer_registry.pop((-100, 0), None) del os.environ['RUNNING_DJANGOS_TEST_SUITE'] def actual_test_processes(parallel): if parallel == 0: # This doesn't work before django.setup() on some databases. if all(conn.features.can_clone_databases for conn in connections.all()): return parallel_type('auto') else: return 1 else: return parallel class ActionSelenium(argparse.Action): """ Validate the comma-separated list of requested browsers. """ def __call__(self, parser, namespace, values, option_string=None): browsers = values.split(',') for browser in browsers: try: SeleniumTestCaseBase.import_webdriver(browser) except ImportError: raise argparse.ArgumentError(self, "Selenium browser specification '%s' is not valid." % browser) setattr(namespace, self.dest, browsers) def django_tests(verbosity, interactive, failfast, keepdb, reverse, test_labels, debug_sql, parallel, tags, exclude_tags, test_name_patterns, start_at, start_after, pdb, buffer, timing, shuffle): actual_parallel = actual_test_processes(parallel) if verbosity >= 1: msg = "Testing against Django installed in '%s'" % os.path.dirname(django.__file__) if actual_parallel > 1: msg += " with up to %d processes" % actual_parallel print(msg) test_labels, state = setup_run_tests(verbosity, start_at, start_after, test_labels) # Run the test suite, including the extra validation tests. if not hasattr(settings, 'TEST_RUNNER'): settings.TEST_RUNNER = 'django.test.runner.DiscoverRunner' TestRunner = get_runner(settings) test_runner = TestRunner( verbosity=verbosity, interactive=interactive, failfast=failfast, keepdb=keepdb, reverse=reverse, debug_sql=debug_sql, parallel=actual_parallel, tags=tags, exclude_tags=exclude_tags, test_name_patterns=test_name_patterns, pdb=pdb, buffer=buffer, timing=timing, shuffle=shuffle, ) failures = test_runner.run_tests(test_labels) teardown_run_tests(state) return failures def collect_test_modules(start_at, start_after): test_modules, state = setup_collect_tests(start_at, start_after) teardown_collect_tests(state) return test_modules def get_subprocess_args(options): subprocess_args = [ sys.executable, __file__, '--settings=%s' % options.settings ] if options.failfast: subprocess_args.append('--failfast') if options.verbosity: subprocess_args.append('--verbosity=%s' % options.verbosity) if not options.interactive: subprocess_args.append('--noinput') if options.tags: subprocess_args.append('--tag=%s' % options.tags) if options.exclude_tags: subprocess_args.append('--exclude_tag=%s' % options.exclude_tags) if options.shuffle is not False: if options.shuffle is None: subprocess_args.append('--shuffle') else: subprocess_args.append('--shuffle=%s' % options.shuffle) return subprocess_args def bisect_tests(bisection_label, options, test_labels, start_at, start_after): if not test_labels: test_labels = collect_test_modules(start_at, start_after) print('***** Bisecting test suite: %s' % ' '.join(test_labels)) # Make sure the bisection point isn't in the test list # Also remove tests that need to be run in specific combinations for label in [bisection_label, 'model_inheritance_same_model_name']: try: test_labels.remove(label) except ValueError: pass subprocess_args = get_subprocess_args(options) iteration = 1 while len(test_labels) > 1: midpoint = len(test_labels) // 2 test_labels_a = test_labels[:midpoint] + [bisection_label] test_labels_b = test_labels[midpoint:] + [bisection_label] print('***** Pass %da: Running the first half of the test suite' % iteration) 
print('***** Test labels: %s' % ' '.join(test_labels_a)) failures_a = subprocess.run(subprocess_args + test_labels_a) print('***** Pass %db: Running the second half of the test suite' % iteration) print('***** Test labels: %s' % ' '.join(test_labels_b)) print('') failures_b = subprocess.run(subprocess_args + test_labels_b) if failures_a.returncode and not failures_b.returncode: print("***** Problem found in first half. Bisecting again...") iteration += 1 test_labels = test_labels_a[:-1] elif failures_b.returncode and not failures_a.returncode: print("***** Problem found in second half. Bisecting again...") iteration += 1 test_labels = test_labels_b[:-1] elif failures_a.returncode and failures_b.returncode: print("***** Multiple sources of failure found") break else: print("***** No source of failure found... try pair execution (--pair)") break if len(test_labels) == 1: print("***** Source of error: %s" % test_labels[0]) def paired_tests(paired_test, options, test_labels, start_at, start_after): if not test_labels: test_labels = collect_test_modules(start_at, start_after) print('***** Trying paired execution') # Make sure the constant member of the pair isn't in the test list # Also remove tests that need to be run in specific combinations for label in [paired_test, 'model_inheritance_same_model_name']: try: test_labels.remove(label) except ValueError: pass subprocess_args = get_subprocess_args(options) for i, label in enumerate(test_labels): print('***** %d of %d: Check test pairing with %s' % ( i + 1, len(test_labels), label)) failures = subprocess.call(subprocess_args + [label, paired_test]) if failures: print('***** Found problem pair with %s' % label) return print('***** No problem pair found') if __name__ == "__main__": parser = argparse.ArgumentParser(description="Run the Django test suite.") parser.add_argument( 'modules', nargs='*', metavar='module', help='Optional path(s) to test modules; e.g. "i18n" or ' '"i18n.tests.TranslationTests.test_lazy_objects".', ) parser.add_argument( '-v', '--verbosity', default=1, type=int, choices=[0, 1, 2, 3], help='Verbosity level; 0=minimal output, 1=normal output, 2=all output', ) parser.add_argument( '--noinput', action='store_false', dest='interactive', help='Tells Django to NOT prompt the user for input of any kind.', ) parser.add_argument( '--failfast', action='store_true', help='Tells Django to stop running the test suite after first failed test.', ) parser.add_argument( '--keepdb', action='store_true', help='Tells Django to preserve the test database between runs.', ) parser.add_argument( '--settings', help='Python path to settings module, e.g. "myproject.settings". If ' 'this isn\'t provided, either the DJANGO_SETTINGS_MODULE ' 'environment variable or "test_sqlite" will be used.', ) parser.add_argument( '--bisect', help='Bisect the test suite to discover a test that causes a test ' 'failure when combined with the named test.', ) parser.add_argument( '--pair', help='Run the test suite in pairs with the named test to find problem pairs.', ) parser.add_argument( '--shuffle', nargs='?', default=False, type=int, metavar='SEED', help=( 'Shuffle the order of test cases to help check that tests are ' 'properly isolated.' 
), ) parser.add_argument( '--reverse', action='store_true', help='Sort test suites and test cases in opposite order to debug ' 'test side effects not apparent with normal execution lineup.', ) parser.add_argument( '--selenium', action=ActionSelenium, metavar='BROWSERS', help='A comma-separated list of browsers to run the Selenium tests against.', ) parser.add_argument( '--headless', action='store_true', help='Run selenium tests in headless mode, if the browser supports the option.', ) parser.add_argument( '--selenium-hub', help='A URL for a selenium hub instance to use in combination with --selenium.', ) parser.add_argument( '--external-host', default=socket.gethostname(), help='The external host that can be reached by the selenium hub instance when running Selenium ' 'tests via Selenium Hub.', ) parser.add_argument( '--debug-sql', action='store_true', help='Turn on the SQL query logger within tests.', ) try: default_parallel = int(os.environ['DJANGO_TEST_PROCESSES']) except KeyError: # actual_test_processes() converts this to "auto" later on. default_parallel = 0 parser.add_argument( '--parallel', nargs='?', const='auto', default=default_parallel, type=parallel_type, metavar='N', help=( 'Run tests using up to N parallel processes. Use the value "auto" ' 'to run one test process for each processor core.' ), ) parser.add_argument( '--tag', dest='tags', action='append', help='Run only tests with the specified tags. Can be used multiple times.', ) parser.add_argument( '--exclude-tag', dest='exclude_tags', action='append', help='Do not run tests with the specified tag. Can be used multiple times.', ) parser.add_argument( '--start-after', dest='start_after', help='Run tests starting after the specified top-level module.', ) parser.add_argument( '--start-at', dest='start_at', help='Run tests starting at the specified top-level module.', ) parser.add_argument( '--pdb', action='store_true', help='Runs the PDB debugger on error or failure.' ) parser.add_argument( '-b', '--buffer', action='store_true', help='Discard output of passing tests.', ) parser.add_argument( '--timing', action='store_true', help='Output timings, including database set up and total run time.', ) parser.add_argument( '-k', dest='test_name_patterns', action='append', help=( 'Only run test methods and classes matching test name pattern. ' 'Same as unittest -k option. Can be used multiple times.' ), ) options = parser.parse_args() using_selenium_hub = options.selenium and options.selenium_hub if options.selenium_hub and not options.selenium: parser.error('--selenium-hub and --external-host require --selenium to be used.') if using_selenium_hub and not options.external_host: parser.error('--selenium-hub and --external-host must be used together.') # Allow including a trailing slash on app_labels for tab completion convenience options.modules = [os.path.normpath(labels) for labels in options.modules] mutually_exclusive_options = [options.start_at, options.start_after, options.modules] enabled_module_options = [bool(option) for option in mutually_exclusive_options].count(True) if enabled_module_options > 1: print('Aborting: --start-at, --start-after, and test labels are mutually exclusive.') sys.exit(1) for opt_name in ['start_at', 'start_after']: opt_val = getattr(options, opt_name) if opt_val: if '.' in opt_val: print('Aborting: --%s must be a top-level module.' 
% opt_name.replace('_', '-')) sys.exit(1) setattr(options, opt_name, os.path.normpath(opt_val)) if options.settings: os.environ['DJANGO_SETTINGS_MODULE'] = options.settings else: os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_sqlite') options.settings = os.environ['DJANGO_SETTINGS_MODULE'] if options.selenium: if not options.tags: options.tags = ['selenium'] elif 'selenium' not in options.tags: options.tags.append('selenium') if options.selenium_hub: SeleniumTestCaseBase.selenium_hub = options.selenium_hub SeleniumTestCaseBase.external_host = options.external_host SeleniumTestCaseBase.headless = options.headless SeleniumTestCaseBase.browsers = options.selenium if options.bisect: bisect_tests( options.bisect, options, options.modules, options.start_at, options.start_after, ) elif options.pair: paired_tests( options.pair, options, options.modules, options.start_at, options.start_after, ) else: time_keeper = TimeKeeper() if options.timing else NullTimeKeeper() with time_keeper.timed('Total run'): failures = django_tests( options.verbosity, options.interactive, options.failfast, options.keepdb, options.reverse, options.modules, options.debug_sql, options.parallel, options.tags, options.exclude_tags, getattr(options, 'test_name_patterns', None), options.start_at, options.start_after, options.pdb, options.buffer, options.timing, options.shuffle, ) time_keeper.print_results() if failures: sys.exit(1)
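# Typical invocations of this script, run from the tests/ directory. The
# label handling maps to get_filtered_test_modules() above, and test_sqlite
# is the fallback settings module set in __main__; the settings module named
# in the third example is a hypothetical user-provided file.
#
#     ./runtests.py                              # whole suite, in-memory SQLite
#     ./runtests.py admin_inlines model_fields   # only the named top-level modules
#     ./runtests.py --settings=my_pg_settings    # hypothetical alternative settings
#     ./runtests.py --parallel=auto --shuffle    # parallel processes, randomized order
#     ./runtests.py --start-after=forms_tests    # resume discovery after a module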
# This is an example test settings file for use with the Django test suite. # # The 'sqlite3' backend requires only the ENGINE setting (an in- # memory database will be used). All other backends will require a # NAME and potentially authentication information. See the # following section in the docs for more information: # # https://docs.djangoproject.com/en/dev/internals/contributing/writing-code/unit-tests/ # # The different databases that Django supports behave differently in certain # situations, so it is recommended to run the test suite against as many # database backends as possible. You may want to create a separate settings # file for each of the backends you test against. DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', }, 'other': { 'ENGINE': 'django.db.backends.sqlite3', } } SECRET_KEY = "django_tests_secret_key" # Use a fast hasher to speed up tests. PASSWORD_HASHERS = [ 'django.contrib.auth.hashers.MD5PasswordHasher', ] DEFAULT_AUTO_FIELD = 'django.db.models.AutoField' USE_TZ = False
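# A hedged sketch of the same settings adapted for PostgreSQL, one of the
# other backends the comment above recommends testing against. Database
# names, user, password, and host are placeholders, not values from this
# repository; see the docs URL above for the authoritative requirements.
#
#     DATABASES = {
#         'default': {
#             'ENGINE': 'django.db.backends.postgresql',
#             'NAME': 'django_tests',
#             'USER': 'django',
#             'PASSWORD': 'secret',
#             'HOST': 'localhost',
#         },
#         'other': {
#             'ENGINE': 'django.db.backends.postgresql',
#             'NAME': 'django_tests_other',
#             'USER': 'django',
#             'PASSWORD': 'secret',
#             'HOST': 'localhost',
#         },
#     }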
# Django documentation build configuration file, created by # sphinx-quickstart on Thu Mar 27 09:06:53 2008. # # This file is execfile()d with the current directory set to its containing dir. # # The contents of this file are pickled, so don't put values in the namespace # that aren't picklable (module imports are okay, they're removed automatically). # # All configuration values have a default; values that are commented out # serve to show the default. import sys from os.path import abspath, dirname, join # Workaround for sphinx-build recursion limit overflow: # pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL) # RuntimeError: maximum recursion depth exceeded while pickling an object # # Python's default allowed recursion depth is 1000 but this isn't enough for # building docs/ref/settings.txt sometimes. # https://groups.google.com/g/sphinx-dev/c/MtRf64eGtv4/discussion sys.setrecursionlimit(2000) # Make sure we get the version of this copy of Django sys.path.insert(1, dirname(dirname(abspath(__file__)))) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(abspath(join(dirname(__file__), "_ext"))) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.6.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ "djangodocs", 'sphinx.ext.extlinks', "sphinx.ext.intersphinx", "sphinx.ext.viewcode", "sphinx.ext.autosectionlabel", ] # AutosectionLabel settings. # Uses a <page>:<label> schema which doesn't work for duplicate sub-section # labels, so set max depth. autosectionlabel_prefix_document = True autosectionlabel_maxdepth = 2 # Linkcheck settings. linkcheck_ignore = [ # Special-use addresses and domain names. (RFC 6761/6890) r'^https?://(?:127\.0\.0\.1|\[::1\])(?::\d+)?/', r'^https?://(?:[^/\.]+\.)*example\.(?:com|net|org)(?::\d+)?/', r'^https?://(?:[^/\.]+\.)*(?:example|invalid|localhost|test)(?::\d+)?/', # Pages that are inaccessible because they require authentication. r'^https://github\.com/[^/]+/[^/]+/fork', r'^https://code\.djangoproject\.com/github/login', r'^https://code\.djangoproject\.com/newticket', r'^https://(?:code|www)\.djangoproject\.com/admin/', r'^https://www\.djangoproject\.com/community/add/blogs/', r'^https://www\.google\.com/webmasters/tools/ping', r'^https://search\.google\.com/search-console/welcome', # Fragments used to dynamically switch content or populate fields. r'^https://web\.libera\.chat/#', r'^https://github\.com/[^#]+#L\d+-L\d+$', r'^https://help\.apple\.com/itc/podcasts_connect/#/itc', # Anchors on certain pages with missing a[name] attributes. r'^https://tools\.ietf\.org/html/rfc1123\.html#section-', ] # Spelling check needs an additional module that is not installed by default. # Add it only if spelling check is requested so docs can be generated without it. if 'spelling' in sys.argv: extensions.append("sphinxcontrib.spelling") # Spelling language. spelling_lang = 'en_US' # Location of word list. spelling_word_list_filename = 'spelling_wordlist' spelling_warning = True # Add any paths that contain templates here, relative to this directory. # templates_path = [] # The suffix of source filenames. 
source_suffix = '.txt' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'contents' # General substitutions. project = 'Django' copyright = 'Django Software Foundation and contributors' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '4.0' # The full version, including alpha/beta/rc tags. try: from django import VERSION, get_version except ImportError: release = version else: def django_release(): pep440ver = get_version() if VERSION[3:5] == ('alpha', 0) and 'dev' not in pep440ver: return pep440ver + '.dev' return pep440ver release = django_release() # The "development version" of Django django_next_version = '4.0' extlinks = { 'bpo': ('https://bugs.python.org/issue%s', 'bpo-'), 'commit': ('https://github.com/django/django/commit/%s', ''), 'cve': ('https://nvd.nist.gov/vuln/detail/CVE-%s', 'CVE-'), # A file or directory. GitHub redirects from blob to tree if needed. 'source': ('https://github.com/django/django/blob/main/%s', ''), 'ticket': ('https://code.djangoproject.com/ticket/%s', '#'), } # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # Location for .po/.mo translation files used when language is set locale_dirs = ['locale/'] # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build', '_theme', 'requirements.txt'] # The reST default role (used for this markup: `text`) to use for all documents. default_role = "default-role-error" # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'trac' # Links to Python's docs should reference the most recent version of the 3.x # branch, which is located at this URL. intersphinx_mapping = { 'python': ('https://docs.python.org/3/', None), 'sphinx': ('https://www.sphinx-doc.org/en/master/', None), 'psycopg2': ('https://www.psycopg.org/docs/', None), } # Python's docs don't change every week. intersphinx_cache_limit = 90 # days # The 'versionadded' and 'versionchanged' directives are overridden. suppress_warnings = ['app.add_directive'] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = "djangodocs" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ["_theme"] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". 
# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ["_static"] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%b %d, %Y' # Content template for the index page. # html_index = '' # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Djangodoc' modindex_common_prefix = ["django."] # Appended to every page rst_epilog = """ .. |django-users| replace:: :ref:`django-users <django-users-mailing-list>` .. |django-developers| replace:: :ref:`django-developers <django-developers-mailing-list>` .. |django-announce| replace:: :ref:`django-announce <django-announce-mailing-list>` .. |django-updates| replace:: :ref:`django-updates <django-updates-mailing-list>` """ # -- Options for LaTeX output -------------------------------------------------- latex_elements = { 'preamble': ( '\\DeclareUnicodeCharacter{2264}{\\ensuremath{\\le}}' '\\DeclareUnicodeCharacter{2265}{\\ensuremath{\\ge}}' '\\DeclareUnicodeCharacter{2665}{[unicode-heart]}' '\\DeclareUnicodeCharacter{2713}{[unicode-checkmark]}' ), } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, document class [howto/manual]). # latex_documents = [] latex_documents = [ ('contents', 'django.tex', 'Django Documentation', 'Django Software Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. 
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(
    'ref/django-admin',
    'django-admin',
    'Utility script for the Django web framework',
    ['Django Software Foundation'],
    1
)]


# -- Options for Texinfo output ------------------------------------------------

# List of tuples (startdocname, targetname, title, author, dir_entry,
# description, category, toctree_only)
texinfo_documents = [(
    master_doc, "django", "", "", "Django",
    "Documentation of the Django framework", "Web development", False
)]


# -- Options for Epub output ---------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project
epub_author = 'Django Software Foundation'
epub_publisher = 'Django Software Foundation'
epub_copyright = copyright

# The basename for the epub file. It defaults to the project name.
# epub_basename = 'Django'

# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
epub_theme = 'djangodocs-epub'

# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''

# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
# epub_identifier = ''

# A unique identification for the text.
# epub_uid = ''

# A tuple containing the cover image and cover page html template filenames.
epub_cover = ('', 'epub-cover.html')

# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []

# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []

# A list of files that should not be packed into the epub file.
# epub_exclude_files = []

# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3

# Allow duplicate toc entries.
# epub_tocdup = True

# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'

# Fix unsupported image types using the PIL.
# epub_fix_images = False

# Scale large images.
# epub_max_image_width = 0

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'

# If false, no index is generated.
# epub_use_index = True
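# A minimal, self-contained sketch of the release-string logic used above in
# django_release(): it appends a '.dev' marker when VERSION reports an
# unreleased ('alpha', 0) checkout but get_version() produced a bare X.Y
# string. release_for() and the VERSION tuples below are illustrative
# assumptions, not values taken from a real Django checkout.

def release_for(version_tuple, pep440ver):
    # Mirrors django_release(): main-branch builds marked ('alpha', 0) whose
    # PEP 440 string lacks a 'dev' segment get an explicit '.dev' suffix.
    if version_tuple[3:5] == ('alpha', 0) and 'dev' not in pep440ver:
        return pep440ver + '.dev'
    return pep440ver


assert release_for((4, 0, 0, 'alpha', 0), '4.0') == '4.0.dev'
assert release_for((4, 0, 0, 'alpha', 0), '4.0.dev20210501') == '4.0.dev20210501'
assert release_for((4, 0, 0, 'final', 0), '4.0') == '4.0'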
import logging import threading import weakref from django.utils.inspect import func_accepts_kwargs logger = logging.getLogger('django.dispatch') def _make_id(target): if hasattr(target, '__func__'): return (id(target.__self__), id(target.__func__)) return id(target) NONE_ID = _make_id(None) # A marker for caching NO_RECEIVERS = object() class Signal: """ Base class for all signals Internal attributes: receivers { receiverkey (id) : weakref(receiver) } """ def __init__(self, use_caching=False): """ Create a new signal. """ self.receivers = [] self.lock = threading.Lock() self.use_caching = use_caching # For convenience we create empty caches even if they are not used. # A note about caching: if use_caching is defined, then for each # distinct sender we cache the receivers that sender has in # 'sender_receivers_cache'. The cache is cleaned when .connect() or # .disconnect() is called and populated on send(). self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {} self._dead_receivers = False def connect(self, receiver, sender=None, weak=True, dispatch_uid=None): """ Connect receiver to sender for signal. Arguments: receiver A function or an instance method which is to receive signals. Receivers must be hashable objects. If weak is True, then receiver must be weak referenceable. Receivers must be able to accept keyword arguments. If a receiver is connected with a dispatch_uid argument, it will not be added if another receiver was already connected with that dispatch_uid. sender The sender to which the receiver should respond. Must either be a Python object, or None to receive events from any sender. weak Whether to use weak references to the receiver. By default, the module will attempt to use weak references to the receiver objects. If this parameter is false, then strong references will be used. dispatch_uid An identifier used to uniquely identify a particular instance of a receiver. This will usually be a string, though it may be anything hashable. """ from django.conf import settings # If DEBUG is on, check that we got a good receiver if settings.configured and settings.DEBUG: if not callable(receiver): raise TypeError('Signal receivers must be callable.') # Check for **kwargs if not func_accepts_kwargs(receiver): raise ValueError("Signal receivers must accept keyword arguments (**kwargs).") if dispatch_uid: lookup_key = (dispatch_uid, _make_id(sender)) else: lookup_key = (_make_id(receiver), _make_id(sender)) if weak: ref = weakref.ref receiver_object = receiver # Check for bound methods if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'): ref = weakref.WeakMethod receiver_object = receiver.__self__ receiver = ref(receiver) weakref.finalize(receiver_object, self._remove_receiver) with self.lock: self._clear_dead_receivers() if not any(r_key == lookup_key for r_key, _ in self.receivers): self.receivers.append((lookup_key, receiver)) self.sender_receivers_cache.clear() def disconnect(self, receiver=None, sender=None, dispatch_uid=None): """ Disconnect receiver from sender for signal. If weak references are used, disconnect need not be called. The receiver will be removed from dispatch automatically. Arguments: receiver The registered receiver to disconnect. May be none if dispatch_uid is specified. 
sender The registered sender to disconnect dispatch_uid the unique identifier of the receiver to disconnect """ if dispatch_uid: lookup_key = (dispatch_uid, _make_id(sender)) else: lookup_key = (_make_id(receiver), _make_id(sender)) disconnected = False with self.lock: self._clear_dead_receivers() for index in range(len(self.receivers)): (r_key, _) = self.receivers[index] if r_key == lookup_key: disconnected = True del self.receivers[index] break self.sender_receivers_cache.clear() return disconnected def has_listeners(self, sender=None): return bool(self._live_receivers(sender)) def send(self, sender, **named): """ Send signal from sender to all connected receivers. If any receiver raises an error, the error propagates back through send, terminating the dispatch loop. So it's possible that all receivers won't be called if an error is raised. Arguments: sender The sender of the signal. Either a specific object or None. named Named arguments which will be passed to receivers. Return a list of tuple pairs [(receiver, response), ... ]. """ if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS: return [] return [ (receiver, receiver(signal=self, sender=sender, **named)) for receiver in self._live_receivers(sender) ] def send_robust(self, sender, **named): """ Send signal from sender to all connected receivers catching errors. Arguments: sender The sender of the signal. Can be any Python object (normally one registered with a connect if you actually want something to occur). named Named arguments which will be passed to receivers. Return a list of tuple pairs [(receiver, response), ... ]. If any receiver raises an error (specifically any subclass of Exception), return the error instance as the result for that receiver. """ if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS: return [] # Call each receiver with whatever arguments it can accept. # Return a list of tuple pairs [(receiver, response), ... ]. responses = [] for receiver in self._live_receivers(sender): try: response = receiver(signal=self, sender=sender, **named) except Exception as err: logger.error( 'Error calling %s in Signal.send_robust() (%s)', receiver.__qualname__, err, exc_info=err, ) responses.append((receiver, err)) else: responses.append((receiver, response)) return responses def _clear_dead_receivers(self): # Note: caller is assumed to hold self.lock. if self._dead_receivers: self._dead_receivers = False self.receivers = [ r for r in self.receivers if not(isinstance(r[1], weakref.ReferenceType) and r[1]() is None) ] def _live_receivers(self, sender): """ Filter sequence of receivers to get resolved, live receivers. This checks for weak references and resolves them, then returning only live receivers. """ receivers = None if self.use_caching and not self._dead_receivers: receivers = self.sender_receivers_cache.get(sender) # We could end up here with NO_RECEIVERS even if we do check this case in # .send() prior to calling _live_receivers() due to concurrent .send() call. if receivers is NO_RECEIVERS: return [] if receivers is None: with self.lock: self._clear_dead_receivers() senderkey = _make_id(sender) receivers = [] for (receiverkey, r_senderkey), receiver in self.receivers: if r_senderkey == NONE_ID or r_senderkey == senderkey: receivers.append(receiver) if self.use_caching: if not receivers: self.sender_receivers_cache[sender] = NO_RECEIVERS else: # Note, we must cache the weakref versions. 
self.sender_receivers_cache[sender] = receivers non_weak_receivers = [] for receiver in receivers: if isinstance(receiver, weakref.ReferenceType): # Dereference the weak reference. receiver = receiver() if receiver is not None: non_weak_receivers.append(receiver) else: non_weak_receivers.append(receiver) return non_weak_receivers def _remove_receiver(self, receiver=None): # Mark that the self.receivers list has dead weakrefs. If so, we will # clean those up in connect, disconnect and _live_receivers while # holding self.lock. Note that doing the cleanup here isn't a good # idea, _remove_receiver() will be called as side effect of garbage # collection, and so the call can happen while we are already holding # self.lock. self._dead_receivers = True def receiver(signal, **kwargs): """ A decorator for connecting receivers to signals. Used by passing in the signal (or list of signals) and keyword arguments to connect:: @receiver(post_save, sender=MyModel) def signal_receiver(sender, **kwargs): ... @receiver([post_save, post_delete], sender=MyModel) def signals_receiver(sender, **kwargs): ... """ def _decorator(func): if isinstance(signal, (list, tuple)): for s in signal: s.connect(func, **kwargs) else: signal.connect(func, **kwargs) return func return _decorator
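# A short usage sketch of the Signal API defined above, assuming this module
# is importable as django.dispatch. The signal name, receiver function, and
# sender class are hypothetical examples, not part of the dispatcher itself.

from django.dispatch import Signal, receiver

pizza_done = Signal()


class Pizzeria:
    pass


@receiver(pizza_done, sender=Pizzeria)
def announce(sender, **kwargs):
    # Receivers must accept **kwargs; extra named arguments passed to send()
    # arrive here (e.g. kwargs['toppings']), along with 'signal' itself.
    return 'ready: %s' % kwargs.get('toppings')


# send() returns [(receiver, response), ...] and lets receiver exceptions
# propagate; send_robust() would instead catch each exception and return it
# as that receiver's response.
responses = pizza_done.send(sender=Pizzeria, toppings='mushroom')
assert responses == [(announce, 'ready: mushroom')]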
import argparse import ctypes import faulthandler import hashlib import io import itertools import logging import multiprocessing import os import pickle import random import sys import textwrap import unittest import warnings from collections import defaultdict from contextlib import contextmanager from importlib import import_module from io import StringIO from django.core.management import call_command from django.db import connections from django.test import SimpleTestCase, TestCase from django.test.utils import ( NullTimeKeeper, TimeKeeper, iter_test_cases, setup_databases as _setup_databases, setup_test_environment, teardown_databases as _teardown_databases, teardown_test_environment, ) from django.utils.datastructures import OrderedSet from django.utils.deprecation import RemovedInDjango50Warning try: import ipdb as pdb except ImportError: import pdb try: import tblib.pickling_support except ImportError: tblib = None class DebugSQLTextTestResult(unittest.TextTestResult): def __init__(self, stream, descriptions, verbosity): self.logger = logging.getLogger('django.db.backends') self.logger.setLevel(logging.DEBUG) self.debug_sql_stream = None super().__init__(stream, descriptions, verbosity) def startTest(self, test): self.debug_sql_stream = StringIO() self.handler = logging.StreamHandler(self.debug_sql_stream) self.logger.addHandler(self.handler) super().startTest(test) def stopTest(self, test): super().stopTest(test) self.logger.removeHandler(self.handler) if self.showAll: self.debug_sql_stream.seek(0) self.stream.write(self.debug_sql_stream.read()) self.stream.writeln(self.separator2) def addError(self, test, err): super().addError(test, err) if self.debug_sql_stream is None: # Error before tests e.g. in setUpTestData(). sql = '' else: self.debug_sql_stream.seek(0) sql = self.debug_sql_stream.read() self.errors[-1] = self.errors[-1] + (sql,) def addFailure(self, test, err): super().addFailure(test, err) self.debug_sql_stream.seek(0) self.failures[-1] = self.failures[-1] + (self.debug_sql_stream.read(),) def addSubTest(self, test, subtest, err): super().addSubTest(test, subtest, err) if err is not None: self.debug_sql_stream.seek(0) errors = self.failures if issubclass(err[0], test.failureException) else self.errors errors[-1] = errors[-1] + (self.debug_sql_stream.read(),) def printErrorList(self, flavour, errors): for test, err, sql_debug in errors: self.stream.writeln(self.separator1) self.stream.writeln("%s: %s" % (flavour, self.getDescription(test))) self.stream.writeln(self.separator2) self.stream.writeln(err) self.stream.writeln(self.separator2) self.stream.writeln(sql_debug) class PDBDebugResult(unittest.TextTestResult): """ Custom result class that triggers a PDB session when an error or failure occurs. """ def addError(self, test, err): super().addError(test, err) self.debug(err) def addFailure(self, test, err): super().addFailure(test, err) self.debug(err) def debug(self, error): self._restoreStdout() self.buffer = False exc_type, exc_value, traceback = error print("\nOpening PDB: %r" % exc_value) pdb.post_mortem(traceback) class DummyList: """ Dummy list class for faking storage of results in unittest.TestResult. """ __slots__ = () def append(self, item): pass class RemoteTestResult(unittest.TestResult): """ Extend unittest.TestResult to record events in the child processes so they can be replayed in the parent process. Events include things like which tests succeeded or failed. 
""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # Fake storage of results to reduce memory usage. These are used by the # unittest default methods, but here 'events' is used instead. dummy_list = DummyList() self.failures = dummy_list self.errors = dummy_list self.skipped = dummy_list self.expectedFailures = dummy_list self.unexpectedSuccesses = dummy_list if tblib is not None: tblib.pickling_support.install() self.events = [] def __getstate__(self): # Make this class picklable by removing the file-like buffer # attributes. This is possible since they aren't used after unpickling # after being sent to ParallelTestSuite. state = self.__dict__.copy() state.pop('_stdout_buffer', None) state.pop('_stderr_buffer', None) state.pop('_original_stdout', None) state.pop('_original_stderr', None) return state @property def test_index(self): return self.testsRun - 1 def _confirm_picklable(self, obj): """ Confirm that obj can be pickled and unpickled as multiprocessing will need to pickle the exception in the child process and unpickle it in the parent process. Let the exception rise, if not. """ pickle.loads(pickle.dumps(obj)) def _print_unpicklable_subtest(self, test, subtest, pickle_exc): print(""" Subtest failed: test: {} subtest: {} Unfortunately, the subtest that failed cannot be pickled, so the parallel test runner cannot handle it cleanly. Here is the pickling error: > {} You should re-run this test with --parallel=1 to reproduce the failure with a cleaner failure message. """.format(test, subtest, pickle_exc)) def check_picklable(self, test, err): # Ensure that sys.exc_info() tuples are picklable. This displays a # clear multiprocessing.pool.RemoteTraceback generated in the child # process instead of a multiprocessing.pool.MaybeEncodingError, making # the root cause easier to figure out for users who aren't familiar # with the multiprocessing module. Since we're in a forked process, # our best chance to communicate with them is to print to stdout. try: self._confirm_picklable(err) except Exception as exc: original_exc_txt = repr(err[1]) original_exc_txt = textwrap.fill(original_exc_txt, 75, initial_indent=' ', subsequent_indent=' ') pickle_exc_txt = repr(exc) pickle_exc_txt = textwrap.fill(pickle_exc_txt, 75, initial_indent=' ', subsequent_indent=' ') if tblib is None: print(""" {} failed: {} Unfortunately, tracebacks cannot be pickled, making it impossible for the parallel test runner to handle this exception cleanly. In order to see the traceback, you should install tblib: python -m pip install tblib """.format(test, original_exc_txt)) else: print(""" {} failed: {} Unfortunately, the exception it raised cannot be pickled, making it impossible for the parallel test runner to handle it cleanly. Here's the error encountered while trying to pickle the exception: {} You should re-run this test with the --parallel=1 option to reproduce the failure and get a correct traceback. 
""".format(test, original_exc_txt, pickle_exc_txt)) raise def check_subtest_picklable(self, test, subtest): try: self._confirm_picklable(subtest) except Exception as exc: self._print_unpicklable_subtest(test, subtest, exc) raise def startTestRun(self): super().startTestRun() self.events.append(('startTestRun',)) def stopTestRun(self): super().stopTestRun() self.events.append(('stopTestRun',)) def startTest(self, test): super().startTest(test) self.events.append(('startTest', self.test_index)) def stopTest(self, test): super().stopTest(test) self.events.append(('stopTest', self.test_index)) def addError(self, test, err): self.check_picklable(test, err) self.events.append(('addError', self.test_index, err)) super().addError(test, err) def addFailure(self, test, err): self.check_picklable(test, err) self.events.append(('addFailure', self.test_index, err)) super().addFailure(test, err) def addSubTest(self, test, subtest, err): # Follow Python's implementation of unittest.TestResult.addSubTest() by # not doing anything when a subtest is successful. if err is not None: # Call check_picklable() before check_subtest_picklable() since # check_picklable() performs the tblib check. self.check_picklable(test, err) self.check_subtest_picklable(test, subtest) self.events.append(('addSubTest', self.test_index, subtest, err)) super().addSubTest(test, subtest, err) def addSuccess(self, test): self.events.append(('addSuccess', self.test_index)) super().addSuccess(test) def addSkip(self, test, reason): self.events.append(('addSkip', self.test_index, reason)) super().addSkip(test, reason) def addExpectedFailure(self, test, err): # If tblib isn't installed, pickling the traceback will always fail. # However we don't want tblib to be required for running the tests # when they pass or fail as expected. Drop the traceback when an # expected failure occurs. if tblib is None: err = err[0], err[1], None self.check_picklable(test, err) self.events.append(('addExpectedFailure', self.test_index, err)) super().addExpectedFailure(test, err) def addUnexpectedSuccess(self, test): self.events.append(('addUnexpectedSuccess', self.test_index)) super().addUnexpectedSuccess(test) def wasSuccessful(self): """Tells whether or not this result was a success.""" failure_types = {'addError', 'addFailure', 'addSubTest', 'addUnexpectedSuccess'} return all(e[0] not in failure_types for e in self.events) def _exc_info_to_string(self, err, test): # Make this method no-op. It only powers the default unittest behavior # for recording errors, but this class pickles errors into 'events' # instead. return '' class RemoteTestRunner: """ Run tests and record everything but don't display anything. The implementation matches the unpythonic coding style of unittest2. """ resultclass = RemoteTestResult def __init__(self, failfast=False, resultclass=None, buffer=False): self.failfast = failfast self.buffer = buffer if resultclass is not None: self.resultclass = resultclass def run(self, test): result = self.resultclass() unittest.registerResult(result) result.failfast = self.failfast result.buffer = self.buffer test(result) return result def parallel_type(value): """Parse value passed to the --parallel option.""" # The current implementation of the parallel test runner requires # multiprocessing to start subprocesses with fork(). 
if multiprocessing.get_start_method() != 'fork': return 1 if value == 'auto': return multiprocessing.cpu_count() try: return int(value) except ValueError: raise argparse.ArgumentTypeError( f"{value!r} is not an integer or the string 'auto'" ) _worker_id = 0 def _init_worker(counter): """ Switch to databases dedicated to this worker. This helper lives at module-level because of the multiprocessing module's requirements. """ global _worker_id with counter.get_lock(): counter.value += 1 _worker_id = counter.value for alias in connections: connection = connections[alias] settings_dict = connection.creation.get_test_db_clone_settings(str(_worker_id)) # connection.settings_dict must be updated in place for changes to be # reflected in django.db.connections. If the following line assigned # connection.settings_dict = settings_dict, new threads would connect # to the default database instead of the appropriate clone. connection.settings_dict.update(settings_dict) connection.close() def _run_subsuite(args): """ Run a suite of tests with a RemoteTestRunner and return a RemoteTestResult. This helper lives at module-level and its arguments are wrapped in a tuple because of the multiprocessing module's requirements. """ runner_class, subsuite_index, subsuite, failfast, buffer = args runner = runner_class(failfast=failfast, buffer=buffer) result = runner.run(subsuite) return subsuite_index, result.events class ParallelTestSuite(unittest.TestSuite): """ Run a series of tests in parallel in several processes. While the unittest module's documentation implies that orchestrating the execution of tests is the responsibility of the test runner, in practice, it appears that TestRunner classes are more concerned with formatting and displaying test results. Since there are fewer use cases for customizing TestSuite than TestRunner, implementing parallelization at the level of the TestSuite improves interoperability with existing custom test runners. A single instance of a test runner can still collect results from all tests without being aware that they have been run in parallel. """ # In case someone wants to modify these in a subclass. init_worker = _init_worker run_subsuite = _run_subsuite runner_class = RemoteTestRunner def __init__(self, subsuites, processes, failfast=False, buffer=False): self.subsuites = subsuites self.processes = processes self.failfast = failfast self.buffer = buffer super().__init__() def run(self, result): """ Distribute test cases across workers. Return an identifier of each test case with its result in order to use imap_unordered to show results as soon as they're available. To minimize pickling errors when getting results from workers: - pass back numeric indexes in self.subsuites instead of tests - make tracebacks picklable with tblib, if available Even with tblib, errors may still occur for dynamically created exception classes which cannot be unpickled. 
""" counter = multiprocessing.Value(ctypes.c_int, 0) pool = multiprocessing.Pool( processes=self.processes, initializer=self.init_worker.__func__, initargs=[counter], ) args = [ (self.runner_class, index, subsuite, self.failfast, self.buffer) for index, subsuite in enumerate(self.subsuites) ] test_results = pool.imap_unordered(self.run_subsuite.__func__, args) while True: if result.shouldStop: pool.terminate() break try: subsuite_index, events = test_results.next(timeout=0.1) except multiprocessing.TimeoutError: continue except StopIteration: pool.close() break tests = list(self.subsuites[subsuite_index]) for event in events: event_name = event[0] handler = getattr(result, event_name, None) if handler is None: continue test = tests[event[1]] args = event[2:] handler(test, *args) pool.join() return result def __iter__(self): return iter(self.subsuites) class Shuffler: """ This class implements shuffling with a special consistency property. Consistency means that, for a given seed and key function, if two sets of items are shuffled, the resulting order will agree on the intersection of the two sets. For example, if items are removed from an original set, the shuffled order for the new set will be the shuffled order of the original set restricted to the smaller set. """ # This doesn't need to be cryptographically strong, so use what's fastest. hash_algorithm = 'md5' @classmethod def _hash_text(cls, text): h = hashlib.new(cls.hash_algorithm) h.update(text.encode('utf-8')) return h.hexdigest() def __init__(self, seed=None): if seed is None: # Limit seeds to 10 digits for simpler output. seed = random.randint(0, 10**10 - 1) seed_source = 'generated' else: seed_source = 'given' self.seed = seed self.seed_source = seed_source @property def seed_display(self): return f'{self.seed!r} ({self.seed_source})' def _hash_item(self, item, key): text = '{}{}'.format(self.seed, key(item)) return self._hash_text(text) def shuffle(self, items, key): """ Return a new list of the items in a shuffled order. The `key` is a function that accepts an item in `items` and returns a string unique for that item that can be viewed as a string id. The order of the return value is deterministic. It depends on the seed and key function but not on the original order. 
""" hashes = {} for item in items: hashed = self._hash_item(item, key) if hashed in hashes: msg = 'item {!r} has same hash {!r} as item {!r}'.format( item, hashed, hashes[hashed], ) raise RuntimeError(msg) hashes[hashed] = item return [hashes[hashed] for hashed in sorted(hashes)] class DiscoverRunner: """A Django test runner that uses unittest2 test discovery.""" test_suite = unittest.TestSuite parallel_test_suite = ParallelTestSuite test_runner = unittest.TextTestRunner test_loader = unittest.defaultTestLoader reorder_by = (TestCase, SimpleTestCase) def __init__(self, pattern=None, top_level=None, verbosity=1, interactive=True, failfast=False, keepdb=False, reverse=False, debug_mode=False, debug_sql=False, parallel=0, tags=None, exclude_tags=None, test_name_patterns=None, pdb=False, buffer=False, enable_faulthandler=True, timing=False, shuffle=False, **kwargs): self.pattern = pattern self.top_level = top_level self.verbosity = verbosity self.interactive = interactive self.failfast = failfast self.keepdb = keepdb self.reverse = reverse self.debug_mode = debug_mode self.debug_sql = debug_sql self.parallel = parallel self.tags = set(tags or []) self.exclude_tags = set(exclude_tags or []) if not faulthandler.is_enabled() and enable_faulthandler: try: faulthandler.enable(file=sys.stderr.fileno()) except (AttributeError, io.UnsupportedOperation): faulthandler.enable(file=sys.__stderr__.fileno()) self.pdb = pdb if self.pdb and self.parallel > 1: raise ValueError('You cannot use --pdb with parallel tests; pass --parallel=1 to use it.') self.buffer = buffer self.test_name_patterns = None self.time_keeper = TimeKeeper() if timing else NullTimeKeeper() if test_name_patterns: # unittest does not export the _convert_select_pattern function # that converts command-line arguments to patterns. self.test_name_patterns = { pattern if '*' in pattern else '*%s*' % pattern for pattern in test_name_patterns } self.shuffle = shuffle self._shuffler = None @classmethod def add_arguments(cls, parser): parser.add_argument( '-t', '--top-level-directory', dest='top_level', help='Top level of project for unittest discovery.', ) parser.add_argument( '-p', '--pattern', default="test*.py", help='The test matching pattern. Defaults to test*.py.', ) parser.add_argument( '--keepdb', action='store_true', help='Preserves the test DB between runs.' ) parser.add_argument( '--shuffle', nargs='?', default=False, type=int, metavar='SEED', help='Shuffles test case order.', ) parser.add_argument( '-r', '--reverse', action='store_true', help='Reverses test case order.', ) parser.add_argument( '--debug-mode', action='store_true', help='Sets settings.DEBUG to True.', ) parser.add_argument( '-d', '--debug-sql', action='store_true', help='Prints logged SQL queries on failure.', ) try: default_parallel = int(os.environ['DJANGO_TEST_PROCESSES']) except KeyError: default_parallel = 0 parser.add_argument( '--parallel', nargs='?', const='auto', default=default_parallel, type=parallel_type, metavar='N', help=( 'Run tests using up to N parallel processes. Use the value ' '"auto" to run one test process for each processor core.' ), ) parser.add_argument( '--tag', action='append', dest='tags', help='Run only tests with the specified tag. Can be used multiple times.', ) parser.add_argument( '--exclude-tag', action='append', dest='exclude_tags', help='Do not run tests with the specified tag. Can be used multiple times.', ) parser.add_argument( '--pdb', action='store_true', help='Runs a debugger (pdb, or ipdb if installed) on error or failure.' 
) parser.add_argument( '-b', '--buffer', action='store_true', help='Discard output from passing tests.', ) parser.add_argument( '--no-faulthandler', action='store_false', dest='enable_faulthandler', help='Disables the Python faulthandler module during tests.', ) parser.add_argument( '--timing', action='store_true', help=( 'Output timings, including database set up and total run time.' ), ) parser.add_argument( '-k', action='append', dest='test_name_patterns', help=( 'Only run test methods and classes that match the pattern ' 'or substring. Can be used multiple times. Same as ' 'unittest -k option.' ), ) @property def shuffle_seed(self): if self._shuffler is None: return None return self._shuffler.seed def log(self, msg, level=None): """ Log the given message at the given logging level. A verbosity of 1 logs INFO (the default level) or above, and verbosity 2 or higher logs all levels. """ if self.verbosity <= 0 or ( self.verbosity == 1 and level is not None and level < logging.INFO ): return print(msg) def setup_test_environment(self, **kwargs): setup_test_environment(debug=self.debug_mode) unittest.installHandler() def setup_shuffler(self): if self.shuffle is False: return shuffler = Shuffler(seed=self.shuffle) self.log(f'Using shuffle seed: {shuffler.seed_display}') self._shuffler = shuffler @contextmanager def load_with_patterns(self): original_test_name_patterns = self.test_loader.testNamePatterns self.test_loader.testNamePatterns = self.test_name_patterns try: yield finally: # Restore the original patterns. self.test_loader.testNamePatterns = original_test_name_patterns def load_tests_for_label(self, label, discover_kwargs): label_as_path = os.path.abspath(label) tests = None # If a module, or "module.ClassName[.method_name]", just run those. if not os.path.exists(label_as_path): with self.load_with_patterns(): tests = self.test_loader.loadTestsFromName(label) if tests.countTestCases(): return tests # Try discovery if "label" is a package or directory. is_importable, is_package = try_importing(label) if is_importable: if not is_package: return tests elif not os.path.isdir(label_as_path): if os.path.exists(label_as_path): assert tests is None raise RuntimeError( f'One of the test labels is a path to a file: {label!r}, ' f'which is not supported. Use a dotted module name or ' f'path to a directory instead.' ) return tests kwargs = discover_kwargs.copy() if os.path.isdir(label_as_path) and not self.top_level: kwargs['top_level_dir'] = find_top_level(label_as_path) with self.load_with_patterns(): tests = self.test_loader.discover(start_dir=label, **kwargs) # Make unittest forget the top-level dir it calculated from this run, # to support running tests from two different top-levels. self.test_loader._top_level_dir = None return tests def build_suite(self, test_labels=None, extra_tests=None, **kwargs): if extra_tests is not None: warnings.warn( 'The extra_tests argument is deprecated.', RemovedInDjango50Warning, stacklevel=2, ) test_labels = test_labels or ['.'] extra_tests = extra_tests or [] discover_kwargs = {} if self.pattern is not None: discover_kwargs['pattern'] = self.pattern if self.top_level is not None: discover_kwargs['top_level_dir'] = self.top_level self.setup_shuffler() all_tests = [] for label in test_labels: tests = self.load_tests_for_label(label, discover_kwargs) all_tests.extend(iter_test_cases(tests)) all_tests.extend(iter_test_cases(extra_tests)) if self.tags or self.exclude_tags: if self.tags: self.log( 'Including test tag(s): %s.' 
% ', '.join(sorted(self.tags)), level=logging.DEBUG, ) if self.exclude_tags: self.log( 'Excluding test tag(s): %s.' % ', '.join(sorted(self.exclude_tags)), level=logging.DEBUG, ) all_tests = filter_tests_by_tags(all_tests, self.tags, self.exclude_tags) # Put the failures detected at load time first for quicker feedback. # _FailedTest objects include things like test modules that couldn't be # found or that couldn't be loaded due to syntax errors. test_types = (unittest.loader._FailedTest, *self.reorder_by) all_tests = list(reorder_tests( all_tests, test_types, shuffler=self._shuffler, reverse=self.reverse, )) self.log('Found %d test(s).' % len(all_tests)) suite = self.test_suite(all_tests) if self.parallel > 1: subsuites = partition_suite_by_case(suite) # Since tests are distributed across processes on a per-TestCase # basis, there's no need for more processes than TestCases. processes = min(self.parallel, len(subsuites)) if processes > 1: suite = self.parallel_test_suite( subsuites, processes, self.failfast, self.buffer, ) return suite def setup_databases(self, **kwargs): return _setup_databases( self.verbosity, self.interactive, time_keeper=self.time_keeper, keepdb=self.keepdb, debug_sql=self.debug_sql, parallel=self.parallel, **kwargs ) def get_resultclass(self): if self.debug_sql: return DebugSQLTextTestResult elif self.pdb: return PDBDebugResult def get_test_runner_kwargs(self): return { 'failfast': self.failfast, 'resultclass': self.get_resultclass(), 'verbosity': self.verbosity, 'buffer': self.buffer, } def run_checks(self, databases): # Checks are run after database creation since some checks require # database access. call_command('check', verbosity=self.verbosity, databases=databases) def run_suite(self, suite, **kwargs): kwargs = self.get_test_runner_kwargs() runner = self.test_runner(**kwargs) try: return runner.run(suite) finally: if self._shuffler is not None: seed_display = self._shuffler.seed_display self.log(f'Used shuffle seed: {seed_display}') def teardown_databases(self, old_config, **kwargs): """Destroy all the non-mirror databases.""" _teardown_databases( old_config, verbosity=self.verbosity, parallel=self.parallel, keepdb=self.keepdb, ) def teardown_test_environment(self, **kwargs): unittest.removeHandler() teardown_test_environment() def suite_result(self, suite, result, **kwargs): return len(result.failures) + len(result.errors) def _get_databases(self, suite): databases = {} for test in iter_test_cases(suite): test_databases = getattr(test, 'databases', None) if test_databases == '__all__': test_databases = connections if test_databases: serialized_rollback = getattr(test, 'serialized_rollback', False) databases.update( (alias, serialized_rollback or databases.get(alias, False)) for alias in test_databases ) return databases def get_databases(self, suite): databases = self._get_databases(suite) unused_databases = [alias for alias in connections if alias not in databases] if unused_databases: self.log( 'Skipping setup of unused database(s): %s.' % ', '.join(sorted(unused_databases)), level=logging.DEBUG, ) return databases def run_tests(self, test_labels, extra_tests=None, **kwargs): """ Run the unit tests for all the test labels in the provided list. Test labels should be dotted Python paths to test modules, test classes, or test methods. Return the number of tests that failed. 
""" if extra_tests is not None: warnings.warn( 'The extra_tests argument is deprecated.', RemovedInDjango50Warning, stacklevel=2, ) self.setup_test_environment() suite = self.build_suite(test_labels, extra_tests) databases = self.get_databases(suite) serialized_aliases = set( alias for alias, serialize in databases.items() if serialize ) with self.time_keeper.timed('Total database setup'): old_config = self.setup_databases( aliases=databases, serialized_aliases=serialized_aliases, ) run_failed = False try: self.run_checks(databases) result = self.run_suite(suite) except Exception: run_failed = True raise finally: try: with self.time_keeper.timed('Total database teardown'): self.teardown_databases(old_config) self.teardown_test_environment() except Exception: # Silence teardown exceptions if an exception was raised during # runs to avoid shadowing it. if not run_failed: raise self.time_keeper.print_results() return self.suite_result(suite, result) def try_importing(label): """ Try importing a test label, and return (is_importable, is_package). Relative labels like "." and ".." are seen as directories. """ try: mod = import_module(label) except (ImportError, TypeError): return (False, False) return (True, hasattr(mod, '__path__')) def find_top_level(top_level): # Try to be a bit smarter than unittest about finding the default top-level # for a given directory path, to avoid breaking relative imports. # (Unittest's default is to set top-level equal to the path, which means # relative imports will result in "Attempted relative import in # non-package."). # We'd be happy to skip this and require dotted module paths (which don't # cause this problem) instead of file paths (which do), but in the case of # a directory in the cwd, which would be equally valid if considered as a # top-level module or as a directory path, unittest unfortunately prefers # the latter. while True: init_py = os.path.join(top_level, '__init__.py') if not os.path.exists(init_py): break try_next = os.path.dirname(top_level) if try_next == top_level: # __init__.py all the way down? give up. break top_level = try_next return top_level def _class_shuffle_key(cls): return f'{cls.__module__}.{cls.__qualname__}' def shuffle_tests(tests, shuffler): """ Return an iterator over the given tests in a shuffled order, keeping tests next to other tests of their class. `tests` should be an iterable of tests. """ tests_by_type = {} for _, class_tests in itertools.groupby(tests, type): class_tests = list(class_tests) test_type = type(class_tests[0]) class_tests = shuffler.shuffle(class_tests, key=lambda test: test.id()) tests_by_type[test_type] = class_tests classes = shuffler.shuffle(tests_by_type, key=_class_shuffle_key) return itertools.chain(*(tests_by_type[cls] for cls in classes)) def reorder_test_bin(tests, shuffler=None, reverse=False): """ Return an iterator that reorders the given tests, keeping tests next to other tests of their class. `tests` should be an iterable of tests that supports reversed(). """ if shuffler is None: if reverse: return reversed(tests) # The function must return an iterator. return iter(tests) tests = shuffle_tests(tests, shuffler) if not reverse: return tests # Arguments to reversed() must be reversible. return reversed(list(tests)) def reorder_tests(tests, classes, reverse=False, shuffler=None): """ Reorder an iterable of tests, grouping by the given TestCase classes. This function also removes any duplicates and reorders so that tests of the same type are consecutive. 
The result is returned as an iterator. `classes` is a sequence of types. Tests that are instances of `classes[0]` are grouped first, followed by instances of `classes[1]`, etc. Tests that are not instances of any of the classes are grouped last. If `reverse` is True, the tests within each `classes` group are reversed, but without reversing the order of `classes` itself. The `shuffler` argument is an optional instance of this module's `Shuffler` class. If provided, tests will be shuffled within each `classes` group, but keeping tests with other tests of their TestCase class. Reversing is applied after shuffling to allow reversing the same random order. """ # Each bin maps TestCase class to OrderedSet of tests. This permits tests # to be grouped by TestCase class even if provided non-consecutively. bins = [defaultdict(OrderedSet) for i in range(len(classes) + 1)] *class_bins, last_bin = bins for test in tests: for test_bin, test_class in zip(class_bins, classes): if isinstance(test, test_class): break else: test_bin = last_bin test_bin[type(test)].add(test) for test_bin in bins: # Call list() since reorder_test_bin()'s input must support reversed(). tests = list(itertools.chain.from_iterable(test_bin.values())) yield from reorder_test_bin(tests, shuffler=shuffler, reverse=reverse) def partition_suite_by_case(suite): """Partition a test suite by test case, preserving the order of tests.""" suite_class = type(suite) all_tests = iter_test_cases(suite) return [ suite_class(tests) for _, tests in itertools.groupby(all_tests, type) ] def test_match_tags(test, tags, exclude_tags): if isinstance(test, unittest.loader._FailedTest): # Tests that couldn't load always match to prevent tests from falsely # passing due e.g. to syntax errors. return True test_tags = set(getattr(test, 'tags', [])) test_fn_name = getattr(test, '_testMethodName', str(test)) if hasattr(test, test_fn_name): test_fn = getattr(test, test_fn_name) test_fn_tags = list(getattr(test_fn, 'tags', [])) test_tags = test_tags.union(test_fn_tags) if tags and test_tags.isdisjoint(tags): return False return test_tags.isdisjoint(exclude_tags) def filter_tests_by_tags(tests, tags, exclude_tags): """Return the matching tests as an iterator.""" return (test for test in tests if test_match_tags(test, tags, exclude_tags))
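# A small sketch of the Shuffler consistency property documented above: for a
# fixed seed and key function, shuffling a subset yields the full shuffled
# order restricted to that subset. The seed and items here are arbitrary
# examples, and the import assumes this module is django.test.runner.

from django.test.runner import Shuffler

shuffler = Shuffler(seed=42)
full = ['a', 'b', 'c', 'd', 'e']
subset = ['a', 'c', 'e']

full_order = shuffler.shuffle(full, key=lambda item: item)
subset_order = shuffler.shuffle(subset, key=lambda item: item)

# Each item's position is determined only by hash(seed + key(item)), so the
# subset appears in the same relative order as it does in the full shuffle.
assert subset_order == [item for item in full_order if item in subset]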
import json import mimetypes import os import sys from copy import copy from functools import partial from http import HTTPStatus from importlib import import_module from io import BytesIO from urllib.parse import unquote_to_bytes, urljoin, urlparse, urlsplit from asgiref.sync import sync_to_async from django.conf import settings from django.core.handlers.asgi import ASGIRequest from django.core.handlers.base import BaseHandler from django.core.handlers.wsgi import WSGIRequest from django.core.serializers.json import DjangoJSONEncoder from django.core.signals import ( got_request_exception, request_finished, request_started, ) from django.db import close_old_connections from django.http import HttpRequest, QueryDict, SimpleCookie from django.test import signals from django.test.utils import ContextList from django.urls import resolve from django.utils.encoding import force_bytes from django.utils.functional import SimpleLazyObject from django.utils.http import urlencode from django.utils.itercompat import is_iterable from django.utils.regex_helper import _lazy_re_compile __all__ = ( 'AsyncClient', 'AsyncRequestFactory', 'Client', 'RedirectCycleError', 'RequestFactory', 'encode_file', 'encode_multipart', ) BOUNDARY = 'BoUnDaRyStRiNg' MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY CONTENT_TYPE_RE = _lazy_re_compile(r'.*; charset=([\w\d-]+);?') # Structured suffix spec: https://tools.ietf.org/html/rfc6838#section-4.2.8 JSON_CONTENT_TYPE_RE = _lazy_re_compile(r'^application\/(.+\+)?json') class RedirectCycleError(Exception): """The test client has been asked to follow a redirect loop.""" def __init__(self, message, last_response): super().__init__(message) self.last_response = last_response self.redirect_chain = last_response.redirect_chain class FakePayload: """ A wrapper around BytesIO that restricts what can be read since data from the network can't be sought and cannot be read outside of its content length. This makes sure that views can't do anything under the test client that wouldn't work in real life. """ def __init__(self, content=None): self.__content = BytesIO() self.__len = 0 self.read_started = False if content is not None: self.write(content) def __len__(self): return self.__len def read(self, num_bytes=None): if not self.read_started: self.__content.seek(0) self.read_started = True if num_bytes is None: num_bytes = self.__len or 0 assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data." content = self.__content.read(num_bytes) self.__len -= num_bytes return content def write(self, content): if self.read_started: raise ValueError("Unable to write a payload after it's been read") content = force_bytes(content) self.__content.write(content) self.__len += len(content) def closing_iterator_wrapper(iterable, close): try: yield from iterable finally: request_finished.disconnect(close_old_connections) close() # will fire request_finished request_finished.connect(close_old_connections) def conditional_content_removal(request, response): """ Simulate the behavior of most web servers by removing the content of responses for HEAD requests, 1xx, 204, and 304 responses. Ensure compliance with RFC 7230, section 3.3.3. 
""" if 100 <= response.status_code < 200 or response.status_code in (204, 304): if response.streaming: response.streaming_content = [] else: response.content = b'' if request.method == 'HEAD': if response.streaming: response.streaming_content = [] else: response.content = b'' return response class ClientHandler(BaseHandler): """ A HTTP Handler that can be used for testing purposes. Use the WSGI interface to compose requests, but return the raw HttpResponse object with the originating WSGIRequest attached to its ``wsgi_request`` attribute. """ def __init__(self, enforce_csrf_checks=True, *args, **kwargs): self.enforce_csrf_checks = enforce_csrf_checks super().__init__(*args, **kwargs) def __call__(self, environ): # Set up middleware if needed. We couldn't do this earlier, because # settings weren't available. if self._middleware_chain is None: self.load_middleware() request_started.disconnect(close_old_connections) request_started.send(sender=self.__class__, environ=environ) request_started.connect(close_old_connections) request = WSGIRequest(environ) # sneaky little hack so that we can easily get round # CsrfViewMiddleware. This makes life easier, and is probably # required for backwards compatibility with external tests against # admin views. request._dont_enforce_csrf_checks = not self.enforce_csrf_checks # Request goes through middleware. response = self.get_response(request) # Simulate behaviors of most web servers. conditional_content_removal(request, response) # Attach the originating request to the response so that it could be # later retrieved. response.wsgi_request = request # Emulate a WSGI server by calling the close method on completion. if response.streaming: response.streaming_content = closing_iterator_wrapper( response.streaming_content, response.close) else: request_finished.disconnect(close_old_connections) response.close() # will fire request_finished request_finished.connect(close_old_connections) return response class AsyncClientHandler(BaseHandler): """An async version of ClientHandler.""" def __init__(self, enforce_csrf_checks=True, *args, **kwargs): self.enforce_csrf_checks = enforce_csrf_checks super().__init__(*args, **kwargs) async def __call__(self, scope): # Set up middleware if needed. We couldn't do this earlier, because # settings weren't available. if self._middleware_chain is None: self.load_middleware(is_async=True) # Extract body file from the scope, if provided. if '_body_file' in scope: body_file = scope.pop('_body_file') else: body_file = FakePayload('') request_started.disconnect(close_old_connections) await sync_to_async(request_started.send, thread_sensitive=False)(sender=self.__class__, scope=scope) request_started.connect(close_old_connections) request = ASGIRequest(scope, body_file) # Sneaky little hack so that we can easily get round # CsrfViewMiddleware. This makes life easier, and is probably required # for backwards compatibility with external tests against admin views. request._dont_enforce_csrf_checks = not self.enforce_csrf_checks # Request goes through middleware. response = await self.get_response_async(request) # Simulate behaviors of most web servers. conditional_content_removal(request, response) # Attach the originating ASGI request to the response so that it could # be later retrieved. response.asgi_request = request # Emulate a server by calling the close method on completion. 
if response.streaming: response.streaming_content = await sync_to_async(closing_iterator_wrapper, thread_sensitive=False)( response.streaming_content, response.close, ) else: request_finished.disconnect(close_old_connections) # Will fire request_finished. await sync_to_async(response.close, thread_sensitive=False)() request_finished.connect(close_old_connections) return response def store_rendered_templates(store, signal, sender, template, context, **kwargs): """ Store templates and contexts that are rendered. The context is copied so that it is an accurate representation at the time of rendering. """ store.setdefault('templates', []).append(template) if 'context' not in store: store['context'] = ContextList() store['context'].append(copy(context)) def encode_multipart(boundary, data): """ Encode multipart POST data from a dictionary of form values. The key will be used as the form data name; the value will be transmitted as content. If the value is a file, the contents of the file will be sent as an application/octet-stream; otherwise, str(value) will be sent. """ lines = [] def to_bytes(s): return force_bytes(s, settings.DEFAULT_CHARSET) # Not by any means perfect, but good enough for our purposes. def is_file(thing): return hasattr(thing, "read") and callable(thing.read) # Each bit of the multipart form data could be either a form value or a # file, or a *list* of form values and/or files. Remember that HTTP field # names can be duplicated! for (key, value) in data.items(): if value is None: raise TypeError( "Cannot encode None for key '%s' as POST data. Did you mean " "to pass an empty string or omit the value?" % key ) elif is_file(value): lines.extend(encode_file(boundary, key, value)) elif not isinstance(value, str) and is_iterable(value): for item in value: if is_file(item): lines.extend(encode_file(boundary, key, item)) else: lines.extend(to_bytes(val) for val in [ '--%s' % boundary, 'Content-Disposition: form-data; name="%s"' % key, '', item ]) else: lines.extend(to_bytes(val) for val in [ '--%s' % boundary, 'Content-Disposition: form-data; name="%s"' % key, '', value ]) lines.extend([ to_bytes('--%s--' % boundary), b'', ]) return b'\r\n'.join(lines) def encode_file(boundary, key, file): def to_bytes(s): return force_bytes(s, settings.DEFAULT_CHARSET) # file.name might not be a string. For example, it's an int for # tempfile.TemporaryFile(). file_has_string_name = hasattr(file, 'name') and isinstance(file.name, str) filename = os.path.basename(file.name) if file_has_string_name else '' if hasattr(file, 'content_type'): content_type = file.content_type elif filename: content_type = mimetypes.guess_type(filename)[0] else: content_type = None if content_type is None: content_type = 'application/octet-stream' filename = filename or key return [ to_bytes('--%s' % boundary), to_bytes('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename)), to_bytes('Content-Type: %s' % content_type), b'', to_bytes(file.read()) ] class RequestFactory: """ Class that lets you create mock Request objects for use in testing. Usage: rf = RequestFactory() get_request = rf.get('/hello/') post_request = rf.post('/submit/', {'foo': 'bar'}) Once you have a request object you can pass it to any view function, just as if that view had been hooked up using a URLconf. 
""" def __init__(self, *, json_encoder=DjangoJSONEncoder, **defaults): self.json_encoder = json_encoder self.defaults = defaults self.cookies = SimpleCookie() self.errors = BytesIO() def _base_environ(self, **request): """ The base environment for a request. """ # This is a minimal valid WSGI environ dictionary, plus: # - HTTP_COOKIE: for cookie support, # - REMOTE_ADDR: often useful, see #8551. # See https://www.python.org/dev/peps/pep-3333/#environ-variables return { 'HTTP_COOKIE': '; '.join(sorted( '%s=%s' % (morsel.key, morsel.coded_value) for morsel in self.cookies.values() )), 'PATH_INFO': '/', 'REMOTE_ADDR': '127.0.0.1', 'REQUEST_METHOD': 'GET', 'SCRIPT_NAME': '', 'SERVER_NAME': 'testserver', 'SERVER_PORT': '80', 'SERVER_PROTOCOL': 'HTTP/1.1', 'wsgi.version': (1, 0), 'wsgi.url_scheme': 'http', 'wsgi.input': FakePayload(b''), 'wsgi.errors': self.errors, 'wsgi.multiprocess': True, 'wsgi.multithread': False, 'wsgi.run_once': False, **self.defaults, **request, } def request(self, **request): "Construct a generic request object." return WSGIRequest(self._base_environ(**request)) def _encode_data(self, data, content_type): if content_type is MULTIPART_CONTENT: return encode_multipart(BOUNDARY, data) else: # Encode the content so that the byte representation is correct. match = CONTENT_TYPE_RE.match(content_type) if match: charset = match[1] else: charset = settings.DEFAULT_CHARSET return force_bytes(data, encoding=charset) def _encode_json(self, data, content_type): """ Return encoded JSON if data is a dict, list, or tuple and content_type is application/json. """ should_encode = JSON_CONTENT_TYPE_RE.match(content_type) and isinstance(data, (dict, list, tuple)) return json.dumps(data, cls=self.json_encoder) if should_encode else data def _get_path(self, parsed): path = parsed.path # If there are parameters, add them if parsed.params: path += ";" + parsed.params path = unquote_to_bytes(path) # Replace the behavior where non-ASCII values in the WSGI environ are # arbitrarily decoded with ISO-8859-1. # Refs comment in `get_bytes_from_wsgi()`. return path.decode('iso-8859-1') def get(self, path, data=None, secure=False, **extra): """Construct a GET request.""" data = {} if data is None else data return self.generic('GET', path, secure=secure, **{ 'QUERY_STRING': urlencode(data, doseq=True), **extra, }) def post(self, path, data=None, content_type=MULTIPART_CONTENT, secure=False, **extra): """Construct a POST request.""" data = self._encode_json({} if data is None else data, content_type) post_data = self._encode_data(data, content_type) return self.generic('POST', path, post_data, content_type, secure=secure, **extra) def head(self, path, data=None, secure=False, **extra): """Construct a HEAD request.""" data = {} if data is None else data return self.generic('HEAD', path, secure=secure, **{ 'QUERY_STRING': urlencode(data, doseq=True), **extra, }) def trace(self, path, secure=False, **extra): """Construct a TRACE request.""" return self.generic('TRACE', path, secure=secure, **extra) def options(self, path, data='', content_type='application/octet-stream', secure=False, **extra): "Construct an OPTIONS request." 
        return self.generic('OPTIONS', path, data, content_type, secure=secure, **extra)

    def put(self, path, data='', content_type='application/octet-stream', secure=False, **extra):
        """Construct a PUT request."""
        data = self._encode_json(data, content_type)
        return self.generic('PUT', path, data, content_type, secure=secure, **extra)

    def patch(self, path, data='', content_type='application/octet-stream', secure=False, **extra):
        """Construct a PATCH request."""
        data = self._encode_json(data, content_type)
        return self.generic('PATCH', path, data, content_type, secure=secure, **extra)

    def delete(self, path, data='', content_type='application/octet-stream', secure=False, **extra):
        """Construct a DELETE request."""
        data = self._encode_json(data, content_type)
        return self.generic('DELETE', path, data, content_type, secure=secure, **extra)

    def generic(self, method, path, data='', content_type='application/octet-stream', secure=False, **extra):
        """Construct an arbitrary HTTP request."""
        parsed = urlparse(str(path))  # path can be lazy
        data = force_bytes(data, settings.DEFAULT_CHARSET)
        r = {
            'PATH_INFO': self._get_path(parsed),
            'REQUEST_METHOD': method,
            'SERVER_PORT': '443' if secure else '80',
            'wsgi.url_scheme': 'https' if secure else 'http',
        }
        if data:
            r.update({
                'CONTENT_LENGTH': str(len(data)),
                'CONTENT_TYPE': content_type,
                'wsgi.input': FakePayload(data),
            })
        r.update(extra)
        # If QUERY_STRING is absent or empty, we want to extract it from the URL.
        if not r.get('QUERY_STRING'):
            # WSGI requires latin-1 encoded strings. See get_path_info().
            query_string = parsed[4].encode().decode('iso-8859-1')
            r['QUERY_STRING'] = query_string
        return self.request(**r)


class AsyncRequestFactory(RequestFactory):
    """
    Class that lets you create mock ASGI-like Request objects for use in
    testing. Usage:

        rf = AsyncRequestFactory()
        get_request = rf.get('/hello/')
        post_request = rf.post('/submit/', {'foo': 'bar'})

    These calls aren't awaited: request construction is synchronous (see
    request() below). Once you have a request object you can pass it to any
    view function, including synchronous ones. The reason we have a separate
    class here is: a) this creates ASGIRequests rather than WSGIRequests, and
    b) AsyncClient can subclass it.
    """
    def _base_scope(self, **request):
        """The base scope for a request."""
        # This is a minimal valid ASGI scope, plus:
        # - headers['cookie'] for cookie support,
        # - 'client' often useful, see #8551.
        scope = {
            'asgi': {'version': '3.0'},
            'type': 'http',
            'http_version': '1.1',
            'client': ['127.0.0.1', 0],
            'server': ('testserver', '80'),
            'scheme': 'http',
            'method': 'GET',
            'headers': [],
            **self.defaults,
            **request,
        }
        scope['headers'].append((
            b'cookie',
            b'; '.join(sorted(
                ('%s=%s' % (morsel.key, morsel.coded_value)).encode('ascii')
                for morsel in self.cookies.values()
            )),
        ))
        return scope

    def request(self, **request):
        """Construct a generic request object."""
        # This is synchronous, which means all methods on this class are.
        # AsyncClient, however, has an async request function, which makes all
        # its methods async.
        if '_body_file' in request:
            body_file = request.pop('_body_file')
        else:
            body_file = FakePayload('')
        return ASGIRequest(self._base_scope(**request), body_file)

    def generic(
        self, method, path, data='', content_type='application/octet-stream',
        secure=False, **extra,
    ):
        """Construct an arbitrary HTTP request."""
        parsed = urlparse(str(path))  # path can be lazy.
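        # Unlike the WSGI flavour above, the request is described by an ASGI
        # scope dict: headers are (name, value) byte pairs rather than
        # HTTP_-prefixed environ keys, and the body travels via '_body_file'.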
data = force_bytes(data, settings.DEFAULT_CHARSET) s = { 'method': method, 'path': self._get_path(parsed), 'server': ('127.0.0.1', '443' if secure else '80'), 'scheme': 'https' if secure else 'http', 'headers': [(b'host', b'testserver')], } if data: s['headers'].extend([ (b'content-length', str(len(data)).encode('ascii')), (b'content-type', content_type.encode('ascii')), ]) s['_body_file'] = FakePayload(data) follow = extra.pop('follow', None) if follow is not None: s['follow'] = follow if query_string := extra.pop('QUERY_STRING', None): s['query_string'] = query_string s['headers'] += [ (key.lower().encode('ascii'), value.encode('latin1')) for key, value in extra.items() ] # If QUERY_STRING is absent or empty, we want to extract it from the # URL. if not s.get('query_string'): s['query_string'] = parsed[4] return self.request(**s) class ClientMixin: """ Mixin with common methods between Client and AsyncClient. """ def store_exc_info(self, **kwargs): """Store exceptions when they are generated by a view.""" self.exc_info = sys.exc_info() def check_exception(self, response): """ Look for a signaled exception, clear the current context exception data, re-raise the signaled exception, and clear the signaled exception from the local cache. """ response.exc_info = self.exc_info if self.exc_info: _, exc_value, _ = self.exc_info self.exc_info = None if self.raise_request_exception: raise exc_value @property def session(self): """Return the current session variables.""" engine = import_module(settings.SESSION_ENGINE) cookie = self.cookies.get(settings.SESSION_COOKIE_NAME) if cookie: return engine.SessionStore(cookie.value) session = engine.SessionStore() session.save() self.cookies[settings.SESSION_COOKIE_NAME] = session.session_key return session def login(self, **credentials): """ Set the Factory to appear as if it has successfully logged into a site. Return True if login is possible or False if the provided credentials are incorrect. """ from django.contrib.auth import authenticate user = authenticate(**credentials) if user: self._login(user) return True return False def force_login(self, user, backend=None): def get_backend(): from django.contrib.auth import load_backend for backend_path in settings.AUTHENTICATION_BACKENDS: backend = load_backend(backend_path) if hasattr(backend, 'get_user'): return backend_path if backend is None: backend = get_backend() user.backend = backend self._login(user, backend) def _login(self, user, backend=None): from django.contrib.auth import login # Create a fake request to store login details. request = HttpRequest() if self.session: request.session = self.session else: engine = import_module(settings.SESSION_ENGINE) request.session = engine.SessionStore() login(request, user, backend) # Save the session values. request.session.save() # Set the cookie to represent the session. 
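        # (This mirrors what SessionMiddleware does on a real response, so
        # later requests made through the same client send the logged-in
        # session cookie automatically.)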
session_cookie = settings.SESSION_COOKIE_NAME self.cookies[session_cookie] = request.session.session_key cookie_data = { 'max-age': None, 'path': '/', 'domain': settings.SESSION_COOKIE_DOMAIN, 'secure': settings.SESSION_COOKIE_SECURE or None, 'expires': None, } self.cookies[session_cookie].update(cookie_data) def logout(self): """Log out the user by removing the cookies and session object.""" from django.contrib.auth import get_user, logout request = HttpRequest() if self.session: request.session = self.session request.user = get_user(request) else: engine = import_module(settings.SESSION_ENGINE) request.session = engine.SessionStore() logout(request) self.cookies = SimpleCookie() def _parse_json(self, response, **extra): if not hasattr(response, '_json'): if not JSON_CONTENT_TYPE_RE.match(response.get('Content-Type')): raise ValueError( 'Content-Type header is "%s", not "application/json"' % response.get('Content-Type') ) response._json = json.loads(response.content.decode(response.charset), **extra) return response._json class Client(ClientMixin, RequestFactory): """ A class that can act as a client for testing purposes. It allows the user to compose GET and POST requests, and obtain the response that the server gave to those requests. The server Response objects are annotated with the details of the contexts and templates that were rendered during the process of serving the request. Client objects are stateful - they will retain cookie (and thus session) details for the lifetime of the Client instance. This is not intended as a replacement for Twill/Selenium or the like - it is here to allow testing against the contexts and templates produced by a view, rather than the HTML rendered to the end-user. """ def __init__(self, enforce_csrf_checks=False, raise_request_exception=True, **defaults): super().__init__(**defaults) self.handler = ClientHandler(enforce_csrf_checks) self.raise_request_exception = raise_request_exception self.exc_info = None self.extra = None def request(self, **request): """ The master request method. Compose the environment dictionary and pass to the handler, return the result of the handler. Assume defaults for the query environment, which can be overridden using the arguments to the request. """ environ = self._base_environ(**request) # Curry a data dictionary into an instance of the template renderer # callback function. data = {} on_template_render = partial(store_rendered_templates, data) signal_uid = "template-render-%s" % id(request) signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid) # Capture exceptions created by the handler. exception_uid = "request-exception-%s" % id(request) got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid) try: response = self.handler(environ) finally: signals.template_rendered.disconnect(dispatch_uid=signal_uid) got_request_exception.disconnect(dispatch_uid=exception_uid) # Check for signaled exceptions. self.check_exception(response) # Save the client and request that stimulated the response. response.client = self response.request = request # Add any rendered template detail to the response. response.templates = data.get('templates', []) response.context = data.get('context') response.json = partial(self._parse_json, response) # Attach the ResolverMatch instance to the response. urlconf = getattr(response.wsgi_request, 'urlconf', None) response.resolver_match = SimpleLazyObject( lambda: resolve(request['PATH_INFO'], urlconf=urlconf), ) # Flatten a single context. 
Not really necessary anymore thanks to the # __getattr__ flattening in ContextList, but has some edge case # backwards compatibility implications. if response.context and len(response.context) == 1: response.context = response.context[0] # Update persistent cookie data. if response.cookies: self.cookies.update(response.cookies) return response def get(self, path, data=None, follow=False, secure=False, **extra): """Request a response from the server using GET.""" self.extra = extra response = super().get(path, data=data, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, **extra) return response def post(self, path, data=None, content_type=MULTIPART_CONTENT, follow=False, secure=False, **extra): """Request a response from the server using POST.""" self.extra = extra response = super().post(path, data=data, content_type=content_type, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, content_type=content_type, **extra) return response def head(self, path, data=None, follow=False, secure=False, **extra): """Request a response from the server using HEAD.""" self.extra = extra response = super().head(path, data=data, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, **extra) return response def options(self, path, data='', content_type='application/octet-stream', follow=False, secure=False, **extra): """Request a response from the server using OPTIONS.""" self.extra = extra response = super().options(path, data=data, content_type=content_type, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, content_type=content_type, **extra) return response def put(self, path, data='', content_type='application/octet-stream', follow=False, secure=False, **extra): """Send a resource to the server using PUT.""" self.extra = extra response = super().put(path, data=data, content_type=content_type, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, content_type=content_type, **extra) return response def patch(self, path, data='', content_type='application/octet-stream', follow=False, secure=False, **extra): """Send a resource to the server using PATCH.""" self.extra = extra response = super().patch(path, data=data, content_type=content_type, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, content_type=content_type, **extra) return response def delete(self, path, data='', content_type='application/octet-stream', follow=False, secure=False, **extra): """Send a DELETE request to the server.""" self.extra = extra response = super().delete(path, data=data, content_type=content_type, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, content_type=content_type, **extra) return response def trace(self, path, data='', follow=False, secure=False, **extra): """Send a TRACE request to the server.""" self.extra = extra response = super().trace(path, data=data, secure=secure, **extra) if follow: response = self._handle_redirects(response, data=data, **extra) return response def _handle_redirects(self, response, data='', content_type='', **extra): """ Follow any redirects by requesting responses from the server using GET. 
""" response.redirect_chain = [] redirect_status_codes = ( HTTPStatus.MOVED_PERMANENTLY, HTTPStatus.FOUND, HTTPStatus.SEE_OTHER, HTTPStatus.TEMPORARY_REDIRECT, HTTPStatus.PERMANENT_REDIRECT, ) while response.status_code in redirect_status_codes: response_url = response.url redirect_chain = response.redirect_chain redirect_chain.append((response_url, response.status_code)) url = urlsplit(response_url) if url.scheme: extra['wsgi.url_scheme'] = url.scheme if url.hostname: extra['SERVER_NAME'] = url.hostname if url.port: extra['SERVER_PORT'] = str(url.port) # Prepend the request path to handle relative path redirects path = url.path or '/' if not path.startswith('/'): path = urljoin(response.request['PATH_INFO'], path) if response.status_code in (HTTPStatus.TEMPORARY_REDIRECT, HTTPStatus.PERMANENT_REDIRECT): # Preserve request method and query string (if needed) # post-redirect for 307/308 responses. request_method = response.request['REQUEST_METHOD'].lower() if request_method not in ('get', 'head'): extra['QUERY_STRING'] = url.query request_method = getattr(self, request_method) else: request_method = self.get data = QueryDict(url.query) content_type = None response = request_method(path, data=data, content_type=content_type, follow=False, **extra) response.redirect_chain = redirect_chain if redirect_chain[-1] in redirect_chain[:-1]: # Check that we're not redirecting to somewhere we've already # been to, to prevent loops. raise RedirectCycleError("Redirect loop detected.", last_response=response) if len(redirect_chain) > 20: # Such a lengthy chain likely also means a loop, but one with # a growing path, changing view, or changing query argument; # 20 is the value of "network.http.redirection-limit" from Firefox. raise RedirectCycleError("Too many redirects.", last_response=response) return response class AsyncClient(ClientMixin, AsyncRequestFactory): """ An async version of Client that creates ASGIRequests and calls through an async request path. Does not currently support "follow" on its methods. """ def __init__(self, enforce_csrf_checks=False, raise_request_exception=True, **defaults): super().__init__(**defaults) self.handler = AsyncClientHandler(enforce_csrf_checks) self.raise_request_exception = raise_request_exception self.exc_info = None self.extra = None async def request(self, **request): """ The master request method. Compose the scope dictionary and pass to the handler, return the result of the handler. Assume defaults for the query environment, which can be overridden using the arguments to the request. """ if 'follow' in request: raise NotImplementedError( 'AsyncClient request methods do not accept the follow ' 'parameter.' ) scope = self._base_scope(**request) # Curry a data dictionary into an instance of the template renderer # callback function. data = {} on_template_render = partial(store_rendered_templates, data) signal_uid = 'template-render-%s' % id(request) signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid) # Capture exceptions created by the handler. exception_uid = 'request-exception-%s' % id(request) got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid) try: response = await self.handler(scope) finally: signals.template_rendered.disconnect(dispatch_uid=signal_uid) got_request_exception.disconnect(dispatch_uid=exception_uid) # Check for signaled exceptions. self.check_exception(response) # Save the client and request that stimulated the response. 
response.client = self response.request = request # Add any rendered template detail to the response. response.templates = data.get('templates', []) response.context = data.get('context') response.json = partial(self._parse_json, response) # Attach the ResolverMatch instance to the response. urlconf = getattr(response.asgi_request, 'urlconf', None) response.resolver_match = SimpleLazyObject( lambda: resolve(request['path'], urlconf=urlconf), ) # Flatten a single context. Not really necessary anymore thanks to the # __getattr__ flattening in ContextList, but has some edge case # backwards compatibility implications. if response.context and len(response.context) == 1: response.context = response.context[0] # Update persistent cookie data. if response.cookies: self.cookies.update(response.cookies) return response
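
# A minimal usage sketch for the async client (the URL is illustrative; an
# async test method is converted via async_to_sync by SimpleTestCase, and
# self.async_client is created in SimpleTestCase._pre_setup()):
#
#     from django.test import SimpleTestCase
#
#     class PingTests(SimpleTestCase):
#         async def test_ping(self):
#             response = await self.async_client.get('/ping/')
#             self.assertEqual(response.status_code, 200)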
cfc0b4be1ea8504e3541a24bd08731edcb0aeda55b7627871f0781a7fd0e4a02
import asyncio import difflib import json import logging import posixpath import sys import threading import unittest import warnings from collections import Counter from contextlib import contextmanager from copy import copy, deepcopy from difflib import get_close_matches from functools import wraps from unittest.suite import _DebugResult from unittest.util import safe_repr from urllib.parse import ( parse_qsl, unquote, urlencode, urljoin, urlparse, urlsplit, urlunparse, ) from urllib.request import url2pathname from asgiref.sync import async_to_sync from django.apps import apps from django.conf import settings from django.core import mail from django.core.exceptions import ImproperlyConfigured, ValidationError from django.core.files import locks from django.core.handlers.wsgi import WSGIHandler, get_path_info from django.core.management import call_command from django.core.management.color import no_style from django.core.management.sql import emit_post_migrate_signal from django.core.servers.basehttp import ThreadedWSGIServer, WSGIRequestHandler from django.db import DEFAULT_DB_ALIAS, connection, connections, transaction from django.forms.fields import CharField from django.http import QueryDict from django.http.request import split_domain_port, validate_host from django.test.client import AsyncClient, Client from django.test.html import HTMLParseError, parse_html from django.test.signals import setting_changed, template_rendered from django.test.utils import ( CaptureQueriesContext, ContextList, compare_xml, modify_settings, override_settings, ) from django.utils.deprecation import RemovedInDjango41Warning from django.utils.functional import classproperty from django.utils.version import PY310 from django.views.static import serve __all__ = ('TestCase', 'TransactionTestCase', 'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature') def to_list(value): """ Put value into a list if it's not already one. Return an empty list if value is None. """ if value is None: value = [] elif not isinstance(value, list): value = [value] return value def assert_and_parse_html(self, html, user_msg, msg): try: dom = parse_html(html) except HTMLParseError as e: standardMsg = '%s\n%s' % (msg, e) self.fail(self._formatMessage(user_msg, standardMsg)) return dom class _AssertNumQueriesContext(CaptureQueriesContext): def __init__(self, test_case, num, connection): self.test_case = test_case self.num = num super().__init__(connection) def __exit__(self, exc_type, exc_value, traceback): super().__exit__(exc_type, exc_value, traceback) if exc_type is not None: return executed = len(self) self.test_case.assertEqual( executed, self.num, "%d queries executed, %d expected\nCaptured queries were:\n%s" % ( executed, self.num, '\n'.join( '%d. %s' % (i, query['sql']) for i, query in enumerate(self.captured_queries, start=1) ) ) ) class _AssertTemplateUsedContext: def __init__(self, test_case, template_name): self.test_case = test_case self.template_name = template_name self.rendered_templates = [] self.rendered_template_names = [] self.context = ContextList() def on_template_render(self, sender, signal, template, context, **kwargs): self.rendered_templates.append(template) self.rendered_template_names.append(template.name) self.context.append(copy(context)) def test(self): return self.template_name in self.rendered_template_names def message(self): return '%s was not rendered.' 
% self.template_name def __enter__(self): template_rendered.connect(self.on_template_render) return self def __exit__(self, exc_type, exc_value, traceback): template_rendered.disconnect(self.on_template_render) if exc_type is not None: return if not self.test(): message = self.message() if self.rendered_templates: message += ' Following templates were rendered: %s' % ( ', '.join(self.rendered_template_names) ) else: message += ' No template was rendered.' self.test_case.fail(message) class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext): def test(self): return self.template_name not in self.rendered_template_names def message(self): return '%s was rendered.' % self.template_name class _DatabaseFailure: def __init__(self, wrapped, message): self.wrapped = wrapped self.message = message def __call__(self): raise AssertionError(self.message) class SimpleTestCase(unittest.TestCase): # The class we'll use for the test client self.client. # Can be overridden in derived classes. client_class = Client async_client_class = AsyncClient _overridden_settings = None _modified_settings = None databases = set() _disallowed_database_msg = ( 'Database %(operation)s to %(alias)r are not allowed in SimpleTestCase ' 'subclasses. Either subclass TestCase or TransactionTestCase to ensure ' 'proper test isolation or add %(alias)r to %(test)s.databases to silence ' 'this failure.' ) _disallowed_connection_methods = [ ('connect', 'connections'), ('temporary_connection', 'connections'), ('cursor', 'queries'), ('chunked_cursor', 'queries'), ] @classmethod def setUpClass(cls): super().setUpClass() if cls._overridden_settings: cls._cls_overridden_context = override_settings(**cls._overridden_settings) cls._cls_overridden_context.enable() cls.addClassCleanup(cls._cls_overridden_context.disable) if cls._modified_settings: cls._cls_modified_context = modify_settings(cls._modified_settings) cls._cls_modified_context.enable() cls.addClassCleanup(cls._cls_modified_context.disable) cls._add_databases_failures() cls.addClassCleanup(cls._remove_databases_failures) @classmethod def _validate_databases(cls): if cls.databases == '__all__': return frozenset(connections) for alias in cls.databases: if alias not in connections: message = '%s.%s.databases refers to %r which is not defined in settings.DATABASES.' % ( cls.__module__, cls.__qualname__, alias, ) close_matches = get_close_matches(alias, list(connections)) if close_matches: message += ' Did you mean %r?' % close_matches[0] raise ImproperlyConfigured(message) return frozenset(cls.databases) @classmethod def _add_databases_failures(cls): cls.databases = cls._validate_databases() for alias in connections: if alias in cls.databases: continue connection = connections[alias] for name, operation in cls._disallowed_connection_methods: message = cls._disallowed_database_msg % { 'test': '%s.%s' % (cls.__module__, cls.__qualname__), 'alias': alias, 'operation': operation, } method = getattr(connection, name) setattr(connection, name, _DatabaseFailure(method, message)) @classmethod def _remove_databases_failures(cls): for alias in connections: if alias in cls.databases: continue connection = connections[alias] for name, _ in cls._disallowed_connection_methods: method = getattr(connection, name) setattr(connection, name, method.wrapped) def __call__(self, result=None): """ Wrapper around default __call__ method to perform common Django test set up. This means that user-defined Test Cases aren't required to include a call to super().setUp(). 
""" self._setup_and_call(result) def debug(self): """Perform the same as __call__(), without catching the exception.""" debug_result = _DebugResult() self._setup_and_call(debug_result, debug=True) def _setup_and_call(self, result, debug=False): """ Perform the following in order: pre-setup, run test, post-teardown, skipping pre/post hooks if test is set to be skipped. If debug=True, reraise any errors in setup and use super().debug() instead of __call__() to run the test. """ testMethod = getattr(self, self._testMethodName) skipped = ( getattr(self.__class__, "__unittest_skip__", False) or getattr(testMethod, "__unittest_skip__", False) ) # Convert async test methods. if asyncio.iscoroutinefunction(testMethod): setattr(self, self._testMethodName, async_to_sync(testMethod)) if not skipped: try: self._pre_setup() except Exception: if debug: raise result.addError(self, sys.exc_info()) return if debug: super().debug() else: super().__call__(result) if not skipped: try: self._post_teardown() except Exception: if debug: raise result.addError(self, sys.exc_info()) return def _pre_setup(self): """ Perform pre-test setup: * Create a test client. * Clear the mail test outbox. """ self.client = self.client_class() self.async_client = self.async_client_class() mail.outbox = [] def _post_teardown(self): """Perform post-test things.""" pass def settings(self, **kwargs): """ A context manager that temporarily sets a setting and reverts to the original value when exiting the context. """ return override_settings(**kwargs) def modify_settings(self, **kwargs): """ A context manager that temporarily applies changes a list setting and reverts back to the original value when exiting the context. """ return modify_settings(**kwargs) def assertRedirects(self, response, expected_url, status_code=302, target_status_code=200, msg_prefix='', fetch_redirect_response=True): """ Assert that a response redirected to a specific URL and that the redirect URL can be loaded. Won't work for external links since it uses the test client to do a request (use fetch_redirect_response=False to check such links without fetching them). """ if msg_prefix: msg_prefix += ": " if hasattr(response, 'redirect_chain'): # The request was a followed redirect self.assertTrue( response.redirect_chain, msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)" % (response.status_code, status_code) ) self.assertEqual( response.redirect_chain[0][1], status_code, msg_prefix + "Initial response didn't redirect as expected: Response code was %d (expected %d)" % (response.redirect_chain[0][1], status_code) ) url, status_code = response.redirect_chain[-1] self.assertEqual( response.status_code, target_status_code, msg_prefix + "Response didn't redirect as expected: Final Response code was %d (expected %d)" % (response.status_code, target_status_code) ) else: # Not a followed redirect self.assertEqual( response.status_code, status_code, msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)" % (response.status_code, status_code) ) url = response.url scheme, netloc, path, query, fragment = urlsplit(url) # Prepend the request path to handle relative path redirects. if not path.startswith('/'): url = urljoin(response.request['PATH_INFO'], url) path = urljoin(response.request['PATH_INFO'], path) if fetch_redirect_response: # netloc might be empty, or in cases where Django tests the # HTTP scheme, the convention is for netloc to be 'testserver'. # Trust both as "internal" URLs here. 
domain, port = split_domain_port(netloc) if domain and not validate_host(domain, settings.ALLOWED_HOSTS): raise ValueError( "The test client is unable to fetch remote URLs (got %s). " "If the host is served by Django, add '%s' to ALLOWED_HOSTS. " "Otherwise, use assertRedirects(..., fetch_redirect_response=False)." % (url, domain) ) # Get the redirection page, using the same client that was used # to obtain the original response. extra = response.client.extra or {} redirect_response = response.client.get( path, QueryDict(query), secure=(scheme == 'https'), **extra, ) self.assertEqual( redirect_response.status_code, target_status_code, msg_prefix + "Couldn't retrieve redirection page '%s': response code was %d (expected %d)" % (path, redirect_response.status_code, target_status_code) ) self.assertURLEqual( url, expected_url, msg_prefix + "Response redirected to '%s', expected '%s'" % (url, expected_url) ) def assertURLEqual(self, url1, url2, msg_prefix=''): """ Assert that two URLs are the same, ignoring the order of query string parameters except for parameters with the same name. For example, /path/?x=1&y=2 is equal to /path/?y=2&x=1, but /path/?a=1&a=2 isn't equal to /path/?a=2&a=1. """ def normalize(url): """Sort the URL's query string parameters.""" url = str(url) # Coerce reverse_lazy() URLs. scheme, netloc, path, params, query, fragment = urlparse(url) query_parts = sorted(parse_qsl(query)) return urlunparse((scheme, netloc, path, params, urlencode(query_parts), fragment)) self.assertEqual( normalize(url1), normalize(url2), msg_prefix + "Expected '%s' to equal '%s'." % (url1, url2) ) def _assert_contains(self, response, text, status_code, msg_prefix, html): # If the response supports deferred rendering and hasn't been rendered # yet, then ensure that it does get rendered before proceeding further. if hasattr(response, 'render') and callable(response.render) and not response.is_rendered: response.render() if msg_prefix: msg_prefix += ": " self.assertEqual( response.status_code, status_code, msg_prefix + "Couldn't retrieve content: Response code was %d" " (expected %d)" % (response.status_code, status_code) ) if response.streaming: content = b''.join(response.streaming_content) else: content = response.content if not isinstance(text, bytes) or html: text = str(text) content = content.decode(response.charset) text_repr = "'%s'" % text else: text_repr = repr(text) if html: content = assert_and_parse_html(self, content, None, "Response's content is not valid HTML:") text = assert_and_parse_html(self, text, None, "Second argument is not valid HTML:") real_count = content.count(text) return (text_repr, real_count, msg_prefix) def assertContains(self, response, text, count=None, status_code=200, msg_prefix='', html=False): """ Assert that a response indicates that some content was retrieved successfully, (i.e., the HTTP status code was as expected) and that ``text`` occurs ``count`` times in the content of the response. If ``count`` is None, the count doesn't matter - the assertion is true if the text occurs at least once in the response. 
""" text_repr, real_count, msg_prefix = self._assert_contains( response, text, status_code, msg_prefix, html) if count is not None: self.assertEqual( real_count, count, msg_prefix + "Found %d instances of %s in response (expected %d)" % (real_count, text_repr, count) ) else: self.assertTrue(real_count != 0, msg_prefix + "Couldn't find %s in response" % text_repr) def assertNotContains(self, response, text, status_code=200, msg_prefix='', html=False): """ Assert that a response indicates that some content was retrieved successfully, (i.e., the HTTP status code was as expected) and that ``text`` doesn't occur in the content of the response. """ text_repr, real_count, msg_prefix = self._assert_contains( response, text, status_code, msg_prefix, html) self.assertEqual(real_count, 0, msg_prefix + "Response should not contain %s" % text_repr) def assertFormError(self, response, form, field, errors, msg_prefix=''): """ Assert that a form used to render the response has a specific field error. """ if msg_prefix: msg_prefix += ": " # Put context(s) into a list to simplify processing. contexts = to_list(response.context) if not contexts: self.fail(msg_prefix + "Response did not use any contexts to render the response") # Put error(s) into a list to simplify processing. errors = to_list(errors) # Search all contexts for the error. found_form = False for i, context in enumerate(contexts): if form not in context: continue found_form = True for err in errors: if field: if field in context[form].errors: field_errors = context[form].errors[field] self.assertTrue( err in field_errors, msg_prefix + "The field '%s' on form '%s' in" " context %d does not contain the error '%s'" " (actual errors: %s)" % (field, form, i, err, repr(field_errors)) ) elif field in context[form].fields: self.fail( msg_prefix + "The field '%s' on form '%s' in context %d contains no errors" % (field, form, i) ) else: self.fail( msg_prefix + "The form '%s' in context %d does not contain the field '%s'" % (form, i, field) ) else: non_field_errors = context[form].non_field_errors() self.assertTrue( err in non_field_errors, msg_prefix + "The form '%s' in context %d does not" " contain the non-field error '%s'" " (actual errors: %s)" % (form, i, err, non_field_errors or 'none') ) if not found_form: self.fail(msg_prefix + "The form '%s' was not used to render the response" % form) def assertFormsetError(self, response, formset, form_index, field, errors, msg_prefix=''): """ Assert that a formset used to render the response has a specific error. For field errors, specify the ``form_index`` and the ``field``. For non-field errors, specify the ``form_index`` and the ``field`` as None. For non-form errors, specify ``form_index`` as None and the ``field`` as None. """ # Add punctuation to msg_prefix if msg_prefix: msg_prefix += ": " # Put context(s) into a list to simplify processing. contexts = to_list(response.context) if not contexts: self.fail(msg_prefix + 'Response did not use any contexts to ' 'render the response') # Put error(s) into a list to simplify processing. errors = to_list(errors) # Search all contexts for the error. 
found_formset = False for i, context in enumerate(contexts): if formset not in context: continue found_formset = True for err in errors: if field is not None: if field in context[formset].forms[form_index].errors: field_errors = context[formset].forms[form_index].errors[field] self.assertTrue( err in field_errors, msg_prefix + "The field '%s' on formset '%s', " "form %d in context %d does not contain the " "error '%s' (actual errors: %s)" % (field, formset, form_index, i, err, repr(field_errors)) ) elif field in context[formset].forms[form_index].fields: self.fail( msg_prefix + "The field '%s' on formset '%s', form %d in context %d contains no errors" % (field, formset, form_index, i) ) else: self.fail( msg_prefix + "The formset '%s', form %d in context %d does not contain the field '%s'" % (formset, form_index, i, field) ) elif form_index is not None: non_field_errors = context[formset].forms[form_index].non_field_errors() self.assertFalse( not non_field_errors, msg_prefix + "The formset '%s', form %d in context %d " "does not contain any non-field errors." % (formset, form_index, i) ) self.assertTrue( err in non_field_errors, msg_prefix + "The formset '%s', form %d in context %d " "does not contain the non-field error '%s' (actual errors: %s)" % (formset, form_index, i, err, repr(non_field_errors)) ) else: non_form_errors = context[formset].non_form_errors() self.assertFalse( not non_form_errors, msg_prefix + "The formset '%s' in context %d does not " "contain any non-form errors." % (formset, i) ) self.assertTrue( err in non_form_errors, msg_prefix + "The formset '%s' in context %d does not " "contain the non-form error '%s' (actual errors: %s)" % (formset, i, err, repr(non_form_errors)) ) if not found_formset: self.fail(msg_prefix + "The formset '%s' was not used to render the response" % formset) def _assert_template_used(self, response, template_name, msg_prefix): if response is None and template_name is None: raise TypeError('response and/or template_name argument must be provided') if msg_prefix: msg_prefix += ": " if template_name is not None and response is not None and not hasattr(response, 'templates'): raise ValueError( "assertTemplateUsed() and assertTemplateNotUsed() are only " "usable on responses fetched using the Django test Client." ) if not hasattr(response, 'templates') or (response is None and template_name): if response: template_name = response response = None # use this template with context manager return template_name, None, msg_prefix template_names = [t.name for t in response.templates if t.name is not None] return None, template_names, msg_prefix def assertTemplateUsed(self, response=None, template_name=None, msg_prefix='', count=None): """ Assert that the template with the provided name was used in rendering the response. Also usable as context manager. """ context_mgr_template, template_names, msg_prefix = self._assert_template_used( response, template_name, msg_prefix) if context_mgr_template: # Use assertTemplateUsed as context manager. return _AssertTemplateUsedContext(self, context_mgr_template) if not template_names: self.fail(msg_prefix + "No templates used to render the response") self.assertTrue( template_name in template_names, msg_prefix + "Template '%s' was not a template used to render" " the response. 
Actual template(s) used: %s" % (template_name, ', '.join(template_names)) ) if count is not None: self.assertEqual( template_names.count(template_name), count, msg_prefix + "Template '%s' was expected to be rendered %d " "time(s) but was actually rendered %d time(s)." % (template_name, count, template_names.count(template_name)) ) def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''): """ Assert that the template with the provided name was NOT used in rendering the response. Also usable as context manager. """ context_mgr_template, template_names, msg_prefix = self._assert_template_used( response, template_name, msg_prefix ) if context_mgr_template: # Use assertTemplateNotUsed as context manager. return _AssertTemplateNotUsedContext(self, context_mgr_template) self.assertFalse( template_name in template_names, msg_prefix + "Template '%s' was used unexpectedly in rendering the response" % template_name ) @contextmanager def _assert_raises_or_warns_cm(self, func, cm_attr, expected_exception, expected_message): with func(expected_exception) as cm: yield cm self.assertIn(expected_message, str(getattr(cm, cm_attr))) def _assertFooMessage(self, func, cm_attr, expected_exception, expected_message, *args, **kwargs): callable_obj = None if args: callable_obj, *args = args cm = self._assert_raises_or_warns_cm(func, cm_attr, expected_exception, expected_message) # Assertion used in context manager fashion. if callable_obj is None: return cm # Assertion was passed a callable. with cm: callable_obj(*args, **kwargs) def assertRaisesMessage(self, expected_exception, expected_message, *args, **kwargs): """ Assert that expected_message is found in the message of a raised exception. Args: expected_exception: Exception class expected to be raised. expected_message: expected error message string value. args: Function to be called and extra positional args. kwargs: Extra kwargs. """ return self._assertFooMessage( self.assertRaises, 'exception', expected_exception, expected_message, *args, **kwargs ) def assertWarnsMessage(self, expected_warning, expected_message, *args, **kwargs): """ Same as assertRaisesMessage but for assertWarns() instead of assertRaises(). """ return self._assertFooMessage( self.assertWarns, 'warning', expected_warning, expected_message, *args, **kwargs ) # A similar method is available in Python 3.10+. if not PY310: @contextmanager def assertNoLogs(self, logger, level=None): """ Assert no messages are logged on the logger, with at least the given level. """ if isinstance(level, int): level = logging.getLevelName(level) elif level is None: level = 'INFO' try: with self.assertLogs(logger, level) as cm: yield except AssertionError as e: msg = e.args[0] expected_msg = f'no logs of level {level} or higher triggered on {logger}' if msg != expected_msg: raise e else: self.fail(f'Unexpected logs found: {cm.output!r}') def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None, field_kwargs=None, empty_value=''): """ Assert that a form field behaves correctly with various inputs. Args: fieldclass: the class of the field to be tested. valid: a dictionary mapping valid inputs to their expected cleaned values. invalid: a dictionary mapping invalid inputs to one or more raised error messages. 
field_args: the args passed to instantiate the field field_kwargs: the kwargs passed to instantiate the field empty_value: the expected clean output for inputs in empty_values """ if field_args is None: field_args = [] if field_kwargs is None: field_kwargs = {} required = fieldclass(*field_args, **field_kwargs) optional = fieldclass(*field_args, **{**field_kwargs, 'required': False}) # test valid inputs for input, output in valid.items(): self.assertEqual(required.clean(input), output) self.assertEqual(optional.clean(input), output) # test invalid inputs for input, errors in invalid.items(): with self.assertRaises(ValidationError) as context_manager: required.clean(input) self.assertEqual(context_manager.exception.messages, errors) with self.assertRaises(ValidationError) as context_manager: optional.clean(input) self.assertEqual(context_manager.exception.messages, errors) # test required inputs error_required = [required.error_messages['required']] for e in required.empty_values: with self.assertRaises(ValidationError) as context_manager: required.clean(e) self.assertEqual(context_manager.exception.messages, error_required) self.assertEqual(optional.clean(e), empty_value) # test that max_length and min_length are always accepted if issubclass(fieldclass, CharField): field_kwargs.update({'min_length': 2, 'max_length': 20}) self.assertIsInstance(fieldclass(*field_args, **field_kwargs), fieldclass) def assertHTMLEqual(self, html1, html2, msg=None): """ Assert that two HTML snippets are semantically the same. Whitespace in most cases is ignored, and attribute ordering is not significant. The arguments must be valid HTML. """ dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:') dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:') if dom1 != dom2: standardMsg = '%s != %s' % ( safe_repr(dom1, True), safe_repr(dom2, True)) diff = ('\n' + '\n'.join(difflib.ndiff( str(dom1).splitlines(), str(dom2).splitlines(), ))) standardMsg = self._truncateMessage(standardMsg, diff) self.fail(self._formatMessage(msg, standardMsg)) def assertHTMLNotEqual(self, html1, html2, msg=None): """Assert that two HTML snippets are not semantically equivalent.""" dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:') dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:') if dom1 == dom2: standardMsg = '%s == %s' % ( safe_repr(dom1, True), safe_repr(dom2, True)) self.fail(self._formatMessage(msg, standardMsg)) def assertInHTML(self, needle, haystack, count=None, msg_prefix=''): needle = assert_and_parse_html(self, needle, None, 'First argument is not valid HTML:') haystack = assert_and_parse_html(self, haystack, None, 'Second argument is not valid HTML:') real_count = haystack.count(needle) if count is not None: self.assertEqual( real_count, count, msg_prefix + "Found %d instances of '%s' in response (expected %d)" % (real_count, needle, count) ) else: self.assertTrue(real_count != 0, msg_prefix + "Couldn't find '%s' in response" % needle) def assertJSONEqual(self, raw, expected_data, msg=None): """ Assert that the JSON fragments raw and expected_data are equal. Usual JSON non-significant whitespace rules apply as the heavyweight is delegated to the json library. 
""" try: data = json.loads(raw) except json.JSONDecodeError: self.fail("First argument is not valid JSON: %r" % raw) if isinstance(expected_data, str): try: expected_data = json.loads(expected_data) except ValueError: self.fail("Second argument is not valid JSON: %r" % expected_data) self.assertEqual(data, expected_data, msg=msg) def assertJSONNotEqual(self, raw, expected_data, msg=None): """ Assert that the JSON fragments raw and expected_data are not equal. Usual JSON non-significant whitespace rules apply as the heavyweight is delegated to the json library. """ try: data = json.loads(raw) except json.JSONDecodeError: self.fail("First argument is not valid JSON: %r" % raw) if isinstance(expected_data, str): try: expected_data = json.loads(expected_data) except json.JSONDecodeError: self.fail("Second argument is not valid JSON: %r" % expected_data) self.assertNotEqual(data, expected_data, msg=msg) def assertXMLEqual(self, xml1, xml2, msg=None): """ Assert that two XML snippets are semantically the same. Whitespace in most cases is ignored and attribute ordering is not significant. The arguments must be valid XML. """ try: result = compare_xml(xml1, xml2) except Exception as e: standardMsg = 'First or second argument is not valid XML\n%s' % e self.fail(self._formatMessage(msg, standardMsg)) else: if not result: standardMsg = '%s != %s' % (safe_repr(xml1, True), safe_repr(xml2, True)) diff = ('\n' + '\n'.join( difflib.ndiff(xml1.splitlines(), xml2.splitlines()) )) standardMsg = self._truncateMessage(standardMsg, diff) self.fail(self._formatMessage(msg, standardMsg)) def assertXMLNotEqual(self, xml1, xml2, msg=None): """ Assert that two XML snippets are not semantically equivalent. Whitespace in most cases is ignored and attribute ordering is not significant. The arguments must be valid XML. """ try: result = compare_xml(xml1, xml2) except Exception as e: standardMsg = 'First or second argument is not valid XML\n%s' % e self.fail(self._formatMessage(msg, standardMsg)) else: if result: standardMsg = '%s == %s' % (safe_repr(xml1, True), safe_repr(xml2, True)) self.fail(self._formatMessage(msg, standardMsg)) class TransactionTestCase(SimpleTestCase): # Subclasses can ask for resetting of auto increment sequence before each # test case reset_sequences = False # Subclasses can enable only a subset of apps for faster tests available_apps = None # Subclasses can define fixtures which will be automatically installed. fixtures = None databases = {DEFAULT_DB_ALIAS} _disallowed_database_msg = ( 'Database %(operation)s to %(alias)r are not allowed in this test. ' 'Add %(alias)r to %(test)s.databases to ensure proper test isolation ' 'and silence this failure.' ) # If transactions aren't available, Django will serialize the database # contents into a fixture during setup and flush and reload them # during teardown (as flush does not restore data from migrations). # This can be slow; this flag allows enabling on a per-case basis. serialized_rollback = False def _pre_setup(self): """ Perform pre-test setup: * If the class has an 'available_apps' attribute, restrict the app registry to these applications, then fire the post_migrate signal -- it must run with the correct set of applications for the test case. * If the class has a 'fixtures' attribute, install those fixtures. 
""" super()._pre_setup() if self.available_apps is not None: apps.set_available_apps(self.available_apps) setting_changed.send( sender=settings._wrapped.__class__, setting='INSTALLED_APPS', value=self.available_apps, enter=True, ) for db_name in self._databases_names(include_mirrors=False): emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name) try: self._fixture_setup() except Exception: if self.available_apps is not None: apps.unset_available_apps() setting_changed.send( sender=settings._wrapped.__class__, setting='INSTALLED_APPS', value=settings.INSTALLED_APPS, enter=False, ) raise # Clear the queries_log so that it's less likely to overflow (a single # test probably won't execute 9K queries). If queries_log overflows, # then assertNumQueries() doesn't work. for db_name in self._databases_names(include_mirrors=False): connections[db_name].queries_log.clear() @classmethod def _databases_names(cls, include_mirrors=True): # Only consider allowed database aliases, including mirrors or not. return [ alias for alias in connections if alias in cls.databases and ( include_mirrors or not connections[alias].settings_dict['TEST']['MIRROR'] ) ] def _reset_sequences(self, db_name): conn = connections[db_name] if conn.features.supports_sequence_reset: sql_list = conn.ops.sequence_reset_by_name_sql( no_style(), conn.introspection.sequence_list()) if sql_list: with transaction.atomic(using=db_name): with conn.cursor() as cursor: for sql in sql_list: cursor.execute(sql) def _fixture_setup(self): for db_name in self._databases_names(include_mirrors=False): # Reset sequences if self.reset_sequences: self._reset_sequences(db_name) # Provide replica initial data from migrated apps, if needed. if self.serialized_rollback and hasattr(connections[db_name], "_test_serialized_contents"): if self.available_apps is not None: apps.unset_available_apps() connections[db_name].creation.deserialize_db_from_string( connections[db_name]._test_serialized_contents ) if self.available_apps is not None: apps.set_available_apps(self.available_apps) if self.fixtures: # We have to use this slightly awkward syntax due to the fact # that we're using *args and **kwargs together. call_command('loaddata', *self.fixtures, **{'verbosity': 0, 'database': db_name}) def _should_reload_connections(self): return True def _post_teardown(self): """ Perform post-test things: * Flush the contents of the database to leave a clean slate. If the class has an 'available_apps' attribute, don't fire post_migrate. * Force-close the connection so the next test gets a clean cursor. """ try: self._fixture_teardown() super()._post_teardown() if self._should_reload_connections(): # Some DB cursors include SQL statements as part of cursor # creation. If you have a test that does a rollback, the effect # of these statements is lost, which can affect the operation of # tests (e.g., losing a timezone setting causing objects to be # created with the wrong time). To make sure this doesn't # happen, get a clean connection at the start of every test. for conn in connections.all(): conn.close() finally: if self.available_apps is not None: apps.unset_available_apps() setting_changed.send(sender=settings._wrapped.__class__, setting='INSTALLED_APPS', value=settings.INSTALLED_APPS, enter=False) def _fixture_teardown(self): # Allow TRUNCATE ... 
CASCADE and don't emit the post_migrate signal # when flushing only a subset of the apps for db_name in self._databases_names(include_mirrors=False): # Flush the database inhibit_post_migrate = ( self.available_apps is not None or ( # Inhibit the post_migrate signal when using serialized # rollback to avoid trying to recreate the serialized data. self.serialized_rollback and hasattr(connections[db_name], '_test_serialized_contents') ) ) call_command('flush', verbosity=0, interactive=False, database=db_name, reset_sequences=False, allow_cascade=self.available_apps is not None, inhibit_post_migrate=inhibit_post_migrate) def assertQuerysetEqual(self, qs, values, transform=None, ordered=True, msg=None): values = list(values) # RemovedInDjango41Warning. if transform is None: if ( values and isinstance(values[0], str) and qs and not isinstance(qs[0], str) ): # Transform qs using repr() if the first element of values is a # string and the first element of qs is not (which would be the # case if qs is a flattened values_list). warnings.warn( "In Django 4.1, repr() will not be called automatically " "on a queryset when compared to string values. Set an " "explicit 'transform' to silence this warning.", category=RemovedInDjango41Warning, stacklevel=2, ) transform = repr items = qs if transform is not None: items = map(transform, items) if not ordered: return self.assertDictEqual(Counter(items), Counter(values), msg=msg) # For example qs.iterator() could be passed as qs, but it does not # have 'ordered' attribute. if len(values) > 1 and hasattr(qs, 'ordered') and not qs.ordered: raise ValueError( 'Trying to compare non-ordered queryset against more than one ' 'ordered value.' ) return self.assertEqual(list(items), values, msg=msg) def assertNumQueries(self, num, func=None, *args, using=DEFAULT_DB_ALIAS, **kwargs): conn = connections[using] context = _AssertNumQueriesContext(self, num, conn) if func is None: return context with context: func(*args, **kwargs) def connections_support_transactions(aliases=None): """ Return whether or not all (or specified) connections support transactions. """ conns = connections.all() if aliases is None else (connections[alias] for alias in aliases) return all(conn.features.supports_transactions for conn in conns) class TestData: """ Descriptor to provide TestCase instance isolation for attributes assigned during the setUpTestData() phase. Allow safe alteration of objects assigned in setUpTestData() by test methods by exposing deep copies instead of the original objects. Objects are deep copied using a memo kept on the test case instance in order to maintain their original relationships. """ memo_attr = '_testdata_memo' def __init__(self, name, data): self.name = name self.data = data def get_memo(self, testcase): try: memo = getattr(testcase, self.memo_attr) except AttributeError: memo = {} setattr(testcase, self.memo_attr, memo) return memo def __get__(self, instance, owner): if instance is None: return self.data memo = self.get_memo(instance) try: data = deepcopy(self.data, memo) except TypeError: # RemovedInDjango41Warning. msg = ( "Assigning objects which don't support copy.deepcopy() during " "setUpTestData() is deprecated. Either assign the %s " "attribute during setUpClass() or setUp(), or add support for " "deepcopy() to %s.%s.%s." 
) % ( self.name, owner.__module__, owner.__qualname__, self.name, ) warnings.warn(msg, category=RemovedInDjango41Warning, stacklevel=2) data = self.data setattr(instance, self.name, data) return data def __repr__(self): return '<TestData: name=%r, data=%r>' % (self.name, self.data) class TestCase(TransactionTestCase): """ Similar to TransactionTestCase, but use `transaction.atomic()` to achieve test isolation. In most situations, TestCase should be preferred to TransactionTestCase as it allows faster execution. However, there are some situations where using TransactionTestCase might be necessary (e.g. testing some transactional behavior). On database backends with no transaction support, TestCase behaves as TransactionTestCase. """ @classmethod def _enter_atomics(cls): """Open atomic blocks for multiple databases.""" atomics = {} for db_name in cls._databases_names(): atomics[db_name] = transaction.atomic(using=db_name) atomics[db_name].__enter__() return atomics @classmethod def _rollback_atomics(cls, atomics): """Rollback atomic blocks opened by the previous method.""" for db_name in reversed(cls._databases_names()): transaction.set_rollback(True, using=db_name) atomics[db_name].__exit__(None, None, None) @classmethod def _databases_support_transactions(cls): return connections_support_transactions(cls.databases) @classmethod def setUpClass(cls): super().setUpClass() if not cls._databases_support_transactions(): return # Disable the durability check to allow testing durable atomic blocks # in a transaction for performance reasons. transaction.Atomic._ensure_durability = False try: cls.cls_atomics = cls._enter_atomics() if cls.fixtures: for db_name in cls._databases_names(include_mirrors=False): try: call_command('loaddata', *cls.fixtures, **{'verbosity': 0, 'database': db_name}) except Exception: cls._rollback_atomics(cls.cls_atomics) raise pre_attrs = cls.__dict__.copy() try: cls.setUpTestData() except Exception: cls._rollback_atomics(cls.cls_atomics) raise for name, value in cls.__dict__.items(): if value is not pre_attrs.get(name): setattr(cls, name, TestData(name, value)) except Exception: transaction.Atomic._ensure_durability = True raise @classmethod def tearDownClass(cls): transaction.Atomic._ensure_durability = True if cls._databases_support_transactions(): cls._rollback_atomics(cls.cls_atomics) for conn in connections.all(): conn.close() super().tearDownClass() @classmethod def setUpTestData(cls): """Load initial data for the TestCase.""" pass def _should_reload_connections(self): if self._databases_support_transactions(): return False return super()._should_reload_connections() def _fixture_setup(self): if not self._databases_support_transactions(): # If the backend does not support transactions, we should reload # class data before each test self.setUpTestData() return super()._fixture_setup() if self.reset_sequences: raise TypeError('reset_sequences cannot be used on TestCase instances') self.atomics = self._enter_atomics() def _fixture_teardown(self): if not self._databases_support_transactions(): return super()._fixture_teardown() try: for db_name in reversed(self._databases_names()): if self._should_check_constraints(connections[db_name]): connections[db_name].check_constraints() finally: self._rollback_atomics(self.atomics) def _should_check_constraints(self, connection): return ( connection.features.can_defer_constraint_checks and not connection.needs_rollback and connection.is_usable() ) @classmethod @contextmanager def captureOnCommitCallbacks(cls, *, 
using=DEFAULT_DB_ALIAS, execute=False): """Context manager to capture transaction.on_commit() callbacks.""" callbacks = [] start_count = len(connections[using].run_on_commit) try: yield callbacks finally: run_on_commit = connections[using].run_on_commit[start_count:] callbacks[:] = [func for sids, func in run_on_commit] if execute: for callback in callbacks: callback() class CheckCondition: """Descriptor class for deferred condition checking.""" def __init__(self, *conditions): self.conditions = conditions def add_condition(self, condition, reason): return self.__class__(*self.conditions, (condition, reason)) def __get__(self, instance, cls=None): # Trigger access for all bases. if any(getattr(base, '__unittest_skip__', False) for base in cls.__bases__): return True for condition, reason in self.conditions: if condition(): # Override this descriptor's value and set the skip reason. cls.__unittest_skip__ = True cls.__unittest_skip_why__ = reason return True return False def _deferredSkip(condition, reason, name): def decorator(test_func): nonlocal condition if not (isinstance(test_func, type) and issubclass(test_func, unittest.TestCase)): @wraps(test_func) def skip_wrapper(*args, **kwargs): if (args and isinstance(args[0], unittest.TestCase) and connection.alias not in getattr(args[0], 'databases', {})): raise ValueError( "%s cannot be used on %s as %s doesn't allow queries " "against the %r database." % ( name, args[0], args[0].__class__.__qualname__, connection.alias, ) ) if condition(): raise unittest.SkipTest(reason) return test_func(*args, **kwargs) test_item = skip_wrapper else: # Assume a class is decorated test_item = test_func databases = getattr(test_item, 'databases', None) if not databases or connection.alias not in databases: # Defer raising to allow importing test class's module. def condition(): raise ValueError( "%s cannot be used on %s as it doesn't allow queries " "against the '%s' database." % ( name, test_item, connection.alias, ) ) # Retrieve the possibly existing value from the class's dict to # avoid triggering the descriptor. skip = test_func.__dict__.get('__unittest_skip__') if isinstance(skip, CheckCondition): test_item.__unittest_skip__ = skip.add_condition(condition, reason) elif skip is not True: test_item.__unittest_skip__ = CheckCondition((condition, reason)) return test_item return decorator def skipIfDBFeature(*features): """Skip a test if a database has at least one of the named features.""" return _deferredSkip( lambda: any(getattr(connection.features, feature, False) for feature in features), "Database has feature(s) %s" % ", ".join(features), 'skipIfDBFeature', ) def skipUnlessDBFeature(*features): """Skip a test unless a database has all the named features.""" return _deferredSkip( lambda: not all(getattr(connection.features, feature, False) for feature in features), "Database doesn't support feature(s): %s" % ", ".join(features), 'skipUnlessDBFeature', ) def skipUnlessAnyDBFeature(*features): """Skip a test unless a database has any of the named features.""" return _deferredSkip( lambda: not any(getattr(connection.features, feature, False) for feature in features), "Database doesn't support any of the feature(s): %s" % ", ".join(features), 'skipUnlessAnyDBFeature', ) class QuietWSGIRequestHandler(WSGIRequestHandler): """ A WSGIRequestHandler that doesn't log to standard output any of the requests received, so as to not clutter the test result output. 
""" def log_message(*args): pass class FSFilesHandler(WSGIHandler): """ WSGI middleware that intercepts calls to a directory, as defined by one of the *_ROOT settings, and serves those files, publishing them under *_URL. """ def __init__(self, application): self.application = application self.base_url = urlparse(self.get_base_url()) super().__init__() def _should_handle(self, path): """ Check if the path should be handled. Ignore the path if: * the host is provided as part of the base_url * the request's path isn't under the media path (or equal) """ return path.startswith(self.base_url[2]) and not self.base_url[1] def file_path(self, url): """Return the relative path to the file on disk for the given URL.""" relative_url = url[len(self.base_url[2]):] return url2pathname(relative_url) def get_response(self, request): from django.http import Http404 if self._should_handle(request.path): try: return self.serve(request) except Http404: pass return super().get_response(request) def serve(self, request): os_rel_path = self.file_path(request.path) os_rel_path = posixpath.normpath(unquote(os_rel_path)) # Emulate behavior of django.contrib.staticfiles.views.serve() when it # invokes staticfiles' finders functionality. # TODO: Modify if/when that internal API is refactored final_rel_path = os_rel_path.replace('\\', '/').lstrip('/') return serve(request, final_rel_path, document_root=self.get_base_dir()) def __call__(self, environ, start_response): if not self._should_handle(get_path_info(environ)): return self.application(environ, start_response) return super().__call__(environ, start_response) class _StaticFilesHandler(FSFilesHandler): """ Handler for serving static files. A private class that is meant to be used solely as a convenience by LiveServerThread. """ def get_base_dir(self): return settings.STATIC_ROOT def get_base_url(self): return settings.STATIC_URL class _MediaFilesHandler(FSFilesHandler): """ Handler for serving the media files. A private class that is meant to be used solely as a convenience by LiveServerThread. """ def get_base_dir(self): return settings.MEDIA_ROOT def get_base_url(self): return settings.MEDIA_URL class LiveServerThread(threading.Thread): """Thread for running a live http server while the tests are running.""" server_class = ThreadedWSGIServer def __init__(self, host, static_handler, connections_override=None, port=0): self.host = host self.port = port self.is_ready = threading.Event() self.error = None self.static_handler = static_handler self.connections_override = connections_override super().__init__() def run(self): """ Set up the live server and databases, and then loop over handling HTTP requests. """ if self.connections_override: # Override this thread's database connections with the ones # provided by the main thread. for alias, conn in self.connections_override.items(): connections[alias] = conn try: # Create the handler for serving static and media files handler = self.static_handler(_MediaFilesHandler(WSGIHandler())) self.httpd = self._create_server() # If binding to port zero, assign the port allocated by the OS. 
        if self.port == 0:
            self.port = self.httpd.server_address[1]
        self.httpd.set_app(handler)
        self.is_ready.set()
        self.httpd.serve_forever()
    except Exception as e:
        self.error = e
        self.is_ready.set()
    finally:
        connections.close_all()

    def _create_server(self, connections_override=None):
        return self.server_class(
            (self.host, self.port),
            QuietWSGIRequestHandler,
            allow_reuse_address=False,
            connections_override=connections_override,
        )

    def terminate(self):
        if hasattr(self, 'httpd'):
            # Stop the WSGI server
            self.httpd.shutdown()
            self.httpd.server_close()
        self.join()


class LiveServerTestCase(TransactionTestCase):
    """
    Do basically the same as TransactionTestCase but also launch a live HTTP
    server in a separate thread so that the tests may use another testing
    framework, such as Selenium, instead of the built-in dummy client.
    It inherits from TransactionTestCase instead of TestCase because the
    threads don't share the same transactions (unless using in-memory sqlite)
    and each thread needs to commit all their transactions so that the other
    thread can see the changes.
    """
    host = 'localhost'
    port = 0
    server_thread_class = LiveServerThread
    static_handler = _StaticFilesHandler

    @classproperty
    def live_server_url(cls):
        return 'http://%s:%s' % (cls.host, cls.server_thread.port)

    @classproperty
    def allowed_host(cls):
        return cls.host

    @classmethod
    def _make_connections_override(cls):
        connections_override = {}
        for conn in connections.all():
            # If using in-memory sqlite databases, pass the connections to
            # the server thread.
            if conn.vendor == 'sqlite' and conn.is_in_memory_db():
                connections_override[conn.alias] = conn
        return connections_override

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._live_server_modified_settings = modify_settings(
            ALLOWED_HOSTS={'append': cls.allowed_host},
        )
        cls._live_server_modified_settings.enable()
        connections_override = cls._make_connections_override()
        for conn in connections_override.values():
            # Explicitly enable thread-shareability for this connection.
            conn.inc_thread_sharing()
        cls.server_thread = cls._create_server_thread(connections_override)
        cls.server_thread.daemon = True
        cls.server_thread.start()
        # Wait for the live server to be ready
        cls.server_thread.is_ready.wait()
        if cls.server_thread.error:
            # Clean up behind ourselves, since tearDownClass() won't get
            # called in case of errors.
            cls._tearDownClassInternal()
            raise cls.server_thread.error

    @classmethod
    def _create_server_thread(cls, connections_override):
        return cls.server_thread_class(
            cls.host,
            cls.static_handler,
            connections_override=connections_override,
            port=cls.port,
        )

    @classmethod
    def _tearDownClassInternal(cls):
        # Terminate the live server's thread.
        cls.server_thread.terminate()
        # Restore shared connections' non-shareability.
        for conn in cls.server_thread.connections_override.values():
            conn.dec_thread_sharing()
        cls._live_server_modified_settings.disable()
        super().tearDownClass()

    @classmethod
    def tearDownClass(cls):
        cls._tearDownClassInternal()


class SerializeMixin:
    """
    Enforce serialization of TestCases that share a common resource.
    Define a common 'lockfile' for each set of TestCases to serialize. This
    file must exist on the filesystem.
    Place it early in the MRO in order to isolate setUpClass()/tearDownClass().
    """
    lockfile = None

    def __init_subclass__(cls, /, **kwargs):
        super().__init_subclass__(**kwargs)
        if cls.lockfile is None:
            raise ValueError(
                "{}.lockfile isn't set. Set it to a unique value "
                "in the base class.".format(cls.__name__))

    @classmethod
    def setUpClass(cls):
        cls._lockfile = open(cls.lockfile)
        cls.addClassCleanup(cls._lockfile.close)
        locks.lock(cls._lockfile, locks.LOCK_EX)
        super().setUpClass()
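# ---------------------------------------------------------------------------
# Usage sketch (editorial addition, not part of Django itself; kept commented
# out so the module's behavior is unchanged): how a test might combine
# TestCase.captureOnCommitCallbacks() with the test client to assert on
# transaction.on_commit() side effects. The ContactTests class and the
# /contact/ URL are hypothetical.
#
#     class ContactTests(TestCase):
#         def test_post_schedules_callback_on_commit(self):
#             with self.captureOnCommitCallbacks(execute=True) as callbacks:
#                 self.client.post('/contact/', {'message': 'hello'})
#             # execute=True ran each captured callback when the block exited.
#             self.assertEqual(len(callbacks), 1)
# ---------------------------------------------------------------------------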
cceb1d927ef408fae6c286081bac946d52d8acb779f0c39e835a5681837a5ccf
import asyncio import collections import logging import os import re import sys import time import warnings from contextlib import contextmanager from functools import wraps from io import StringIO from itertools import chain from types import SimpleNamespace from unittest import TestCase, skipIf, skipUnless from xml.dom.minidom import Node, parseString from django.apps import apps from django.apps.registry import Apps from django.conf import UserSettingsHolder, settings from django.core import mail from django.core.exceptions import ImproperlyConfigured from django.core.signals import request_started from django.db import DEFAULT_DB_ALIAS, connections, reset_queries from django.db.models.options import Options from django.template import Template from django.test.signals import setting_changed, template_rendered from django.urls import get_script_prefix, set_script_prefix from django.utils.deprecation import RemovedInDjango50Warning from django.utils.translation import deactivate try: import jinja2 except ImportError: jinja2 = None __all__ = ( 'Approximate', 'ContextList', 'isolate_lru_cache', 'get_runner', 'CaptureQueriesContext', 'ignore_warnings', 'isolate_apps', 'modify_settings', 'override_settings', 'override_system_checks', 'tag', 'requires_tz_support', 'setup_databases', 'setup_test_environment', 'teardown_test_environment', ) TZ_SUPPORT = hasattr(time, 'tzset') class Approximate: def __init__(self, val, places=7): self.val = val self.places = places def __repr__(self): return repr(self.val) def __eq__(self, other): return self.val == other or round(abs(self.val - other), self.places) == 0 class ContextList(list): """ A wrapper that provides direct key access to context items contained in a list of context objects. """ def __getitem__(self, key): if isinstance(key, str): for subcontext in self: if key in subcontext: return subcontext[key] raise KeyError(key) else: return super().__getitem__(key) def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default def __contains__(self, key): try: self[key] except KeyError: return False return True def keys(self): """ Flattened keys of subcontexts. """ return set(chain.from_iterable(d for subcontext in self for d in subcontext)) def instrumented_test_render(self, context): """ An instrumented Template render method, providing a signal that can be intercepted by the test Client. """ template_rendered.send(sender=self, template=self, context=context) return self.nodelist.render(context) class _TestState: pass def setup_test_environment(debug=None): """ Perform global pre-test setup, such as installing the instrumented template renderer and setting the email backend to the locmem email backend. """ if hasattr(_TestState, 'saved_data'): # Executing this function twice would overwrite the saved values. raise RuntimeError( "setup_test_environment() was already called and can't be called " "again without first calling teardown_test_environment()." ) if debug is None: debug = settings.DEBUG saved_data = SimpleNamespace() _TestState.saved_data = saved_data saved_data.allowed_hosts = settings.ALLOWED_HOSTS # Add the default host of the test client. 
settings.ALLOWED_HOSTS = [*settings.ALLOWED_HOSTS, 'testserver'] saved_data.debug = settings.DEBUG settings.DEBUG = debug saved_data.email_backend = settings.EMAIL_BACKEND settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend' saved_data.template_render = Template._render Template._render = instrumented_test_render mail.outbox = [] deactivate() def teardown_test_environment(): """ Perform any global post-test teardown, such as restoring the original template renderer and restoring the email sending functions. """ saved_data = _TestState.saved_data settings.ALLOWED_HOSTS = saved_data.allowed_hosts settings.DEBUG = saved_data.debug settings.EMAIL_BACKEND = saved_data.email_backend Template._render = saved_data.template_render del _TestState.saved_data del mail.outbox def setup_databases( verbosity, interactive, *, time_keeper=None, keepdb=False, debug_sql=False, parallel=0, aliases=None, serialized_aliases=None, **kwargs, ): """Create the test databases.""" if time_keeper is None: time_keeper = NullTimeKeeper() test_databases, mirrored_aliases = get_unique_databases_and_mirrors(aliases) old_names = [] for db_name, aliases in test_databases.values(): first_alias = None for alias in aliases: connection = connections[alias] old_names.append((connection, db_name, first_alias is None)) # Actually create the database for the first connection if first_alias is None: first_alias = alias with time_keeper.timed(" Creating '%s'" % alias): # RemovedInDjango50Warning: when the deprecation ends, # replace with: # serialize_alias = serialized_aliases is None or alias in serialized_aliases try: serialize_alias = connection.settings_dict['TEST']['SERIALIZE'] except KeyError: serialize_alias = ( serialized_aliases is None or alias in serialized_aliases ) else: warnings.warn( 'The SERIALIZE test database setting is ' 'deprecated as it can be inferred from the ' 'TestCase/TransactionTestCase.databases that ' 'enable the serialized_rollback feature.', category=RemovedInDjango50Warning, ) connection.creation.create_test_db( verbosity=verbosity, autoclobber=not interactive, keepdb=keepdb, serialize=serialize_alias, ) if parallel > 1: for index in range(parallel): with time_keeper.timed(" Cloning '%s'" % alias): connection.creation.clone_test_db( suffix=str(index + 1), verbosity=verbosity, keepdb=keepdb, ) # Configure all other connections as mirrors of the first one else: connections[alias].creation.set_as_test_mirror(connections[first_alias].settings_dict) # Configure the test mirrors. for alias, mirror_alias in mirrored_aliases.items(): connections[alias].creation.set_as_test_mirror( connections[mirror_alias].settings_dict) if debug_sql: for alias in connections: connections[alias].force_debug_cursor = True return old_names def iter_test_cases(tests): """ Return an iterator over a test suite's unittest.TestCase objects. The tests argument can also be an iterable of TestCase objects. """ for test in tests: if isinstance(test, str): # Prevent an unfriendly RecursionError that can happen with # strings. raise TypeError( f'Test {test!r} must be a test case or test suite not string ' f'(was found in {tests!r}).' ) if isinstance(test, TestCase): yield test else: # Otherwise, assume it is a test suite. yield from iter_test_cases(test) def dependency_ordered(test_databases, dependencies): """ Reorder test_databases into an order that honors the dependencies described in TEST[DEPENDENCIES]. 
""" ordered_test_databases = [] resolved_databases = set() # Maps db signature to dependencies of all its aliases dependencies_map = {} # Check that no database depends on its own alias for sig, (_, aliases) in test_databases: all_deps = set() for alias in aliases: all_deps.update(dependencies.get(alias, [])) if not all_deps.isdisjoint(aliases): raise ImproperlyConfigured( "Circular dependency: databases %r depend on each other, " "but are aliases." % aliases ) dependencies_map[sig] = all_deps while test_databases: changed = False deferred = [] # Try to find a DB that has all its dependencies met for signature, (db_name, aliases) in test_databases: if dependencies_map[signature].issubset(resolved_databases): resolved_databases.update(aliases) ordered_test_databases.append((signature, (db_name, aliases))) changed = True else: deferred.append((signature, (db_name, aliases))) if not changed: raise ImproperlyConfigured("Circular dependency in TEST[DEPENDENCIES]") test_databases = deferred return ordered_test_databases def get_unique_databases_and_mirrors(aliases=None): """ Figure out which databases actually need to be created. Deduplicate entries in DATABASES that correspond the same database or are configured as test mirrors. Return two values: - test_databases: ordered mapping of signatures to (name, list of aliases) where all aliases share the same underlying database. - mirrored_aliases: mapping of mirror aliases to original aliases. """ if aliases is None: aliases = connections mirrored_aliases = {} test_databases = {} dependencies = {} default_sig = connections[DEFAULT_DB_ALIAS].creation.test_db_signature() for alias in connections: connection = connections[alias] test_settings = connection.settings_dict['TEST'] if test_settings['MIRROR']: # If the database is marked as a test mirror, save the alias. mirrored_aliases[alias] = test_settings['MIRROR'] elif alias in aliases: # Store a tuple with DB parameters that uniquely identify it. # If we have two aliases with the same values for that tuple, # we only need to create the test database once. item = test_databases.setdefault( connection.creation.test_db_signature(), (connection.settings_dict['NAME'], []), ) # The default database must be the first because data migrations # use the default alias by default. if alias == DEFAULT_DB_ALIAS: item[1].insert(0, alias) else: item[1].append(alias) if 'DEPENDENCIES' in test_settings: dependencies[alias] = test_settings['DEPENDENCIES'] else: if alias != DEFAULT_DB_ALIAS and connection.creation.test_db_signature() != default_sig: dependencies[alias] = test_settings.get('DEPENDENCIES', [DEFAULT_DB_ALIAS]) test_databases = dict(dependency_ordered(test_databases.items(), dependencies)) return test_databases, mirrored_aliases def teardown_databases(old_config, verbosity, parallel=0, keepdb=False): """Destroy all the non-mirror databases.""" for connection, old_name, destroy in old_config: if destroy: if parallel > 1: for index in range(parallel): connection.creation.destroy_test_db( suffix=str(index + 1), verbosity=verbosity, keepdb=keepdb, ) connection.creation.destroy_test_db(old_name, verbosity, keepdb) def get_runner(settings, test_runner_class=None): test_runner_class = test_runner_class or settings.TEST_RUNNER test_path = test_runner_class.split('.') # Allow for relative paths if len(test_path) > 1: test_module_name = '.'.join(test_path[:-1]) else: test_module_name = '.' 
test_module = __import__(test_module_name, {}, {}, test_path[-1]) return getattr(test_module, test_path[-1]) class TestContextDecorator: """ A base class that can either be used as a context manager during tests or as a test function or unittest.TestCase subclass decorator to perform temporary alterations. `attr_name`: attribute assigned the return value of enable() if used as a class decorator. `kwarg_name`: keyword argument passing the return value of enable() if used as a function decorator. """ def __init__(self, attr_name=None, kwarg_name=None): self.attr_name = attr_name self.kwarg_name = kwarg_name def enable(self): raise NotImplementedError def disable(self): raise NotImplementedError def __enter__(self): return self.enable() def __exit__(self, exc_type, exc_value, traceback): self.disable() def decorate_class(self, cls): if issubclass(cls, TestCase): decorated_setUp = cls.setUp def setUp(inner_self): context = self.enable() inner_self.addCleanup(self.disable) if self.attr_name: setattr(inner_self, self.attr_name, context) decorated_setUp(inner_self) cls.setUp = setUp return cls raise TypeError('Can only decorate subclasses of unittest.TestCase') def decorate_callable(self, func): if asyncio.iscoroutinefunction(func): # If the inner function is an async function, we must execute async # as well so that the `with` statement executes at the right time. @wraps(func) async def inner(*args, **kwargs): with self as context: if self.kwarg_name: kwargs[self.kwarg_name] = context return await func(*args, **kwargs) else: @wraps(func) def inner(*args, **kwargs): with self as context: if self.kwarg_name: kwargs[self.kwarg_name] = context return func(*args, **kwargs) return inner def __call__(self, decorated): if isinstance(decorated, type): return self.decorate_class(decorated) elif callable(decorated): return self.decorate_callable(decorated) raise TypeError('Cannot decorate object of type %s' % type(decorated)) class override_settings(TestContextDecorator): """ Act as either a decorator or a context manager. If it's a decorator, take a function and return a wrapped function. If it's a contextmanager, use it with the ``with`` statement. In either event, entering/exiting are called before and after, respectively, the function/block is executed. """ enable_exception = None def __init__(self, **kwargs): self.options = kwargs super().__init__() def enable(self): # Keep this code at the beginning to leave the settings unchanged # in case it raises an exception because INSTALLED_APPS is invalid. 
        if 'INSTALLED_APPS' in self.options:
            try:
                apps.set_installed_apps(self.options['INSTALLED_APPS'])
            except Exception:
                apps.unset_installed_apps()
                raise
        override = UserSettingsHolder(settings._wrapped)
        for key, new_value in self.options.items():
            setattr(override, key, new_value)
        self.wrapped = settings._wrapped
        settings._wrapped = override
        for key, new_value in self.options.items():
            try:
                setting_changed.send(
                    sender=settings._wrapped.__class__,
                    setting=key, value=new_value, enter=True,
                )
            except Exception as exc:
                self.enable_exception = exc
                self.disable()

    def disable(self):
        if 'INSTALLED_APPS' in self.options:
            apps.unset_installed_apps()
        settings._wrapped = self.wrapped
        del self.wrapped
        responses = []
        for key in self.options:
            new_value = getattr(settings, key, None)
            responses_for_setting = setting_changed.send_robust(
                sender=settings._wrapped.__class__,
                setting=key, value=new_value, enter=False,
            )
            responses.extend(responses_for_setting)
        if self.enable_exception is not None:
            exc = self.enable_exception
            self.enable_exception = None
            raise exc
        for _, response in responses:
            if isinstance(response, Exception):
                raise response

    def save_options(self, test_func):
        if test_func._overridden_settings is None:
            test_func._overridden_settings = self.options
        else:
            # Duplicate dict to prevent subclasses from altering their parent.
            test_func._overridden_settings = {
                **test_func._overridden_settings,
                **self.options,
            }

    def decorate_class(self, cls):
        from django.test import SimpleTestCase
        if not issubclass(cls, SimpleTestCase):
            raise ValueError(
                "Only subclasses of Django SimpleTestCase can be decorated "
                "with override_settings")
        self.save_options(cls)
        return cls


class modify_settings(override_settings):
    """
    Like override_settings, but makes it possible to append, prepend, or
    remove items instead of redefining the entire list.
    """
    def __init__(self, *args, **kwargs):
        if args:
            # Hack used when instantiating from SimpleTestCase.setUpClass.
            assert not kwargs
            self.operations = args[0]
        else:
            assert not args
            self.operations = list(kwargs.items())
        super(override_settings, self).__init__()

    def save_options(self, test_func):
        if test_func._modified_settings is None:
            test_func._modified_settings = self.operations
        else:
            # Duplicate list to prevent subclasses from altering their parent.
            test_func._modified_settings = list(
                test_func._modified_settings) + self.operations

    def enable(self):
        self.options = {}
        for name, operations in self.operations:
            try:
                # When called from SimpleTestCase.setUpClass, values may be
                # overridden several times; accumulate changes.
                value = self.options[name]
            except KeyError:
                value = list(getattr(settings, name, []))
            for action, items in operations.items():
                # items may be a single value or an iterable.
                if isinstance(items, str):
                    items = [items]
                if action == 'append':
                    value = value + [item for item in items if item not in value]
                elif action == 'prepend':
                    value = [item for item in items if item not in value] + value
                elif action == 'remove':
                    value = [item for item in value if item not in items]
                else:
                    raise ValueError("Unsupported action: %s" % action)
            self.options[name] = value
        super().enable()


class override_system_checks(TestContextDecorator):
    """
    Act as a decorator. Override list of registered system checks.
    Useful when you override `INSTALLED_APPS`, e.g. if you exclude `auth` app,
    you also need to exclude its system checks.
""" def __init__(self, new_checks, deployment_checks=None): from django.core.checks.registry import registry self.registry = registry self.new_checks = new_checks self.deployment_checks = deployment_checks super().__init__() def enable(self): self.old_checks = self.registry.registered_checks self.registry.registered_checks = set() for check in self.new_checks: self.registry.register(check, *getattr(check, 'tags', ())) self.old_deployment_checks = self.registry.deployment_checks if self.deployment_checks is not None: self.registry.deployment_checks = set() for check in self.deployment_checks: self.registry.register(check, *getattr(check, 'tags', ()), deploy=True) def disable(self): self.registry.registered_checks = self.old_checks self.registry.deployment_checks = self.old_deployment_checks def compare_xml(want, got): """ Try to do a 'xml-comparison' of want and got. Plain string comparison doesn't always work because, for example, attribute ordering should not be important. Ignore comment nodes, processing instructions, document type node, and leading and trailing whitespaces. Based on https://github.com/lxml/lxml/blob/master/src/lxml/doctestcompare.py """ _norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+') def norm_whitespace(v): return _norm_whitespace_re.sub(' ', v) def child_text(element): return ''.join(c.data for c in element.childNodes if c.nodeType == Node.TEXT_NODE) def children(element): return [c for c in element.childNodes if c.nodeType == Node.ELEMENT_NODE] def norm_child_text(element): return norm_whitespace(child_text(element)) def attrs_dict(element): return dict(element.attributes.items()) def check_element(want_element, got_element): if want_element.tagName != got_element.tagName: return False if norm_child_text(want_element) != norm_child_text(got_element): return False if attrs_dict(want_element) != attrs_dict(got_element): return False want_children = children(want_element) got_children = children(got_element) if len(want_children) != len(got_children): return False return all(check_element(want, got) for want, got in zip(want_children, got_children)) def first_node(document): for node in document.childNodes: if node.nodeType not in ( Node.COMMENT_NODE, Node.DOCUMENT_TYPE_NODE, Node.PROCESSING_INSTRUCTION_NODE, ): return node want = want.strip().replace('\\n', '\n') got = got.strip().replace('\\n', '\n') # If the string is not a complete xml document, we may need to add a # root element. This allow us to compare fragments, like "<foo/><bar/>" if not want.startswith('<?xml'): wrapper = '<root>%s</root>' want = wrapper % want got = wrapper % got # Parse the want and got strings, and compare the parsings. want_root = first_node(parseString(want)) got_root = first_node(parseString(got)) return check_element(want_root, got_root) class CaptureQueriesContext: """ Context manager that captures queries executed by the specified connection. """ def __init__(self, connection): self.connection = connection def __iter__(self): return iter(self.captured_queries) def __getitem__(self, index): return self.captured_queries[index] def __len__(self): return len(self.captured_queries) @property def captured_queries(self): return self.connection.queries[self.initial_queries:self.final_queries] def __enter__(self): self.force_debug_cursor = self.connection.force_debug_cursor self.connection.force_debug_cursor = True # Run any initialization queries if needed so that they won't be # included as part of the count. 
        self.connection.ensure_connection()
        self.initial_queries = len(self.connection.queries_log)
        self.final_queries = None
        request_started.disconnect(reset_queries)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.connection.force_debug_cursor = self.force_debug_cursor
        request_started.connect(reset_queries)
        if exc_type is not None:
            return
        self.final_queries = len(self.connection.queries_log)


class ignore_warnings(TestContextDecorator):
    def __init__(self, **kwargs):
        self.ignore_kwargs = kwargs
        if 'message' in self.ignore_kwargs or 'module' in self.ignore_kwargs:
            self.filter_func = warnings.filterwarnings
        else:
            self.filter_func = warnings.simplefilter
        super().__init__()

    def enable(self):
        self.catch_warnings = warnings.catch_warnings()
        self.catch_warnings.__enter__()
        self.filter_func('ignore', **self.ignore_kwargs)

    def disable(self):
        self.catch_warnings.__exit__(*sys.exc_info())


# On OSes that don't provide tzset (Windows), we can't set the timezone
# in which the program runs. As a consequence, we must skip tests that
# don't enforce a specific timezone (with timezone.override or equivalent),
# or attempt to interpret naive datetimes in the default timezone.
requires_tz_support = skipUnless(
    TZ_SUPPORT,
    "This test relies on the ability to run a program in an arbitrary "
    "time zone, but your operating system isn't able to do that."
)


@contextmanager
def extend_sys_path(*paths):
    """Context manager to temporarily add paths to sys.path."""
    _orig_sys_path = sys.path[:]
    sys.path.extend(paths)
    try:
        yield
    finally:
        sys.path = _orig_sys_path


@contextmanager
def isolate_lru_cache(lru_cache_object):
    """Clear the cache of an LRU cache object on entering and exiting."""
    lru_cache_object.cache_clear()
    try:
        yield
    finally:
        lru_cache_object.cache_clear()


@contextmanager
def captured_output(stream_name):
    """Return a context manager used by captured_stdout/stdin/stderr that
    temporarily replaces the sys stream *stream_name* with a StringIO.

    Note: This function and the following ``captured_std*`` are copied
    from CPython's ``test.support`` module."""
    orig_stdout = getattr(sys, stream_name)
    setattr(sys, stream_name, StringIO())
    try:
        yield getattr(sys, stream_name)
    finally:
        setattr(sys, stream_name, orig_stdout)


def captured_stdout():
    """Capture the output of sys.stdout:

       with captured_stdout() as stdout:
           print("hello")
       self.assertEqual(stdout.getvalue(), "hello\n")
    """
    return captured_output("stdout")


def captured_stderr():
    """Capture the output of sys.stderr:

       with captured_stderr() as stderr:
           print("hello", file=sys.stderr)
       self.assertEqual(stderr.getvalue(), "hello\n")
    """
    return captured_output("stderr")


def captured_stdin():
    """Capture the input to sys.stdin:

       with captured_stdin() as stdin:
           stdin.write('hello\n')
           stdin.seek(0)
           # call test code that consumes from sys.stdin
           captured = input()
       self.assertEqual(captured, "hello")
    """
    return captured_output("stdin")


@contextmanager
def freeze_time(t):
    """
    Context manager to temporarily freeze time.time(). This temporarily
    modifies the time function of the time module. Modules which import the
    time function directly (e.g. `from time import time`) won't be affected.
    This isn't meant as a public API, but helps reduce some repetitive code in
    Django's test suite.
    """
    _real_time = time.time
    time.time = lambda: t
    try:
        yield
    finally:
        time.time = _real_time


def require_jinja2(test_func):
    """
    Decorator to enable a Jinja2 template engine in addition to the regular
    Django template engine for a test or skip it if Jinja2 isn't available.
""" test_func = skipIf(jinja2 is None, "this test requires jinja2")(test_func) return override_settings(TEMPLATES=[{ 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'APP_DIRS': True, }, { 'BACKEND': 'django.template.backends.jinja2.Jinja2', 'APP_DIRS': True, 'OPTIONS': {'keep_trailing_newline': True}, }])(test_func) class override_script_prefix(TestContextDecorator): """Decorator or context manager to temporary override the script prefix.""" def __init__(self, prefix): self.prefix = prefix super().__init__() def enable(self): self.old_prefix = get_script_prefix() set_script_prefix(self.prefix) def disable(self): set_script_prefix(self.old_prefix) class LoggingCaptureMixin: """ Capture the output from the 'django' logger and store it on the class's logger_output attribute. """ def setUp(self): self.logger = logging.getLogger('django') self.old_stream = self.logger.handlers[0].stream self.logger_output = StringIO() self.logger.handlers[0].stream = self.logger_output def tearDown(self): self.logger.handlers[0].stream = self.old_stream class isolate_apps(TestContextDecorator): """ Act as either a decorator or a context manager to register models defined in its wrapped context to an isolated registry. The list of installed apps the isolated registry should contain must be passed as arguments. Two optional keyword arguments can be specified: `attr_name`: attribute assigned the isolated registry if used as a class decorator. `kwarg_name`: keyword argument passing the isolated registry if used as a function decorator. """ def __init__(self, *installed_apps, **kwargs): self.installed_apps = installed_apps super().__init__(**kwargs) def enable(self): self.old_apps = Options.default_apps apps = Apps(self.installed_apps) setattr(Options, 'default_apps', apps) return apps def disable(self): setattr(Options, 'default_apps', self.old_apps) class TimeKeeper: def __init__(self): self.records = collections.defaultdict(list) @contextmanager def timed(self, name): self.records[name] start_time = time.perf_counter() try: yield finally: end_time = time.perf_counter() - start_time self.records[name].append(end_time) def print_results(self): for name, end_times in self.records.items(): for record_time in end_times: record = '%s took %.3fs' % (name, record_time) sys.stderr.write(record + os.linesep) class NullTimeKeeper: @contextmanager def timed(self, name): yield def print_results(self): pass def tag(*tags): """Decorator to add tags to a test class or method.""" def decorator(obj): if hasattr(obj, 'tags'): obj.tags = obj.tags.union(tags) else: setattr(obj, 'tags', set(tags)) return obj return decorator @contextmanager def register_lookup(field, *lookups, lookup_name=None): """ Context manager to temporarily register lookups on a model field using lookup_name (or the lookup's lookup_name if not provided). """ try: for lookup in lookups: field.register_lookup(lookup, lookup_name) yield finally: for lookup in lookups: field._unregister_lookup(lookup, lookup_name)
d80e4e20f95e6232ac909572a71d17227fe810663934c5fbaac3e9671f363d37
import functools
import re
import sys
import types
import warnings
from pathlib import Path

from django.conf import settings
from django.http import Http404, HttpResponse, HttpResponseNotFound
from django.template import Context, Engine, TemplateDoesNotExist
from django.template.defaultfilters import pprint
from django.urls import resolve
from django.utils import timezone
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str
from django.utils.module_loading import import_string
from django.utils.regex_helper import _lazy_re_compile
from django.utils.version import get_docs_version

# Minimal Django templates engine to render the error templates
# regardless of the project's TEMPLATES setting. Templates are
# read directly from the filesystem so that the error handler
# works even if the template loader is broken.
DEBUG_ENGINE = Engine(
    debug=True,
    libraries={'i18n': 'django.templatetags.i18n'},
)


def builtin_template_path(name):
    """
    Return a path to a builtin template.

    Avoid calling this function at the module level or in a class-definition
    because __file__ may not exist, e.g. in frozen environments.
    """
    return Path(__file__).parent / 'templates' / name


class ExceptionCycleWarning(UserWarning):
    pass


class CallableSettingWrapper:
    """
    Object to wrap callables appearing in settings.

    * Not to call in the debug page (#21345).
    * Not to break the debug page if the callable forbids setting attributes
      (#23070).
    """
    def __init__(self, callable_setting):
        self._wrapped = callable_setting

    def __repr__(self):
        return repr(self._wrapped)


def technical_500_response(request, exc_type, exc_value, tb, status_code=500):
    """
    Create a technical server error response. The last three arguments are
    the values returned from sys.exc_info() and friends.
    """
    reporter = get_exception_reporter_class(request)(request, exc_type, exc_value, tb)
    if request.accepts('text/html'):
        html = reporter.get_traceback_html()
        return HttpResponse(html, status=status_code, content_type='text/html')
    else:
        text = reporter.get_traceback_text()
        return HttpResponse(text, status=status_code, content_type='text/plain; charset=utf-8')


@functools.lru_cache()
def get_default_exception_reporter_filter():
    # Instantiate the default filter for the first time and cache it.
    return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)()


def get_exception_reporter_filter(request):
    default_filter = get_default_exception_reporter_filter()
    return getattr(request, 'exception_reporter_filter', default_filter)


def get_exception_reporter_class(request):
    default_exception_reporter_class = import_string(settings.DEFAULT_EXCEPTION_REPORTER)
    return getattr(request, 'exception_reporter_class', default_exception_reporter_class)


class SafeExceptionReporterFilter:
    """
    Use annotations made by the sensitive_post_parameters and
    sensitive_variables decorators to filter out sensitive information.
    """
    cleansed_substitute = '********************'
    hidden_settings = _lazy_re_compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE', flags=re.I)

    def cleanse_setting(self, key, value):
        """
        Cleanse an individual setting key/value of sensitive content. If the
        value is a dictionary, recursively cleanse the keys in that dictionary.
""" try: is_sensitive = self.hidden_settings.search(key) except TypeError: is_sensitive = False if is_sensitive: cleansed = self.cleansed_substitute elif isinstance(value, dict): cleansed = {k: self.cleanse_setting(k, v) for k, v in value.items()} elif isinstance(value, list): cleansed = [self.cleanse_setting('', v) for v in value] elif isinstance(value, tuple): cleansed = tuple([self.cleanse_setting('', v) for v in value]) else: cleansed = value if callable(cleansed): cleansed = CallableSettingWrapper(cleansed) return cleansed def get_safe_settings(self): """ Return a dictionary of the settings module with values of sensitive settings replaced with stars (*********). """ settings_dict = {} for k in dir(settings): if k.isupper(): settings_dict[k] = self.cleanse_setting(k, getattr(settings, k)) return settings_dict def get_safe_request_meta(self, request): """ Return a dictionary of request.META with sensitive values redacted. """ if not hasattr(request, 'META'): return {} return {k: self.cleanse_setting(k, v) for k, v in request.META.items()} def is_active(self, request): """ This filter is to add safety in production environments (i.e. DEBUG is False). If DEBUG is True then your site is not safe anyway. This hook is provided as a convenience to easily activate or deactivate the filter on a per request basis. """ return settings.DEBUG is False def get_cleansed_multivaluedict(self, request, multivaluedict): """ Replace the keys in a MultiValueDict marked as sensitive with stars. This mitigates leaking sensitive POST parameters if something like request.POST['nonexistent_key'] throws an exception (#21098). """ sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', []) if self.is_active(request) and sensitive_post_parameters: multivaluedict = multivaluedict.copy() for param in sensitive_post_parameters: if param in multivaluedict: multivaluedict[param] = self.cleansed_substitute return multivaluedict def get_post_parameters(self, request): """ Replace the values of POST parameters marked as sensitive with stars (*********). """ if request is None: return {} else: sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', []) if self.is_active(request) and sensitive_post_parameters: cleansed = request.POST.copy() if sensitive_post_parameters == '__ALL__': # Cleanse all parameters. for k in cleansed: cleansed[k] = self.cleansed_substitute return cleansed else: # Cleanse only the specified parameters. for param in sensitive_post_parameters: if param in cleansed: cleansed[param] = self.cleansed_substitute return cleansed else: return request.POST def cleanse_special_types(self, request, value): try: # If value is lazy or a complex object of another kind, this check # might raise an exception. isinstance checks that lazy # MultiValueDicts will have a return value. is_multivalue_dict = isinstance(value, MultiValueDict) except Exception as e: return '{!r} while evaluating {!r}'.format(e, value) if is_multivalue_dict: # Cleanse MultiValueDicts (request.POST is the one we usually care about) value = self.get_cleansed_multivaluedict(request, value) return value def get_traceback_frame_variables(self, request, tb_frame): """ Replace the values of variables marked as sensitive with stars (*********). """ # Loop through the frame's callers to see if the sensitive_variables # decorator was used. 
current_frame = tb_frame.f_back sensitive_variables = None while current_frame is not None: if (current_frame.f_code.co_name == 'sensitive_variables_wrapper' and 'sensitive_variables_wrapper' in current_frame.f_locals): # The sensitive_variables decorator was used, so we take note # of the sensitive variables' names. wrapper = current_frame.f_locals['sensitive_variables_wrapper'] sensitive_variables = getattr(wrapper, 'sensitive_variables', None) break current_frame = current_frame.f_back cleansed = {} if self.is_active(request) and sensitive_variables: if sensitive_variables == '__ALL__': # Cleanse all variables for name in tb_frame.f_locals: cleansed[name] = self.cleansed_substitute else: # Cleanse specified variables for name, value in tb_frame.f_locals.items(): if name in sensitive_variables: value = self.cleansed_substitute else: value = self.cleanse_special_types(request, value) cleansed[name] = value else: # Potentially cleanse the request and any MultiValueDicts if they # are one of the frame variables. for name, value in tb_frame.f_locals.items(): cleansed[name] = self.cleanse_special_types(request, value) if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper' and 'sensitive_variables_wrapper' in tb_frame.f_locals): # For good measure, obfuscate the decorated function's arguments in # the sensitive_variables decorator's frame, in case the variables # associated with those arguments were meant to be obfuscated from # the decorated function's frame. cleansed['func_args'] = self.cleansed_substitute cleansed['func_kwargs'] = self.cleansed_substitute return cleansed.items() class ExceptionReporter: """Organize and coordinate reporting on exceptions.""" @property def html_template_path(self): return builtin_template_path('technical_500.html') @property def text_template_path(self): return builtin_template_path('technical_500.txt') def __init__(self, request, exc_type, exc_value, tb, is_email=False): self.request = request self.filter = get_exception_reporter_filter(self.request) self.exc_type = exc_type self.exc_value = exc_value self.tb = tb self.is_email = is_email self.template_info = getattr(self.exc_value, 'template_debug', None) self.template_does_not_exist = False self.postmortem = None def _get_raw_insecure_uri(self): """ Return an absolute URI from variables available in this request. Skip allowed hosts protection, so may return insecure URI. 
""" return '{scheme}://{host}{path}'.format( scheme=self.request.scheme, host=self.request._get_raw_host(), path=self.request.get_full_path(), ) def get_traceback_data(self): """Return a dictionary containing traceback information.""" if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist): self.template_does_not_exist = True self.postmortem = self.exc_value.chain or [self.exc_value] frames = self.get_traceback_frames() for i, frame in enumerate(frames): if 'vars' in frame: frame_vars = [] for k, v in frame['vars']: v = pprint(v) # Trim large blobs of data if len(v) > 4096: v = '%s… <trimmed %d bytes string>' % (v[0:4096], len(v)) frame_vars.append((k, v)) frame['vars'] = frame_vars frames[i] = frame unicode_hint = '' if self.exc_type and issubclass(self.exc_type, UnicodeError): start = getattr(self.exc_value, 'start', None) end = getattr(self.exc_value, 'end', None) if start is not None and end is not None: unicode_str = self.exc_value.args[1] unicode_hint = force_str( unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))], 'ascii', errors='replace' ) from django import get_version if self.request is None: user_str = None else: try: user_str = str(self.request.user) except Exception: # request.user may raise OperationalError if the database is # unavailable, for example. user_str = '[unable to retrieve the current user]' c = { 'is_email': self.is_email, 'unicode_hint': unicode_hint, 'frames': frames, 'request': self.request, 'request_meta': self.filter.get_safe_request_meta(self.request), 'user_str': user_str, 'filtered_POST_items': list(self.filter.get_post_parameters(self.request).items()), 'settings': self.filter.get_safe_settings(), 'sys_executable': sys.executable, 'sys_version_info': '%d.%d.%d' % sys.version_info[0:3], 'server_time': timezone.now(), 'django_version_info': get_version(), 'sys_path': sys.path, 'template_info': self.template_info, 'template_does_not_exist': self.template_does_not_exist, 'postmortem': self.postmortem, } if self.request is not None: c['request_GET_items'] = self.request.GET.items() c['request_FILES_items'] = self.request.FILES.items() c['request_COOKIES_items'] = self.request.COOKIES.items() c['request_insecure_uri'] = self._get_raw_insecure_uri() # Check whether exception info is available if self.exc_type: c['exception_type'] = self.exc_type.__name__ if self.exc_value: c['exception_value'] = str(self.exc_value) if frames: c['lastframe'] = frames[-1] return c def get_traceback_html(self): """Return HTML version of debug 500 HTTP error page.""" with self.html_template_path.open(encoding='utf-8') as fh: t = DEBUG_ENGINE.from_string(fh.read()) c = Context(self.get_traceback_data(), use_l10n=False) return t.render(c) def get_traceback_text(self): """Return plain text version of debug 500 HTTP error page.""" with self.text_template_path.open(encoding='utf-8') as fh: t = DEBUG_ENGINE.from_string(fh.read()) c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False) return t.render(c) def _get_source(self, filename, loader, module_name): source = None if hasattr(loader, 'get_source'): try: source = loader.get_source(module_name) except ImportError: pass if source is not None: source = source.splitlines() if source is None: try: with open(filename, 'rb') as fp: source = fp.read().splitlines() except OSError: pass return source def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None): """ Return context_lines before and after lineno from file. 
Return (pre_context_lineno, pre_context, context_line, post_context). """ source = self._get_source(filename, loader, module_name) if source is None: return None, [], None, [] # If we just read the source from a file, or if the loader did not # apply tokenize.detect_encoding to decode the source into a # string, then we should do that ourselves. if isinstance(source[0], bytes): encoding = 'ascii' for line in source[:2]: # File coding may be specified. Match pattern from PEP-263 # (https://www.python.org/dev/peps/pep-0263/) match = re.search(br'coding[:=]\s*([-\w.]+)', line) if match: encoding = match[1].decode('ascii') break source = [str(sline, encoding, 'replace') for sline in source] lower_bound = max(0, lineno - context_lines) upper_bound = lineno + context_lines try: pre_context = source[lower_bound:lineno] context_line = source[lineno] post_context = source[lineno + 1:upper_bound] except IndexError: return None, [], None, [] return lower_bound, pre_context, context_line, post_context def _get_explicit_or_implicit_cause(self, exc_value): explicit = getattr(exc_value, '__cause__', None) suppress_context = getattr(exc_value, '__suppress_context__', None) implicit = getattr(exc_value, '__context__', None) return explicit or (None if suppress_context else implicit) def get_traceback_frames(self): # Get the exception and all its causes exceptions = [] exc_value = self.exc_value while exc_value: exceptions.append(exc_value) exc_value = self._get_explicit_or_implicit_cause(exc_value) if exc_value in exceptions: warnings.warn( "Cycle in the exception chain detected: exception '%s' " "encountered again." % exc_value, ExceptionCycleWarning, ) # Avoid infinite loop if there's a cyclic reference (#29393). break frames = [] # No exceptions were supplied to ExceptionReporter if not exceptions: return frames # In case there's just one exception, take the traceback from self.tb exc_value = exceptions.pop() tb = self.tb if not exceptions else exc_value.__traceback__ while True: frames.extend(self.get_exception_traceback_frames(exc_value, tb)) try: exc_value = exceptions.pop() except IndexError: break tb = exc_value.__traceback__ return frames def get_exception_traceback_frames(self, exc_value, tb): exc_cause = self._get_explicit_or_implicit_cause(exc_value) exc_cause_explicit = getattr(exc_value, '__cause__', True) if tb is None: yield { 'exc_cause': exc_cause, 'exc_cause_explicit': exc_cause_explicit, 'tb': None, 'type': 'user', } while tb is not None: # Support for __traceback_hide__ which is used by a few libraries # to hide internal frames. 
if tb.tb_frame.f_locals.get('__traceback_hide__'): tb = tb.tb_next continue filename = tb.tb_frame.f_code.co_filename function = tb.tb_frame.f_code.co_name lineno = tb.tb_lineno - 1 loader = tb.tb_frame.f_globals.get('__loader__') module_name = tb.tb_frame.f_globals.get('__name__') or '' pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file( filename, lineno, 7, loader, module_name, ) if pre_context_lineno is None: pre_context_lineno = lineno pre_context = [] context_line = '<source code not available>' post_context = [] yield { 'exc_cause': exc_cause, 'exc_cause_explicit': exc_cause_explicit, 'tb': tb, 'type': 'django' if module_name.startswith('django.') else 'user', 'filename': filename, 'function': function, 'lineno': lineno + 1, 'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame), 'id': id(tb), 'pre_context': pre_context, 'context_line': context_line, 'post_context': post_context, 'pre_context_lineno': pre_context_lineno + 1, } tb = tb.tb_next def technical_404_response(request, exception): """Create a technical 404 error response. `exception` is the Http404.""" try: error_url = exception.args[0]['path'] except (IndexError, TypeError, KeyError): error_url = request.path_info[1:] # Trim leading slash try: tried = exception.args[0]['tried'] except (IndexError, TypeError, KeyError): resolved = True tried = request.resolver_match.tried if request.resolver_match else None else: resolved = False if (not tried or ( # empty URLconf request.path == '/' and len(tried) == 1 and # default URLconf len(tried[0]) == 1 and getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin' )): return default_urlconf(request) urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF) if isinstance(urlconf, types.ModuleType): urlconf = urlconf.__name__ caller = '' try: resolver_match = resolve(request.path) except Http404: pass else: obj = resolver_match.func if hasattr(obj, 'view_class'): caller = obj.view_class elif hasattr(obj, '__name__'): caller = obj.__name__ elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'): caller = obj.__class__.__name__ if hasattr(obj, '__module__'): module = obj.__module__ caller = '%s.%s' % (module, caller) with builtin_template_path('technical_404.html').open(encoding='utf-8') as fh: t = DEBUG_ENGINE.from_string(fh.read()) reporter_filter = get_default_exception_reporter_filter() c = Context({ 'urlconf': urlconf, 'root_urlconf': settings.ROOT_URLCONF, 'request_path': error_url, 'urlpatterns': tried, 'resolved': resolved, 'reason': str(exception), 'request': request, 'settings': reporter_filter.get_safe_settings(), 'raising_view_name': caller, }) return HttpResponseNotFound(t.render(c), content_type='text/html') def default_urlconf(request): """Create an empty URLconf 404 error response.""" with builtin_template_path('default_urlconf.html').open(encoding='utf-8') as fh: t = DEBUG_ENGINE.from_string(fh.read()) c = Context({ 'version': get_docs_version(), }) return HttpResponse(t.render(c), content_type='text/html')
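# ---------------------------------------------------------------------------
# Usage sketch (editorial addition, not part of Django itself; kept commented
# out so the module's behavior is unchanged): the filter used by the reporter
# above is swappable through the DEFAULT_EXCEPTION_REPORTER_FILTER setting. A
# minimal subclass that widens the set of redacted setting names; the
# StrictReporterFilter class and the myproject.debug module path are
# hypothetical.
#
#     class StrictReporterFilter(SafeExceptionReporterFilter):
#         hidden_settings = _lazy_re_compile(
#             'API|TOKEN|KEY|SECRET|PASS|SIGNATURE|URL', flags=re.I,
#         )
#
#     # settings.py
#     DEFAULT_EXCEPTION_REPORTER_FILTER = 'myproject.debug.StrictReporterFilter'
# ---------------------------------------------------------------------------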
8fa2e05adaeed8ce0c9bbe74377dcbfe6bbea948aec5f7639888dbb49090bda1
from django.conf import settings
from django.http import HttpResponseForbidden
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.utils.translation import gettext as _
from django.utils.version import get_docs_version

# We include the template inline since we need to be able to reliably display
# this error message, especially for the sake of developers, and there isn't
# any other way of making it available independent of what is in the settings
# file.

# Only the text appearing with DEBUG=False is translated. Normal translation
# tags cannot be used for this inline template, as makemessages would not be
# able to discover the strings.

CSRF_FAILURE_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
  <meta http-equiv="content-type" content="text/html; charset=utf-8">
  <meta name="robots" content="NONE,NOARCHIVE">
  <title>403 Forbidden</title>
  <style type="text/css">
    html * { padding:0; margin:0; }
    body * { padding:10px 20px; }
    body * * { padding:0; }
    body { font:small sans-serif; background:#eee; color:#000; }
    body>div { border-bottom:1px solid #ddd; }
    h1 { font-weight:normal; margin-bottom:.4em; }
    h1 span { font-size:60%; color:#666; font-weight:normal; }
    #info { background:#f6f6f6; }
    #info ul { margin: 0.5em 4em; }
    #info p, #summary p { padding-top:10px; }
    #summary { background: #ffc; }
    #explanation { background:#eee; border-bottom: 0px none; }
  </style>
</head>
<body>
<div id="summary">
  <h1>{{ title }} <span>(403)</span></h1>
  <p>{{ main }}</p>
{% if no_referer %}
  <p>{{ no_referer1 }}</p>
  <p>{{ no_referer2 }}</p>
  <p>{{ no_referer3 }}</p>
{% endif %}
{% if no_cookie %}
  <p>{{ no_cookie1 }}</p>
  <p>{{ no_cookie2 }}</p>
{% endif %}
</div>
{% if DEBUG %}
<div id="info">
  <h2>Help</h2>
  {% if reason %}
  <p>Reason given for failure:</p>
  <pre>
  {{ reason }}
  </pre>
  {% endif %}

  <p>In general, this can occur when there is a genuine Cross Site Request
  Forgery, or when
  <a href="https://docs.djangoproject.com/en/{{ docs_version }}/ref/csrf/">Django’s
  CSRF mechanism</a> has not been used correctly. For POST forms, you need to
  ensure:</p>

  <ul>
    <li>Your browser is accepting cookies.</li>

    <li>The view function passes a <code>request</code> to the template’s
    <a href="https://docs.djangoproject.com/en/dev/topics/templates/#django.template.backends.base.Template.render"><code>render</code></a>
    method.</li>

    <li>In the template, there is a <code>{% templatetag openblock %} csrf_token
    {% templatetag closeblock %}</code> template tag inside each POST form that
    targets an internal URL.</li>

    <li>If you are not using <code>CsrfViewMiddleware</code>, then you must use
    <code>csrf_protect</code> on any views that use the <code>csrf_token</code>
    template tag, as well as those that accept the POST data.</li>

    <li>The form has a valid CSRF token. After logging in in another browser
    tab or hitting the back button after a login, you may need to reload the
    page with the form, because the token is rotated after a login.</li>
  </ul>

  <p>You’re seeing the help section of this page because you have <code>DEBUG =
  True</code> in your Django settings file. Change that to <code>False</code>,
  and only the initial error message will be displayed.
</p> <p>You can customize this page using the CSRF_FAILURE_VIEW setting.</p> </div> {% else %} <div id="explanation"> <p><small>{{ more }}</small></p> </div> {% endif %} </body> </html> """ CSRF_FAILURE_TEMPLATE_NAME = "403_csrf.html" def csrf_failure(request, reason="", template_name=CSRF_FAILURE_TEMPLATE_NAME): """ Default view used when request fails CSRF protection """ from django.middleware.csrf import REASON_NO_CSRF_COOKIE, REASON_NO_REFERER c = { 'title': _("Forbidden"), 'main': _("CSRF verification failed. Request aborted."), 'reason': reason, 'no_referer': reason == REASON_NO_REFERER, 'no_referer1': _( 'You are seeing this message because this HTTPS site requires a ' '“Referer header” to be sent by your web browser, but none was ' 'sent. This header is required for security reasons, to ensure ' 'that your browser is not being hijacked by third parties.'), 'no_referer2': _( 'If you have configured your browser to disable “Referer” headers, ' 'please re-enable them, at least for this site, or for HTTPS ' 'connections, or for “same-origin” requests.'), 'no_referer3': _( 'If you are using the <meta name="referrer" ' 'content=\"no-referrer\"> tag or including the “Referrer-Policy: ' 'no-referrer” header, please remove them. The CSRF protection ' 'requires the “Referer” header to do strict referer checking. If ' 'you’re concerned about privacy, use alternatives like ' '<a rel=\"noreferrer\" …> for links to third-party sites.'), 'no_cookie': reason == REASON_NO_CSRF_COOKIE, 'no_cookie1': _( "You are seeing this message because this site requires a CSRF " "cookie when submitting forms. This cookie is required for " "security reasons, to ensure that your browser is not being " "hijacked by third parties."), 'no_cookie2': _( 'If you have configured your browser to disable cookies, please ' 're-enable them, at least for this site, or for “same-origin” ' 'requests.'), 'DEBUG': settings.DEBUG, 'docs_version': get_docs_version(), 'more': _("More information is available with DEBUG=True."), } try: t = loader.get_template(template_name) except TemplateDoesNotExist: if template_name == CSRF_FAILURE_TEMPLATE_NAME: # If the default template doesn't exist, use the string template. t = Engine().from_string(CSRF_FAILURE_TEMPLATE) c = Context(c) else: # Raise if a developer-specified template doesn't exist. raise return HttpResponseForbidden(t.render(c), content_type='text/html')
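A minimal sketch of the override hooks above: either place a `403_csrf.html` template on the template path, or point CSRF_FAILURE_VIEW at a wrapper view. The `myproject.views` module and the logging are illustrative assumptions:

# settings.py
CSRF_FAILURE_VIEW = 'myproject.views.csrf_failure'  # hypothetical path

# myproject/views.py
import logging

from django.views.csrf import csrf_failure as default_csrf_failure

logger = logging.getLogger(__name__)

def csrf_failure(request, reason=''):
    # Custom failure views must accept `reason` as a keyword argument.
    logger.warning('CSRF failure at %s: %s', request.path, reason)
    # Delegate to the stock view above, which renders 403_csrf.html if it
    # exists and falls back to the inline CSRF_FAILURE_TEMPLATE otherwise.
    return default_csrf_failure(request, reason=reason)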
b483c256b2288d5e333444c55593eb8d9c05337ae9d90d5b23f2a31b030466d7
""" Settings and configuration for Django. Read values from the module specified by the DJANGO_SETTINGS_MODULE environment variable, and then from django.conf.global_settings; see the global_settings.py for a list of all possible variables. """ import importlib import os import time import warnings from pathlib import Path from django.conf import global_settings from django.core.exceptions import ImproperlyConfigured from django.utils.deprecation import RemovedInDjango50Warning from django.utils.functional import LazyObject, empty ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE" class SettingsReference(str): """ String subclass which references a current settings value. It's treated as the value in memory but serializes to a settings.NAME attribute reference. """ def __new__(self, value, setting_name): return str.__new__(self, value) def __init__(self, value, setting_name): self.setting_name = setting_name class LazySettings(LazyObject): """ A lazy proxy for either global Django settings or a custom settings object. The user can manually configure settings prior to using them. Otherwise, Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE. """ def _setup(self, name=None): """ Load the settings module pointed to by the environment variable. This is used the first time settings are needed, if the user hasn't configured settings manually. """ settings_module = os.environ.get(ENVIRONMENT_VARIABLE) if not settings_module: desc = ("setting %s" % name) if name else "settings" raise ImproperlyConfigured( "Requested %s, but settings are not configured. " "You must either define the environment variable %s " "or call settings.configure() before accessing settings." % (desc, ENVIRONMENT_VARIABLE)) self._wrapped = Settings(settings_module) def __repr__(self): # Hardcode the class name as otherwise it yields 'Settings'. if self._wrapped is empty: return '<LazySettings [Unevaluated]>' return '<LazySettings "%(settings_module)s">' % { 'settings_module': self._wrapped.SETTINGS_MODULE, } def __getattr__(self, name): """Return the value of a setting and cache it in self.__dict__.""" if self._wrapped is empty: self._setup(name) val = getattr(self._wrapped, name) # Special case some settings which require further modification. # This is done here for performance reasons so the modified value is cached. if name in {'MEDIA_URL', 'STATIC_URL'} and val is not None: val = self._add_script_prefix(val) elif name == 'SECRET_KEY' and not val: raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.") self.__dict__[name] = val return val def __setattr__(self, name, value): """ Set the value of setting. Clear all cached values if _wrapped changes (@override_settings does this) or clear single values when set. """ if name == '_wrapped': self.__dict__.clear() else: self.__dict__.pop(name, None) super().__setattr__(name, value) def __delattr__(self, name): """Delete a setting and clear it from cache if needed.""" super().__delattr__(name) self.__dict__.pop(name, None) def configure(self, default_settings=global_settings, **options): """ Called to manually configure the settings. The 'default_settings' parameter sets where to retrieve any unspecified values from (its argument must support attribute access (__getattr__)). """ if self._wrapped is not empty: raise RuntimeError('Settings already configured.') holder = UserSettingsHolder(default_settings) for name, value in options.items(): if not name.isupper(): raise TypeError('Setting %r must be uppercase.' 
% name) setattr(holder, name, value) self._wrapped = holder @staticmethod def _add_script_prefix(value): """ Add SCRIPT_NAME prefix to relative paths. Useful when the app is being served at a subpath and manually prefixing subpath to STATIC_URL and MEDIA_URL in settings is inconvenient. """ # Don't apply prefix to absolute paths and URLs. if value.startswith(('http://', 'https://', '/')): return value from django.urls import get_script_prefix return '%s%s' % (get_script_prefix(), value) @property def configured(self): """Return True if the settings have already been configured.""" return self._wrapped is not empty class Settings: def __init__(self, settings_module): # update this dict from global settings (but only for ALL_CAPS settings) for setting in dir(global_settings): if setting.isupper(): setattr(self, setting, getattr(global_settings, setting)) # store the settings module in case someone later cares self.SETTINGS_MODULE = settings_module mod = importlib.import_module(self.SETTINGS_MODULE) tuple_settings = ( 'ALLOWED_HOSTS', "INSTALLED_APPS", "TEMPLATE_DIRS", "LOCALE_PATHS", ) self._explicit_settings = set() for setting in dir(mod): if setting.isupper(): setting_value = getattr(mod, setting) if (setting in tuple_settings and not isinstance(setting_value, (list, tuple))): raise ImproperlyConfigured("The %s setting must be a list or a tuple." % setting) setattr(self, setting, setting_value) self._explicit_settings.add(setting) if self.USE_TZ is False and not self.is_overridden('USE_TZ'): warnings.warn( 'The default value of USE_TZ will change from False to True ' 'in Django 5.0. Set USE_TZ to False in your project settings ' 'if you want to keep the current default behavior.', category=RemovedInDjango50Warning, ) if hasattr(time, 'tzset') and self.TIME_ZONE: # When we can, attempt to validate the timezone. If we can't find # this file, no check happens and it's harmless. zoneinfo_root = Path('/usr/share/zoneinfo') zone_info_file = zoneinfo_root.joinpath(*self.TIME_ZONE.split('/')) if zoneinfo_root.exists() and not zone_info_file.exists(): raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE) # Move the time zone info into os.environ. See ticket #2315 for why # we don't do this unconditionally (breaks Windows). os.environ['TZ'] = self.TIME_ZONE time.tzset() def is_overridden(self, setting): return setting in self._explicit_settings def __repr__(self): return '<%(cls)s "%(settings_module)s">' % { 'cls': self.__class__.__name__, 'settings_module': self.SETTINGS_MODULE, } class UserSettingsHolder: """Holder for user configured settings.""" # SETTINGS_MODULE doesn't make much sense in the manually configured # (standalone) case. SETTINGS_MODULE = None def __init__(self, default_settings): """ Requests for configuration variables not in this class are satisfied from the module specified in default_settings (if possible). 
""" self.__dict__['_deleted'] = set() self.default_settings = default_settings def __getattr__(self, name): if not name.isupper() or name in self._deleted: raise AttributeError return getattr(self.default_settings, name) def __setattr__(self, name, value): self._deleted.discard(name) super().__setattr__(name, value) def __delattr__(self, name): self._deleted.add(name) if hasattr(self, name): super().__delattr__(name) def __dir__(self): return sorted( s for s in [*self.__dict__, *dir(self.default_settings)] if s not in self._deleted ) def is_overridden(self, setting): deleted = (setting in self._deleted) set_locally = (setting in self.__dict__) set_on_default = getattr(self.default_settings, 'is_overridden', lambda s: False)(setting) return deleted or set_locally or set_on_default def __repr__(self): return '<%(cls)s>' % { 'cls': self.__class__.__name__, } settings = LazySettings()
12a5e624f3b1181ff2473d86992f85c0e713a33240c47d6790e34c6b66ec8c10
""" Default Django settings. Override these with settings in the module pointed to by the DJANGO_SETTINGS_MODULE environment variable. """ # This is defined here as a do-nothing function because we can't import # django.utils.translation -- that module depends on the settings. def gettext_noop(s): return s #################### # CORE # #################### DEBUG = False # Whether the framework should propagate raw exceptions rather than catching # them. This is useful under some testing situations and should never be used # on a live site. DEBUG_PROPAGATE_EXCEPTIONS = False # People who get code error notifications. # In the format [('Full Name', '[email protected]'), ('Full Name', '[email protected]')] ADMINS = [] # List of IP addresses, as strings, that: # * See debug comments, when DEBUG is true # * Receive x-headers INTERNAL_IPS = [] # Hosts/domain names that are valid for this site. # "*" matches anything, ".example.com" matches example.com and all subdomains ALLOWED_HOSTS = [] # Local time zone for this installation. All choices can be found here: # https://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all # systems may support all possibilities). When USE_TZ is True, this is # interpreted as the default user time zone. TIME_ZONE = 'America/Chicago' # If you set this to True, Django will use timezone-aware datetimes. USE_TZ = False # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' # Languages we provide translations for, out of the box. LANGUAGES = [ ('af', gettext_noop('Afrikaans')), ('ar', gettext_noop('Arabic')), ('ar-dz', gettext_noop('Algerian Arabic')), ('ast', gettext_noop('Asturian')), ('az', gettext_noop('Azerbaijani')), ('bg', gettext_noop('Bulgarian')), ('be', gettext_noop('Belarusian')), ('bn', gettext_noop('Bengali')), ('br', gettext_noop('Breton')), ('bs', gettext_noop('Bosnian')), ('ca', gettext_noop('Catalan')), ('cs', gettext_noop('Czech')), ('cy', gettext_noop('Welsh')), ('da', gettext_noop('Danish')), ('de', gettext_noop('German')), ('dsb', gettext_noop('Lower Sorbian')), ('el', gettext_noop('Greek')), ('en', gettext_noop('English')), ('en-au', gettext_noop('Australian English')), ('en-gb', gettext_noop('British English')), ('eo', gettext_noop('Esperanto')), ('es', gettext_noop('Spanish')), ('es-ar', gettext_noop('Argentinian Spanish')), ('es-co', gettext_noop('Colombian Spanish')), ('es-mx', gettext_noop('Mexican Spanish')), ('es-ni', gettext_noop('Nicaraguan Spanish')), ('es-ve', gettext_noop('Venezuelan Spanish')), ('et', gettext_noop('Estonian')), ('eu', gettext_noop('Basque')), ('fa', gettext_noop('Persian')), ('fi', gettext_noop('Finnish')), ('fr', gettext_noop('French')), ('fy', gettext_noop('Frisian')), ('ga', gettext_noop('Irish')), ('gd', gettext_noop('Scottish Gaelic')), ('gl', gettext_noop('Galician')), ('he', gettext_noop('Hebrew')), ('hi', gettext_noop('Hindi')), ('hr', gettext_noop('Croatian')), ('hsb', gettext_noop('Upper Sorbian')), ('hu', gettext_noop('Hungarian')), ('hy', gettext_noop('Armenian')), ('ia', gettext_noop('Interlingua')), ('id', gettext_noop('Indonesian')), ('ig', gettext_noop('Igbo')), ('io', gettext_noop('Ido')), ('is', gettext_noop('Icelandic')), ('it', gettext_noop('Italian')), ('ja', gettext_noop('Japanese')), ('ka', gettext_noop('Georgian')), ('kab', gettext_noop('Kabyle')), ('kk', gettext_noop('Kazakh')), ('km', gettext_noop('Khmer')), ('kn', gettext_noop('Kannada')), ('ko', gettext_noop('Korean')), ('ky', 
gettext_noop('Kyrgyz')), ('lb', gettext_noop('Luxembourgish')), ('lt', gettext_noop('Lithuanian')), ('lv', gettext_noop('Latvian')), ('mk', gettext_noop('Macedonian')), ('ml', gettext_noop('Malayalam')), ('mn', gettext_noop('Mongolian')), ('mr', gettext_noop('Marathi')), ('my', gettext_noop('Burmese')), ('nb', gettext_noop('Norwegian Bokmål')), ('ne', gettext_noop('Nepali')), ('nl', gettext_noop('Dutch')), ('nn', gettext_noop('Norwegian Nynorsk')), ('os', gettext_noop('Ossetic')), ('pa', gettext_noop('Punjabi')), ('pl', gettext_noop('Polish')), ('pt', gettext_noop('Portuguese')), ('pt-br', gettext_noop('Brazilian Portuguese')), ('ro', gettext_noop('Romanian')), ('ru', gettext_noop('Russian')), ('sk', gettext_noop('Slovak')), ('sl', gettext_noop('Slovenian')), ('sq', gettext_noop('Albanian')), ('sr', gettext_noop('Serbian')), ('sr-latn', gettext_noop('Serbian Latin')), ('sv', gettext_noop('Swedish')), ('sw', gettext_noop('Swahili')), ('ta', gettext_noop('Tamil')), ('te', gettext_noop('Telugu')), ('tg', gettext_noop('Tajik')), ('th', gettext_noop('Thai')), ('tk', gettext_noop('Turkmen')), ('tr', gettext_noop('Turkish')), ('tt', gettext_noop('Tatar')), ('udm', gettext_noop('Udmurt')), ('uk', gettext_noop('Ukrainian')), ('ur', gettext_noop('Urdu')), ('uz', gettext_noop('Uzbek')), ('vi', gettext_noop('Vietnamese')), ('zh-hans', gettext_noop('Simplified Chinese')), ('zh-hant', gettext_noop('Traditional Chinese')), ] # Languages using BiDi (right-to-left) layout LANGUAGES_BIDI = ["he", "ar", "ar-dz", "fa", "ur"] # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True LOCALE_PATHS = [] # Settings for language cookie LANGUAGE_COOKIE_NAME = 'django_language' LANGUAGE_COOKIE_AGE = None LANGUAGE_COOKIE_DOMAIN = None LANGUAGE_COOKIE_PATH = '/' LANGUAGE_COOKIE_SECURE = False LANGUAGE_COOKIE_HTTPONLY = False LANGUAGE_COOKIE_SAMESITE = None # If you set this to True, Django will format dates, numbers and calendars # according to user current locale. USE_L10N = False # Not-necessarily-technical managers of the site. They get broken link # notifications and other various emails. MANAGERS = ADMINS # Default charset to use for all HttpResponse objects, if a MIME type isn't # manually specified. It's used to construct the Content-Type header. DEFAULT_CHARSET = 'utf-8' # Email address that error messages come from. SERVER_EMAIL = 'root@localhost' # Database connection info. If left empty, will default to the dummy backend. DATABASES = {} # Classes used to implement DB routing behavior. DATABASE_ROUTERS = [] # The email backend to use. For possible shortcuts see django.core.mail. # The default is to use the SMTP backend. # Third-party backends can be specified by providing a Python path # to a module that defines an EmailBackend class. EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' # Host for sending email. EMAIL_HOST = 'localhost' # Port for sending email. EMAIL_PORT = 25 # Whether to send SMTP 'Date' header in the local time zone or in UTC. EMAIL_USE_LOCALTIME = False # Optional SMTP authentication information for EMAIL_HOST. EMAIL_HOST_USER = '' EMAIL_HOST_PASSWORD = '' EMAIL_USE_TLS = False EMAIL_USE_SSL = False EMAIL_SSL_CERTFILE = None EMAIL_SSL_KEYFILE = None EMAIL_TIMEOUT = None # List of strings representing installed apps. INSTALLED_APPS = [] TEMPLATES = [] # Default form rendering class. 
FORM_RENDERER = 'django.forms.renderers.DjangoTemplates' # Default email address to use for various automated correspondence from # the site managers. DEFAULT_FROM_EMAIL = 'webmaster@localhost' # Subject-line prefix for email messages send with django.core.mail.mail_admins # or ...mail_managers. Make sure to include the trailing space. EMAIL_SUBJECT_PREFIX = '[Django] ' # Whether to append trailing slashes to URLs. APPEND_SLASH = True # Whether to prepend the "www." subdomain to URLs that don't have it. PREPEND_WWW = False # Override the server-derived value of SCRIPT_NAME FORCE_SCRIPT_NAME = None # List of compiled regular expression objects representing User-Agent strings # that are not allowed to visit any page, systemwide. Use this for bad # robots/crawlers. Here are a few examples: # import re # DISALLOWED_USER_AGENTS = [ # re.compile(r'^NaverBot.*'), # re.compile(r'^EmailSiphon.*'), # re.compile(r'^SiteSucker.*'), # re.compile(r'^sohu-search'), # ] DISALLOWED_USER_AGENTS = [] ABSOLUTE_URL_OVERRIDES = {} # List of compiled regular expression objects representing URLs that need not # be reported by BrokenLinkEmailsMiddleware. Here are a few examples: # import re # IGNORABLE_404_URLS = [ # re.compile(r'^/apple-touch-icon.*\.png$'), # re.compile(r'^/favicon.ico$'), # re.compile(r'^/robots.txt$'), # re.compile(r'^/phpmyadmin/'), # re.compile(r'\.(cgi|php|pl)$'), # ] IGNORABLE_404_URLS = [] # A secret key for this particular Django installation. Used in secret-key # hashing algorithms. Set this in your settings, or Django will complain # loudly. SECRET_KEY = '' # Default file storage mechanism that holds media. DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage' # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/var/www/example.com/media/" MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. # Examples: "http://example.com/media/", "http://media.example.com/" MEDIA_URL = '' # Absolute path to the directory static files should be collected to. # Example: "/var/www/example.com/static/" STATIC_ROOT = None # URL that handles the static files served from STATIC_ROOT. # Example: "http://example.com/static/", "http://static.example.com/" STATIC_URL = None # List of upload handler classes to be applied in order. FILE_UPLOAD_HANDLERS = [ 'django.core.files.uploadhandler.MemoryFileUploadHandler', 'django.core.files.uploadhandler.TemporaryFileUploadHandler', ] # Maximum size, in bytes, of a request before it will be streamed to the # file system instead of into memory. FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB # Maximum size in bytes of request data (excluding file uploads) that will be # read before a SuspiciousOperation (RequestDataTooBig) is raised. DATA_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB # Maximum number of GET/POST parameters that will be read before a # SuspiciousOperation (TooManyFieldsSent) is raised. DATA_UPLOAD_MAX_NUMBER_FIELDS = 1000 # Directory in which upload streamed files will be temporarily saved. A value of # `None` will make Django use the operating system's default temporary directory # (i.e. "/tmp" on *nix systems). FILE_UPLOAD_TEMP_DIR = None # The numeric mode to set newly-uploaded files to. The value should be a mode # you'd pass directly to os.chmod; see https://docs.python.org/library/os.html#files-and-directories. FILE_UPLOAD_PERMISSIONS = 0o644 # The numeric mode to assign to newly-created directories, when uploading files. 
# The value should be a mode as you'd pass to os.chmod; # see https://docs.python.org/library/os.html#files-and-directories. FILE_UPLOAD_DIRECTORY_PERMISSIONS = None # Python module path where user will place custom format definition. # The directory where this setting is pointing should contain subdirectories # named as the locales, containing a formats.py file # (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use) FORMAT_MODULE_PATH = None # Default formatting for date objects. See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'N j, Y' # Default formatting for datetime objects. See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATETIME_FORMAT = 'N j, Y, P' # Default formatting for time objects. See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date TIME_FORMAT = 'P' # Default formatting for date objects when only the year and month are relevant. # See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date YEAR_MONTH_FORMAT = 'F Y' # Default formatting for date objects when only the month and day are relevant. # See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date MONTH_DAY_FORMAT = 'F j' # Default short formatting for date objects. See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date SHORT_DATE_FORMAT = 'm/d/Y' # Default short formatting for datetime objects. # See all available format strings here: # https://docs.djangoproject.com/en/dev/ref/templates/builtins/#date SHORT_DATETIME_FORMAT = 'm/d/Y P' # Default formats to be used when parsing dates from input boxes, in order # See all available format string here: # https://docs.python.org/library/datetime.html#strftime-behavior # * Note that these format strings are different from the ones to display dates DATE_INPUT_FORMATS = [ '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06' '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006' '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006' '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006' '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006' ] # Default formats to be used when parsing times from input boxes, in order # See all available format string here: # https://docs.python.org/library/datetime.html#strftime-behavior # * Note that these format strings are different from the ones to display dates TIME_INPUT_FORMATS = [ '%H:%M:%S', # '14:30:59' '%H:%M:%S.%f', # '14:30:59.000200' '%H:%M', # '14:30' ] # Default formats to be used when parsing dates and times from input boxes, # in order # See all available format string here: # https://docs.python.org/library/datetime.html#strftime-behavior # * Note that these format strings are different from the ones to display dates DATETIME_INPUT_FORMATS = [ '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59' '%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200' '%Y-%m-%d %H:%M', # '2006-10-25 14:30' '%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59' '%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200' '%m/%d/%Y %H:%M', # '10/25/2006 14:30' '%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59' '%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200' '%m/%d/%y %H:%M', # '10/25/06 14:30' ] # First day of week, to be used on calendars # 0 means Sunday, 1 means Monday... 
FIRST_DAY_OF_WEEK = 0 # Decimal separator symbol DECIMAL_SEPARATOR = '.' # Boolean that sets whether to add thousand separator when formatting numbers USE_THOUSAND_SEPARATOR = False # Number of digits that will be together, when splitting them by # THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands... NUMBER_GROUPING = 0 # Thousand separator symbol THOUSAND_SEPARATOR = ',' # The tablespaces to use for each model when not specified otherwise. DEFAULT_TABLESPACE = '' DEFAULT_INDEX_TABLESPACE = '' # Default primary key field type. DEFAULT_AUTO_FIELD = 'django.db.models.AutoField' # Default X-Frame-Options header value X_FRAME_OPTIONS = 'DENY' USE_X_FORWARDED_HOST = False USE_X_FORWARDED_PORT = False # The Python dotted path to the WSGI application that Django's internal server # (runserver) will use. If `None`, the return value of # 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same # behavior as previous versions of Django. Otherwise this should point to an # actual WSGI application object. WSGI_APPLICATION = None # If your Django app is behind a proxy that sets a header to specify secure # connections, AND that proxy ensures that user-submitted headers with the # same name are ignored (so that people can't spoof it), set this value to # a tuple of (header_name, header_value). For any requests that come in with # that header/value, request.is_secure() will return True. # WARNING! Only set this if you fully understand what you're doing. Otherwise, # you may be opening yourself up to a security risk. SECURE_PROXY_SSL_HEADER = None ############## # MIDDLEWARE # ############## # List of middleware to use. Order is important; in the request phase, these # middleware will be applied in the order given, and in the response # phase the middleware will be applied in reverse order. MIDDLEWARE = [] ############ # SESSIONS # ############ # Cache to store session data if using the cache session backend. SESSION_CACHE_ALIAS = 'default' # Cookie name. This can be whatever you want. SESSION_COOKIE_NAME = 'sessionid' # Age of cookie, in seconds (default: 2 weeks). SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # A string like "example.com", or None for standard domain cookie. SESSION_COOKIE_DOMAIN = None # Whether the session cookie should be secure (https:// only). SESSION_COOKIE_SECURE = False # The path of the session cookie. SESSION_COOKIE_PATH = '/' # Whether to use the HttpOnly flag. SESSION_COOKIE_HTTPONLY = True # Whether to set the flag restricting cookie leaks on cross-site requests. # This can be 'Lax', 'Strict', 'None', or False to disable the flag. SESSION_COOKIE_SAMESITE = 'Lax' # Whether to save the session data on every request. SESSION_SAVE_EVERY_REQUEST = False # Whether a user's session cookie expires when the web browser is closed. SESSION_EXPIRE_AT_BROWSER_CLOSE = False # The module to store session data SESSION_ENGINE = 'django.contrib.sessions.backends.db' # Directory to store session files if using the file session module. If None, # the backend will use a sensible default. SESSION_FILE_PATH = None # class to serialize session data SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer' ######### # CACHE # ######### # The cache backends to use. 
CACHES = { 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', } } CACHE_MIDDLEWARE_KEY_PREFIX = '' CACHE_MIDDLEWARE_SECONDS = 600 CACHE_MIDDLEWARE_ALIAS = 'default' ################## # AUTHENTICATION # ################## AUTH_USER_MODEL = 'auth.User' AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend'] LOGIN_URL = '/accounts/login/' LOGIN_REDIRECT_URL = '/accounts/profile/' LOGOUT_REDIRECT_URL = None # The number of seconds a password reset link is valid for (default: 3 days). PASSWORD_RESET_TIMEOUT = 60 * 60 * 24 * 3 # the first hasher in this list is the preferred algorithm. any # password using different algorithms will be converted automatically # upon login PASSWORD_HASHERS = [ 'django.contrib.auth.hashers.PBKDF2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher', 'django.contrib.auth.hashers.Argon2PasswordHasher', 'django.contrib.auth.hashers.BCryptSHA256PasswordHasher', 'django.contrib.auth.hashers.ScryptPasswordHasher', ] AUTH_PASSWORD_VALIDATORS = [] ########### # SIGNING # ########### SIGNING_BACKEND = 'django.core.signing.TimestampSigner' ######## # CSRF # ######## # Dotted path to callable to be used as view when a request is # rejected by the CSRF middleware. CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure' # Settings for CSRF cookie. CSRF_COOKIE_NAME = 'csrftoken' CSRF_COOKIE_AGE = 60 * 60 * 24 * 7 * 52 CSRF_COOKIE_DOMAIN = None CSRF_COOKIE_PATH = '/' CSRF_COOKIE_SECURE = False CSRF_COOKIE_HTTPONLY = False CSRF_COOKIE_SAMESITE = 'Lax' CSRF_HEADER_NAME = 'HTTP_X_CSRFTOKEN' CSRF_TRUSTED_ORIGINS = [] CSRF_USE_SESSIONS = False ############ # MESSAGES # ############ # Class to use as messages backend MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage' # Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within # django.contrib.messages to avoid imports in this settings file. ########### # LOGGING # ########### # The callable to use to configure logging LOGGING_CONFIG = 'logging.config.dictConfig' # Custom logging configuration. LOGGING = {} # Default exception reporter class used in case none has been # specifically assigned to the HttpRequest instance. DEFAULT_EXCEPTION_REPORTER = 'django.views.debug.ExceptionReporter' # Default exception reporter filter class used in case none has been # specifically assigned to the HttpRequest instance. DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter' ########### # TESTING # ########### # The name of the class to use to run the test suite TEST_RUNNER = 'django.test.runner.DiscoverRunner' # Apps that don't need to be serialized at test database creation time # (only apps with migrations are to start with) TEST_NON_SERIALIZED_APPS = [] ############ # FIXTURES # ############ # The list of directories to search for fixtures FIXTURE_DIRS = [] ############### # STATICFILES # ############### # A list of locations of additional static files STATICFILES_DIRS = [] # The default file storage backend used during the build process STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage' # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = [ 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ] ############## # MIGRATIONS # ############## # Migration module overrides for apps, by app label. 
MIGRATION_MODULES = {} ################# # SYSTEM CHECKS # ################# # List of all issues generated by system checks that should be silenced. Light # issues like warnings, infos or debugs will not generate a message. Silencing # serious issues like errors and criticals does not result in hiding the # message, but Django will not stop you from e.g. running the server. SILENCED_SYSTEM_CHECKS = [] ####################### # SECURITY MIDDLEWARE # ####################### SECURE_CONTENT_TYPE_NOSNIFF = True SECURE_CROSS_ORIGIN_OPENER_POLICY = 'same-origin' SECURE_HSTS_INCLUDE_SUBDOMAINS = False SECURE_HSTS_PRELOAD = False SECURE_HSTS_SECONDS = 0 SECURE_REDIRECT_EXEMPT = [] SECURE_REFERRER_POLICY = 'same-origin' SECURE_SSL_HOST = None SECURE_SSL_REDIRECT = False
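A minimal sketch of a project settings module layered over the defaults above: Settings.__init__() first copies every ALL_CAPS name from this module, then records anything the project module defines in _explicit_settings. The SECRET_KEY value is a placeholder, not a real key:

# settings.py (hypothetical project)
from pathlib import Path

BASE_DIR = Path(__file__).resolve().parent.parent

SECRET_KEY = 'placeholder-not-a-real-key'  # must be non-empty
USE_TZ = True                  # also silences the RemovedInDjango50Warning above
ALLOWED_HOSTS = ['localhost']  # validated to be a list or tuple
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    },
}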
235facedb87f4d9cc9074a717fd52faa1d404d3055ba05557993e6cb4e6c6954
# These classes override date and datetime to ensure that strftime('%Y') # returns four digits (with leading zeros) on years < 1000. # https://bugs.python.org/issue13305 # # Based on code submitted to comp.lang.python by Andrew Dalke # # >>> datetime_safe.date(10, 8, 2).strftime("%Y/%m/%d was a %A") # '0010/08/02 was a Monday' import time import warnings from datetime import date as real_date, datetime as real_datetime from django.utils.deprecation import RemovedInDjango50Warning from django.utils.regex_helper import _lazy_re_compile warnings.warn( 'The django.utils.datetime_safe module is deprecated.', category=RemovedInDjango50Warning, stacklevel=2, ) class date(real_date): def strftime(self, fmt): return strftime(self, fmt) class datetime(real_datetime): def strftime(self, fmt): return strftime(self, fmt) @classmethod def combine(cls, date, time): return cls(date.year, date.month, date.day, time.hour, time.minute, time.second, time.microsecond, time.tzinfo) def date(self): return date(self.year, self.month, self.day) def new_date(d): "Generate a safe date from a datetime.date object." return date(d.year, d.month, d.day) def new_datetime(d): """ Generate a safe datetime from a datetime.date or datetime.datetime object. """ kw = [d.year, d.month, d.day] if isinstance(d, real_datetime): kw.extend([d.hour, d.minute, d.second, d.microsecond, d.tzinfo]) return datetime(*kw) # This library does not support strftime's "%s" or "%y" format strings. # Allowed if there's an even number of "%"s because they are escaped. _illegal_formatting = _lazy_re_compile(r"((^|[^%])(%%)*%[sy])") def _findall(text, substr): # Also finds overlaps sites = [] i = 0 while True: i = text.find(substr, i) if i == -1: break sites.append(i) i += 1 return sites def strftime(dt, fmt): if dt.year >= 1000: return super(type(dt), dt).strftime(fmt) illegal_formatting = _illegal_formatting.search(fmt) if illegal_formatting: raise TypeError('strftime of dates before 1000 does not handle ' + illegal_formatting[0]) year = dt.year # For every non-leap year century, advance by # 6 years to get into the 28-year repeat cycle delta = 2000 - year off = 6 * (delta // 100 + delta // 400) year = year + off # Move to around the year 2000 year = year + ((2000 - year) // 28) * 28 timetuple = dt.timetuple() s1 = time.strftime(fmt, (year,) + timetuple[1:]) sites1 = _findall(s1, str(year)) s2 = time.strftime(fmt, (year + 28,) + timetuple[1:]) sites2 = _findall(s2, str(year + 28)) sites = [] for site in sites1: if site in sites2: sites.append(site) s = s1 syear = "%04d" % dt.year for site in sites: s = s[:site] + syear + s[site + 4:] return s
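A short usage sketch of the shim above (importing it emits the RemovedInDjango50Warning). The point is stable four-digit years for dates before 1000, where platform strftime() output varies:

from datetime import date

from django.utils import datetime_safe

d = datetime_safe.new_date(date(10, 8, 2))
print(d.strftime('%Y/%m/%d'))  # '0010/08/02' on every platform
# For such early dates, '%y' and '%s' are rejected up front:
# d.strftime('%y/%m/%d') raises TypeError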
1b0e832ea4a5606216d2d2dc69391214bc0a92e5ff68ce88d71afa6c617123c9
import functools import itertools import logging import os import signal import subprocess import sys import threading import time import traceback import weakref from collections import defaultdict from pathlib import Path from types import ModuleType from zipimport import zipimporter import django from django.apps import apps from django.core.signals import request_finished from django.dispatch import Signal from django.utils.functional import cached_property from django.utils.version import get_version_tuple autoreload_started = Signal() file_changed = Signal() DJANGO_AUTORELOAD_ENV = 'RUN_MAIN' logger = logging.getLogger('django.utils.autoreload') # If an error is raised while importing a file, it's not placed in sys.modules. # This means that any future modifications aren't caught. Keep a list of these # file paths to allow watching them in the future. _error_files = [] _exception = None try: import termios except ImportError: termios = None try: import pywatchman except ImportError: pywatchman = None def is_django_module(module): """Return True if the given module is nested under Django.""" return module.__name__.startswith('django.') def is_django_path(path): """Return True if the given file path is nested under Django.""" return Path(django.__file__).parent in Path(path).parents def check_errors(fn): @functools.wraps(fn) def wrapper(*args, **kwargs): global _exception try: fn(*args, **kwargs) except Exception: _exception = sys.exc_info() et, ev, tb = _exception if getattr(ev, 'filename', None) is None: # get the filename from the last item in the stack filename = traceback.extract_tb(tb)[-1][0] else: filename = ev.filename if filename not in _error_files: _error_files.append(filename) raise return wrapper def raise_last_exception(): global _exception if _exception is not None: raise _exception[1] def ensure_echo_on(): """ Ensure that echo mode is enabled. Some tools such as PDB disable it which causes usability issues after reload. """ if not termios or not sys.stdin.isatty(): return attr_list = termios.tcgetattr(sys.stdin) if not attr_list[3] & termios.ECHO: attr_list[3] |= termios.ECHO if hasattr(signal, 'SIGTTOU'): old_handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN) else: old_handler = None termios.tcsetattr(sys.stdin, termios.TCSANOW, attr_list) if old_handler is not None: signal.signal(signal.SIGTTOU, old_handler) def iter_all_python_module_files(): # This is a hot path during reloading. Create a stable sorted list of # modules based on the module name and pass it to iter_modules_and_files(). # This ensures cached results are returned in the usual case that modules # aren't loaded on the fly. keys = sorted(sys.modules) modules = tuple(m for m in map(sys.modules.__getitem__, keys) if not isinstance(m, weakref.ProxyTypes)) return iter_modules_and_files(modules, frozenset(_error_files)) @functools.lru_cache(maxsize=1) def iter_modules_and_files(modules, extra_files): """Iterate through all modules needed to be watched.""" sys_file_paths = [] for module in modules: # During debugging (with PyDev) the 'typing.io' and 'typing.re' objects # are added to sys.modules, however they are types not modules and so # cause issues here. if not isinstance(module, ModuleType): continue if module.__name__ == '__main__': # __main__ (usually manage.py) doesn't always have a __spec__ set. # Handle this by falling back to using __file__, resolved below. # See https://docs.python.org/reference/import.html#main-spec # __file__ may not exists, e.g. when running ipdb debugger. 
if hasattr(module, '__file__'): sys_file_paths.append(module.__file__) continue if getattr(module, '__spec__', None) is None: continue spec = module.__spec__ # Modules could be loaded from places without a concrete location. If # this is the case, skip them. if spec.has_location: origin = spec.loader.archive if isinstance(spec.loader, zipimporter) else spec.origin sys_file_paths.append(origin) results = set() for filename in itertools.chain(sys_file_paths, extra_files): if not filename: continue path = Path(filename) try: if not path.exists(): # The module could have been removed, don't fail loudly if this # is the case. continue except ValueError as e: # Network filesystems may return null bytes in file paths. logger.debug('"%s" raised when resolving path: "%s"', e, path) continue resolved_path = path.resolve().absolute() results.add(resolved_path) return frozenset(results) @functools.lru_cache(maxsize=1) def common_roots(paths): """ Return a tuple of common roots that are shared between the given paths. File system watchers operate on directories and aren't cheap to create. Try to find the minimum set of directories to watch that encompass all of the files that need to be watched. """ # Inspired from Werkzeug: # https://github.com/pallets/werkzeug/blob/7477be2853df70a022d9613e765581b9411c3c39/werkzeug/_reloader.py # Create a sorted list of the path components, longest first. path_parts = sorted([x.parts for x in paths], key=len, reverse=True) tree = {} for chunks in path_parts: node = tree # Add each part of the path to the tree. for chunk in chunks: node = node.setdefault(chunk, {}) # Clear the last leaf in the tree. node.clear() # Turn the tree into a list of Path instances. def _walk(node, path): for prefix, child in node.items(): yield from _walk(child, path + (prefix,)) if not node: yield Path(*path) return tuple(_walk(tree, ())) def sys_path_directories(): """ Yield absolute directories from sys.path, ignoring entries that don't exist. """ for path in sys.path: path = Path(path) if not path.exists(): continue resolved_path = path.resolve().absolute() # If the path is a file (like a zip file), watch the parent directory. if resolved_path.is_file(): yield resolved_path.parent else: yield resolved_path def get_child_arguments(): """ Return the executable. This contains a workaround for Windows if the executable is reported to not have the .exe extension which can cause bugs on reloading. """ import __main__ py_script = Path(sys.argv[0]) args = [sys.executable] + ['-W%s' % o for o in sys.warnoptions] # __spec__ is set when the server was started with the `-m` option, # see https://docs.python.org/3/reference/import.html#main-spec # __spec__ may not exist, e.g. when running in a Conda env. if getattr(__main__, '__spec__', None) is not None: spec = __main__.__spec__ if (spec.name == '__main__' or spec.name.endswith('.__main__')) and spec.parent: name = spec.parent else: name = spec.name args += ['-m', name] args += sys.argv[1:] elif not py_script.exists(): # sys.argv[0] may not exist for several reasons on Windows. # It may exist with a .exe extension or have a -script.py suffix. exe_entrypoint = py_script.with_suffix('.exe') if exe_entrypoint.exists(): # Should be executed directly, ignoring sys.executable. return [exe_entrypoint, *sys.argv[1:]] script_entrypoint = py_script.with_name('%s-script.py' % py_script.name) if script_entrypoint.exists(): # Should be executed as usual. return [*args, script_entrypoint, *sys.argv[1:]] raise RuntimeError('Script %s does not exist.' 
% py_script) else: args += sys.argv return args def trigger_reload(filename): logger.info('%s changed, reloading.', filename) sys.exit(3) def restart_with_reloader(): new_environ = {**os.environ, DJANGO_AUTORELOAD_ENV: 'true'} args = get_child_arguments() while True: p = subprocess.run(args, env=new_environ, close_fds=False) if p.returncode != 3: return p.returncode class BaseReloader: def __init__(self): self.extra_files = set() self.directory_globs = defaultdict(set) self._stop_condition = threading.Event() def watch_dir(self, path, glob): path = Path(path) try: path = path.absolute() except FileNotFoundError: logger.debug( 'Unable to watch directory %s as it cannot be resolved.', path, exc_info=True, ) return logger.debug('Watching dir %s with glob %s.', path, glob) self.directory_globs[path].add(glob) def watched_files(self, include_globs=True): """ Yield all files that need to be watched, including module files and files within globs. """ yield from iter_all_python_module_files() yield from self.extra_files if include_globs: for directory, patterns in self.directory_globs.items(): for pattern in patterns: yield from directory.glob(pattern) def wait_for_apps_ready(self, app_reg, django_main_thread): """ Wait until Django reports that the apps have been loaded. If the given thread has terminated before the apps are ready, then a SyntaxError or other non-recoverable error has been raised. In that case, stop waiting for the apps_ready event and continue processing. Return True if the thread is alive and the ready event has been triggered, or False if the thread is terminated while waiting for the event. """ while django_main_thread.is_alive(): if app_reg.ready_event.wait(timeout=0.1): return True else: logger.debug('Main Django thread has terminated before apps are ready.') return False def run(self, django_main_thread): logger.debug('Waiting for apps ready_event.') self.wait_for_apps_ready(apps, django_main_thread) from django.urls import get_resolver # Prevent a race condition where URL modules aren't loaded when the # reloader starts by accessing the urlconf_module property. try: get_resolver().urlconf_module except Exception: # Loading the urlconf can result in errors during development. # If this occurs then swallow the error and continue. pass logger.debug('Apps ready_event triggered. Sending autoreload_started signal.') autoreload_started.send(sender=self) self.run_loop() def run_loop(self): ticker = self.tick() while not self.should_stop: try: next(ticker) except StopIteration: break self.stop() def tick(self): """ This generator is called in a loop from run_loop. It's important that the method takes care of pausing or otherwise waiting for a period of time. This split between run_loop() and tick() is to improve the testability of the reloader implementations by decoupling the work they do from the loop. """ raise NotImplementedError('subclasses must implement tick().') @classmethod def check_availability(cls): raise NotImplementedError('subclasses must implement check_availability().') def notify_file_changed(self, path): results = file_changed.send(sender=self, file_path=path) logger.debug('%s notified as changed. Signal results: %s.', path, results) if not any(res[1] for res in results): trigger_reload(path) # These are primarily used for testing. @property def should_stop(self): return self._stop_condition.is_set() def stop(self): self._stop_condition.set() class StatReloader(BaseReloader): SLEEP_TIME = 1 # Check for changes once per second. 
def tick(self): mtimes = {} while True: for filepath, mtime in self.snapshot_files(): old_time = mtimes.get(filepath) mtimes[filepath] = mtime if old_time is None: logger.debug('File %s first seen with mtime %s', filepath, mtime) continue elif mtime > old_time: logger.debug('File %s previous mtime: %s, current mtime: %s', filepath, old_time, mtime) self.notify_file_changed(filepath) time.sleep(self.SLEEP_TIME) yield def snapshot_files(self): # watched_files may produce duplicate paths if globs overlap. seen_files = set() for file in self.watched_files(): if file in seen_files: continue try: mtime = file.stat().st_mtime except OSError: # This is raised when the file does not exist. continue seen_files.add(file) yield file, mtime @classmethod def check_availability(cls): return True class WatchmanUnavailable(RuntimeError): pass class WatchmanReloader(BaseReloader): def __init__(self): self.roots = defaultdict(set) self.processed_request = threading.Event() self.client_timeout = int(os.environ.get('DJANGO_WATCHMAN_TIMEOUT', 5)) super().__init__() @cached_property def client(self): return pywatchman.client(timeout=self.client_timeout) def _watch_root(self, root): # In practice this shouldn't occur, however, it's possible that a # directory that doesn't exist yet is being watched. If it's outside of # sys.path then this will end up as a new root. How to handle this isn't # clear: not adding the root will likely break when subscribing to the # changes; however, as this is currently an internal API, no files # will be watched outside of sys.path. Fixing this by checking # inside watch_glob() and watch_dir() is expensive; instead this could # fall back to the StatReloader if this case is detected. For # now, watching its parent, if possible, is sufficient. if not root.exists(): if not root.parent.exists(): logger.warning('Unable to watch root dir %s as neither it nor its parent exists.', root) return root = root.parent result = self.client.query('watch-project', str(root.absolute())) if 'warning' in result: logger.warning('Watchman warning: %s', result['warning']) logger.debug('Watchman watch-project result: %s', result) return result['watch'], result.get('relative_path') @functools.lru_cache() def _get_clock(self, root): return self.client.query('clock', root)['clock'] def _subscribe(self, directory, name, expression): root, rel_path = self._watch_root(directory) # Only receive notifications of files changing, filtering out other types # like special files: https://facebook.github.io/watchman/docs/type only_files_expression = [ 'allof', ['anyof', ['type', 'f'], ['type', 'l']], expression ] query = { 'expression': only_files_expression, 'fields': ['name'], 'since': self._get_clock(root), 'dedup_results': True, } if rel_path: query['relative_root'] = rel_path logger.debug('Issuing watchman subscription %s, for root %s.
Query: %s', name, root, query) self.client.query('subscribe', root, name, query) def _subscribe_dir(self, directory, filenames): if not directory.exists(): if not directory.parent.exists(): logger.warning('Unable to watch directory %s as neither it nor its parent exists.', directory) return prefix = 'files-parent-%s' % directory.name filenames = ['%s/%s' % (directory.name, filename) for filename in filenames] directory = directory.parent expression = ['name', filenames, 'wholename'] else: prefix = 'files' expression = ['name', filenames] self._subscribe(directory, '%s:%s' % (prefix, directory), expression) def _watch_glob(self, directory, patterns): """ Watch a directory with a specific glob. If the directory doesn't yet exist, attempt to watch the parent directory and amend the patterns to include this. It's important that this method isn't called more than once per directory when updating all subscriptions. Subsequent calls will overwrite the named subscription, so it must include all possible glob expressions. """ prefix = 'glob' if not directory.exists(): if not directory.parent.exists(): logger.warning('Unable to watch directory %s as neither it nor its parent exists.', directory) return prefix = 'glob-parent-%s' % directory.name patterns = ['%s/%s' % (directory.name, pattern) for pattern in patterns] directory = directory.parent expression = ['anyof'] for pattern in patterns: expression.append(['match', pattern, 'wholename']) self._subscribe(directory, '%s:%s' % (prefix, directory), expression) def watched_roots(self, watched_files): extra_directories = self.directory_globs.keys() watched_file_dirs = [f.parent for f in watched_files] sys_paths = list(sys_path_directories()) return frozenset((*extra_directories, *watched_file_dirs, *sys_paths)) def _update_watches(self): watched_files = list(self.watched_files(include_globs=False)) found_roots = common_roots(self.watched_roots(watched_files)) logger.debug('Watching %s files', len(watched_files)) logger.debug('Found common roots: %s', found_roots) # Set up initial roots for performance, shortest roots first. for root in sorted(found_roots): self._watch_root(root) for directory, patterns in self.directory_globs.items(): self._watch_glob(directory, patterns) # Group sorted watched_files by their parent directory. sorted_files = sorted(watched_files, key=lambda p: p.parent) for directory, group in itertools.groupby(sorted_files, key=lambda p: p.parent): # These paths need to be relative to the parent directory. self._subscribe_dir(directory, [str(p.relative_to(directory)) for p in group]) def update_watches(self): try: self._update_watches() except Exception as ex: # If the service is still available, raise the original exception. if self.check_server_status(ex): raise def _check_subscription(self, sub): subscription = self.client.getSubscription(sub) if not subscription: return logger.debug('Watchman subscription %s has results.', sub) for result in subscription: # When using watch-project, it's not simple to get the relative # directory without storing some specific state. Store the full # path to the directory in the subscription name, prefixed by its # type (glob, files). root_directory = Path(result['subscription'].split(':', 1)[1]) logger.debug('Found root directory %s', root_directory) for file in result.get('files', []): self.notify_file_changed(root_directory / file) def request_processed(self, **kwargs): logger.debug('Request processed.
Setting update_watches event.') self.processed_request.set() def tick(self): request_finished.connect(self.request_processed) self.update_watches() while True: if self.processed_request.is_set(): self.update_watches() self.processed_request.clear() try: self.client.receive() except pywatchman.SocketTimeout: pass except pywatchman.WatchmanError as ex: logger.debug('Watchman error: %s, checking server status.', ex) self.check_server_status(ex) else: for sub in list(self.client.subs.keys()): self._check_subscription(sub) yield # Protect against busy loops. time.sleep(0.1) def stop(self): self.client.close() super().stop() def check_server_status(self, inner_ex=None): """Return True if the server is available.""" try: self.client.query('version') except Exception: raise WatchmanUnavailable(str(inner_ex)) from inner_ex return True @classmethod def check_availability(cls): if not pywatchman: raise WatchmanUnavailable('pywatchman not installed.') client = pywatchman.client(timeout=0.1) try: result = client.capabilityCheck() except Exception: # The service is down? raise WatchmanUnavailable('Cannot connect to the watchman service.') version = get_version_tuple(result['version']) # Watchman 4.9 includes multiple improvements to watching project # directories as well as case insensitive filesystems. logger.debug('Watchman version %s', version) if version < (4, 9): raise WatchmanUnavailable('Watchman 4.9 or later is required.') def get_reloader(): """Return the most suitable reloader for this environment.""" try: WatchmanReloader.check_availability() except WatchmanUnavailable: return StatReloader() return WatchmanReloader() def start_django(reloader, main_func, *args, **kwargs): ensure_echo_on() main_func = check_errors(main_func) django_main_thread = threading.Thread(target=main_func, args=args, kwargs=kwargs, name='django-main-thread') django_main_thread.daemon = True django_main_thread.start() while not reloader.should_stop: try: reloader.run(django_main_thread) except WatchmanUnavailable as ex: # It's possible that the watchman service shuts down or otherwise # becomes unavailable. In that case, use the StatReloader. reloader = StatReloader() logger.error('Error connecting to Watchman: %s', ex) logger.info('Watching for file changes with %s', reloader.__class__.__name__) def run_with_reloader(main_func, *args, **kwargs): signal.signal(signal.SIGTERM, lambda *args: sys.exit(0)) try: if os.environ.get(DJANGO_AUTORELOAD_ENV) == 'true': reloader = get_reloader() logger.info('Watching for file changes with %s', reloader.__class__.__name__) start_django(reloader, main_func, *args, **kwargs) else: exit_code = restart_with_reloader() sys.exit(exit_code) except KeyboardInterrupt: pass
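A minimal sketch of the two module-level signals above: autoreload_started lets code register extra watches via the reloader's watch_dir(), and a truthy receiver result for file_changed short-circuits trigger_reload() in notify_file_changed(). The '*.sql' glob and 'myapp' path are illustrative; in practice these receivers are typically connected from an AppConfig.ready():

from pathlib import Path

from django.dispatch import receiver
from django.utils.autoreload import autoreload_started, file_changed

@receiver(autoreload_started)
def watch_extra_files(sender, **kwargs):
    # `sender` is the active BaseReloader subclass instance.
    sender.watch_dir(Path('myapp'), '*.sql')  # hypothetical directory

@receiver(file_changed)
def ignore_log_files(sender, file_path, **kwargs):
    if file_path.suffix == '.log':
        return True  # any truthy result suppresses the restart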
07a792d9d8882f760f523a74056df237a5307015e6c6115eb516b86dc2834c82
import html.entities import re import unicodedata from gzip import GzipFile, compress as gzip_compress from io import BytesIO from django.core.exceptions import SuspiciousFileOperation from django.utils.functional import SimpleLazyObject, keep_lazy_text, lazy from django.utils.regex_helper import _lazy_re_compile from django.utils.translation import gettext as _, gettext_lazy, pgettext @keep_lazy_text def capfirst(x): """Capitalize the first letter of a string.""" if not x: return x if not isinstance(x, str): x = str(x) return x[0].upper() + x[1:] # Set up regular expressions re_words = _lazy_re_compile(r'<[^>]+?>|([^<>\s]+)', re.S) re_chars = _lazy_re_compile(r'<[^>]+?>|(.)', re.S) re_tag = _lazy_re_compile(r'<(/)?(\S+?)(?:(\s*/)|\s.*?)?>', re.S) re_newlines = _lazy_re_compile(r'\r\n|\r') # Used in normalize_newlines re_camel_case = _lazy_re_compile(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))') @keep_lazy_text def wrap(text, width): """ A word-wrap function that preserves existing line breaks. Expects that existing line breaks are posix newlines. Preserve all white space except added line breaks consume the space on which they break the line. Don't wrap long words, thus the output text may have lines longer than ``width``. """ def _generator(): for line in text.splitlines(True): # True keeps trailing linebreaks max_width = min((line.endswith('\n') and width + 1 or width), width) while len(line) > max_width: space = line[:max_width + 1].rfind(' ') + 1 if space == 0: space = line.find(' ') + 1 if space == 0: yield line line = '' break yield '%s\n' % line[:space - 1] line = line[space:] max_width = min((line.endswith('\n') and width + 1 or width), width) if line: yield line return ''.join(_generator()) class Truncator(SimpleLazyObject): """ An object used to truncate text, either by characters or words. """ def __init__(self, text): super().__init__(lambda: str(text)) def add_truncation_text(self, text, truncate=None): if truncate is None: truncate = pgettext( 'String to return when truncating text', '%(truncated_text)s…') if '%(truncated_text)s' in truncate: return truncate % {'truncated_text': text} # The truncation text didn't contain the %(truncated_text)s string # replacement argument so just append it to the text. if text.endswith(truncate): # But don't append the truncation text if the current text already # ends in this. return text return '%s%s' % (text, truncate) def chars(self, num, truncate=None, html=False): """ Return the text truncated to be no longer than the specified number of characters. `truncate` specifies what should be used to notify that the string has been truncated, defaulting to a translatable string of an ellipsis. 
""" self._setup() length = int(num) text = unicodedata.normalize('NFC', self._wrapped) # Calculate the length to truncate to (max length - end_text length) truncate_len = length for char in self.add_truncation_text('', truncate): if not unicodedata.combining(char): truncate_len -= 1 if truncate_len == 0: break if html: return self._truncate_html(length, truncate, text, truncate_len, False) return self._text_chars(length, truncate, text, truncate_len) def _text_chars(self, length, truncate, text, truncate_len): """Truncate a string after a certain number of chars.""" s_len = 0 end_index = None for i, char in enumerate(text): if unicodedata.combining(char): # Don't consider combining characters # as adding to the string length continue s_len += 1 if end_index is None and s_len > truncate_len: end_index = i if s_len > length: # Return the truncated string return self.add_truncation_text(text[:end_index or 0], truncate) # Return the original string since no truncation was necessary return text def words(self, num, truncate=None, html=False): """ Truncate a string after a certain number of words. `truncate` specifies what should be used to notify that the string has been truncated, defaulting to ellipsis. """ self._setup() length = int(num) if html: return self._truncate_html(length, truncate, self._wrapped, length, True) return self._text_words(length, truncate) def _text_words(self, length, truncate): """ Truncate a string after a certain number of words. Strip newlines in the string. """ words = self._wrapped.split() if len(words) > length: words = words[:length] return self.add_truncation_text(' '.join(words), truncate) return ' '.join(words) def _truncate_html(self, length, truncate, text, truncate_len, words): """ Truncate HTML to a certain number of chars (not counting tags and comments), or, if words is True, then to a certain number of words. Close opened tags if they were correctly closed in the given HTML. Preserve newlines in the HTML. 
""" if words and length <= 0: return '' html4_singlets = ( 'br', 'col', 'link', 'base', 'img', 'param', 'area', 'hr', 'input' ) # Count non-HTML chars/words and keep note of open tags pos = 0 end_text_pos = 0 current_len = 0 open_tags = [] regex = re_words if words else re_chars while current_len <= length: m = regex.search(text, pos) if not m: # Checked through whole string break pos = m.end(0) if m[1]: # It's an actual non-HTML word or char current_len += 1 if current_len == truncate_len: end_text_pos = pos continue # Check for tag tag = re_tag.match(m[0]) if not tag or current_len >= truncate_len: # Don't worry about non tags or tags after our truncate point continue closing_tag, tagname, self_closing = tag.groups() # Element names are always case-insensitive tagname = tagname.lower() if self_closing or tagname in html4_singlets: pass elif closing_tag: # Check for match in open tags list try: i = open_tags.index(tagname) except ValueError: pass else: # SGML: An end tag closes, back to the matching start tag, # all unclosed intervening start tags with omitted end tags open_tags = open_tags[i + 1:] else: # Add it to the start of the open tags list open_tags.insert(0, tagname) if current_len <= length: return text out = text[:end_text_pos] truncate_text = self.add_truncation_text('', truncate) if truncate_text: out += truncate_text # Close any tags still open for tag in open_tags: out += '</%s>' % tag # Return string return out @keep_lazy_text def get_valid_filename(name): """ Return the given string converted to a string that can be used for a clean filename. Remove leading and trailing spaces; convert other spaces to underscores; and remove anything that is not an alphanumeric, dash, underscore, or dot. >>> get_valid_filename("john's portrait in 2004.jpg") 'johns_portrait_in_2004.jpg' """ s = str(name).strip().replace(' ', '_') s = re.sub(r'(?u)[^-\w.]', '', s) if s in {'', '.', '..'}: raise SuspiciousFileOperation("Could not derive file name from '%s'" % name) return s @keep_lazy_text def get_text_list(list_, last_word=gettext_lazy('or')): """ >>> get_text_list(['a', 'b', 'c', 'd']) 'a, b, c or d' >>> get_text_list(['a', 'b', 'c'], 'and') 'a, b and c' >>> get_text_list(['a', 'b'], 'and') 'a and b' >>> get_text_list(['a']) 'a' >>> get_text_list([]) '' """ if not list_: return '' if len(list_) == 1: return str(list_[0]) return '%s %s %s' % ( # Translators: This string is used as a separator between list elements _(', ').join(str(i) for i in list_[:-1]), str(last_word), str(list_[-1]) ) @keep_lazy_text def normalize_newlines(text): """Normalize CRLF and CR newlines to just LF.""" return re_newlines.sub('\n', str(text)) @keep_lazy_text def phone2numeric(phone): """Convert a phone number with letters into its numeric equivalent.""" char2number = { 'a': '2', 'b': '2', 'c': '2', 'd': '3', 'e': '3', 'f': '3', 'g': '4', 'h': '4', 'i': '4', 'j': '5', 'k': '5', 'l': '5', 'm': '6', 'n': '6', 'o': '6', 'p': '7', 'q': '7', 'r': '7', 's': '7', 't': '8', 'u': '8', 'v': '8', 'w': '9', 'x': '9', 'y': '9', 'z': '9', } return ''.join(char2number.get(c, c) for c in phone.lower()) def compress_string(s): return gzip_compress(s, compresslevel=6, mtime=0) class StreamingBuffer(BytesIO): def read(self): ret = self.getvalue() self.seek(0) self.truncate() return ret # Like compress_string, but for iterators of strings. def compress_sequence(sequence): buf = StreamingBuffer() with GzipFile(mode='wb', compresslevel=6, fileobj=buf, mtime=0) as zfile: # Output headers... 
yield buf.read() for item in sequence: zfile.write(item) data = buf.read() if data: yield data yield buf.read() # Expression to match some_token and some_token="with spaces" (and similarly # for single-quoted strings). smart_split_re = _lazy_re_compile(r""" ((?: [^\s'"]* (?: (?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*') [^\s'"]* )+ ) | \S+) """, re.VERBOSE) def smart_split(text): r""" Generator that splits a string by spaces, leaving quoted phrases together. Supports both single and double quotes, and supports escaping quotes with backslashes. In the output, strings will keep their initial and trailing quote marks and escaped quotes will remain escaped (the results can then be further processed with unescape_string_literal()). >>> list(smart_split(r'This is "a person\'s" test.')) ['This', 'is', '"a person\\\'s"', 'test.'] >>> list(smart_split(r"Another 'person\'s' test.")) ['Another', "'person\\'s'", 'test.'] >>> list(smart_split(r'A "\"funky\" style" test.')) ['A', '"\\"funky\\" style"', 'test.'] """ for bit in smart_split_re.finditer(str(text)): yield bit[0] def _replace_entity(match): text = match[1] if text[0] == '#': text = text[1:] try: if text[0] in 'xX': c = int(text[1:], 16) else: c = int(text) return chr(c) except ValueError: return match[0] else: try: return chr(html.entities.name2codepoint[text]) except KeyError: return match[0] _entity_re = _lazy_re_compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));") @keep_lazy_text def unescape_string_literal(s): r""" Convert quoted string literals to unquoted strings with escaped quotes and backslashes unquoted:: >>> unescape_string_literal('"abc"') 'abc' >>> unescape_string_literal("'abc'") 'abc' >>> unescape_string_literal('"a \"bc\""') 'a "bc"' >>> unescape_string_literal("'\'ab\' c'") "'ab' c" """ if s[0] not in "\"'" or s[-1] != s[0]: raise ValueError("Not a string literal: %r" % s) quote = s[0] return s[1:-1].replace(r'\%s' % quote, quote).replace(r'\\', '\\') @keep_lazy_text def slugify(value, allow_unicode=False): """ Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated dashes to single dashes. Remove characters that aren't alphanumerics, underscores, or hyphens. Convert to lowercase. Also strip leading and trailing whitespace, dashes, and underscores. """ value = str(value) if allow_unicode: value = unicodedata.normalize('NFKC', value) else: value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii') value = re.sub(r'[^\w\s-]', '', value.lower()) return re.sub(r'[-\s]+', '-', value).strip('-_') def camel_case_to_spaces(value): """ Split CamelCase and convert to lowercase. Strip surrounding whitespace. """ return re_camel_case.sub(r' \1', value).strip().lower() def _format_lazy(format_string, *args, **kwargs): """ Apply str.format() on 'format_string' where format_string, args, and/or kwargs might be lazy. """ return format_string.format(*args, **kwargs) format_lazy = lazy(_format_lazy, str)
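

# Illustrative usage sketch of the helpers above (added for exposition; the
# sample strings are arbitrary). An explicit ``truncate`` argument is passed
# to Truncator so the demo runs without Django's translation machinery.
if __name__ == '__main__':
    print(Truncator('The quick brown fox').chars(12, truncate='…'))
    # 'The quick b…'
    print(Truncator('<p>The quick brown fox jumped.</p>').words(3, truncate='…', html=True))
    # '<p>The quick brown…</p>' -- open tags are re-closed after truncating.
    print(slugify('Jack & Jill like numbers 1,2,3'))
    # 'jack-jill-like-numbers-123'
    print(list(smart_split('tag:"django utils" other')))
    # ['tag:"django utils"', 'other'] -- quoted phrases stay together.
    print(phone2numeric('0800-flowers'))
    # '0800-3569377'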
98bc80f3939ef82f200a67212b6821791d415391b5dd1f7474f0bc3cecbdf585
"""Functions to parse datetime objects.""" # We're using regular expressions rather than time.strptime because: # - They provide both validation and parsing. # - They're more flexible for datetimes. # - The date/datetime/time constructors produce friendlier error messages. import datetime from django.utils.regex_helper import _lazy_re_compile from django.utils.timezone import get_fixed_timezone, utc date_re = _lazy_re_compile( r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})$' ) time_re = _lazy_re_compile( r'(?P<hour>\d{1,2}):(?P<minute>\d{1,2})' r'(?::(?P<second>\d{1,2})(?:[\.,](?P<microsecond>\d{1,6})\d{0,6})?)?$' ) datetime_re = _lazy_re_compile( r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})' r'[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})' r'(?::(?P<second>\d{1,2})(?:[\.,](?P<microsecond>\d{1,6})\d{0,6})?)?' r'\s*(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$' ) standard_duration_re = _lazy_re_compile( r'^' r'(?:(?P<days>-?\d+) (days?, )?)?' r'(?P<sign>-?)' r'((?:(?P<hours>\d+):)(?=\d+:\d+))?' r'(?:(?P<minutes>\d+):)?' r'(?P<seconds>\d+)' r'(?:[\.,](?P<microseconds>\d{1,6})\d{0,6})?' r'$' ) # Support the sections of ISO 8601 date representation that are accepted by # timedelta iso8601_duration_re = _lazy_re_compile( r'^(?P<sign>[-+]?)' r'P' r'(?:(?P<days>\d+(.\d+)?)D)?' r'(?:T' r'(?:(?P<hours>\d+(.\d+)?)H)?' r'(?:(?P<minutes>\d+(.\d+)?)M)?' r'(?:(?P<seconds>\d+(.\d+)?)S)?' r')?' r'$' ) # Support PostgreSQL's day-time interval format, e.g. "3 days 04:05:06". The # year-month and mixed intervals cannot be converted to a timedelta and thus # aren't accepted. postgres_interval_re = _lazy_re_compile( r'^' r'(?:(?P<days>-?\d+) (days? ?))?' r'(?:(?P<sign>[-+])?' r'(?P<hours>\d+):' r'(?P<minutes>\d\d):' r'(?P<seconds>\d\d)' r'(?:\.(?P<microseconds>\d{1,6}))?' r')?$' ) def parse_date(value): """Parse a string and return a datetime.date. Raise ValueError if the input is well formatted but not a valid date. Return None if the input isn't well formatted. """ try: return datetime.date.fromisoformat(value) except ValueError: if match := date_re.match(value): kw = {k: int(v) for k, v in match.groupdict().items()} return datetime.date(**kw) def parse_time(value): """Parse a string and return a datetime.time. This function doesn't support time zone offsets. Raise ValueError if the input is well formatted but not a valid time. Return None if the input isn't well formatted, in particular if it contains an offset. """ try: # The fromisoformat() method takes time zone info into account and # returns a time with a tzinfo component, if possible. However, there # are no circumstances where aware datetime.time objects make sense, so # remove the time zone offset. return datetime.time.fromisoformat(value).replace(tzinfo=None) except ValueError: if match := time_re.match(value): kw = match.groupdict() kw['microsecond'] = kw['microsecond'] and kw['microsecond'].ljust(6, '0') kw = {k: int(v) for k, v in kw.items() if v is not None} return datetime.time(**kw) def parse_datetime(value): """Parse a string and return a datetime.datetime. This function supports time zone offsets. When the input contains one, the output uses a timezone with a fixed offset from UTC. Raise ValueError if the input is well formatted but not a valid datetime. Return None if the input isn't well formatted. 
""" try: return datetime.datetime.fromisoformat(value) except ValueError: if match := datetime_re.match(value): kw = match.groupdict() kw['microsecond'] = kw['microsecond'] and kw['microsecond'].ljust(6, '0') tzinfo = kw.pop('tzinfo') if tzinfo == 'Z': tzinfo = utc elif tzinfo is not None: offset_mins = int(tzinfo[-2:]) if len(tzinfo) > 3 else 0 offset = 60 * int(tzinfo[1:3]) + offset_mins if tzinfo[0] == '-': offset = -offset tzinfo = get_fixed_timezone(offset) kw = {k: int(v) for k, v in kw.items() if v is not None} return datetime.datetime(**kw, tzinfo=tzinfo) def parse_duration(value): """Parse a duration string and return a datetime.timedelta. The preferred format for durations in Django is '%d %H:%M:%S.%f'. Also supports ISO 8601 representation and PostgreSQL's day-time interval format. """ match = ( standard_duration_re.match(value) or iso8601_duration_re.match(value) or postgres_interval_re.match(value) ) if match: kw = match.groupdict() sign = -1 if kw.pop('sign', '+') == '-' else 1 if kw.get('microseconds'): kw['microseconds'] = kw['microseconds'].ljust(6, '0') if kw.get('seconds') and kw.get('microseconds') and kw['seconds'].startswith('-'): kw['microseconds'] = '-' + kw['microseconds'] kw = {k: float(v.replace(',', '.')) for k, v in kw.items() if v is not None} days = datetime.timedelta(kw.pop('days', .0) or .0) if match.re == iso8601_duration_re: days *= sign return days + sign * datetime.timedelta(**kw)
d926839f0d047ffcc4d94d0f634ed9e9d53cc7c04bfeb74423b73f0c851d829c
import datetime import functools import os import subprocess import sys from django.utils.regex_helper import _lazy_re_compile # Private, stable API for detecting the Python version. PYXY means "Python X.Y # or later". So that third-party apps can use these values, each constant # should remain as long as the oldest supported Django version supports that # Python version. PY36 = sys.version_info >= (3, 6) PY37 = sys.version_info >= (3, 7) PY38 = sys.version_info >= (3, 8) PY39 = sys.version_info >= (3, 9) PY310 = sys.version_info >= (3, 10) def get_version(version=None): """Return a PEP 440-compliant version number from VERSION.""" version = get_complete_version(version) # Now build the two parts of the version number: # main = X.Y[.Z] # sub = .devN - for pre-alpha releases # | {a|b|rc}N - for alpha, beta, and rc releases main = get_main_version(version) sub = '' if version[3] == 'alpha' and version[4] == 0: git_changeset = get_git_changeset() if git_changeset: sub = '.dev%s' % git_changeset elif version[3] != 'final': mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'} sub = mapping[version[3]] + str(version[4]) return main + sub def get_main_version(version=None): """Return main version (X.Y[.Z]) from VERSION.""" version = get_complete_version(version) parts = 2 if version[2] == 0 else 3 return '.'.join(str(x) for x in version[:parts]) def get_complete_version(version=None): """ Return a tuple of the django version. If version argument is non-empty, check for correctness of the tuple provided. """ if version is None: from django import VERSION as version else: assert len(version) == 5 assert version[3] in ('alpha', 'beta', 'rc', 'final') return version def get_docs_version(version=None): version = get_complete_version(version) if version[3] != 'final': return 'dev' else: return '%d.%d' % version[:2] @functools.lru_cache() def get_git_changeset(): """Return a numeric identifier of the latest git changeset. The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format. This value isn't guaranteed to be unique, but collisions are very unlikely, so it's sufficient for generating the development version numbers. """ # Repository may not be found if __file__ is undefined, e.g. in a frozen # module. if '__file__' not in globals(): return None repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) git_log = subprocess.run( 'git log --pretty=format:%ct --quiet -1 HEAD', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd=repo_dir, universal_newlines=True, ) timestamp = git_log.stdout tz = datetime.timezone.utc try: timestamp = datetime.datetime.fromtimestamp(int(timestamp), tz=tz) except ValueError: return None return timestamp.strftime('%Y%m%d%H%M%S') version_component_re = _lazy_re_compile(r'(\d+|[a-z]+|\.)') def get_version_tuple(version): """ Return a tuple of version numbers (e.g. (1, 2, 3)) from the version string (e.g. '1.2.3'). """ version_numbers = [] for item in version_component_re.split(version): if item and item != '.': try: component = int(item) except ValueError: break else: version_numbers.append(component) return tuple(version_numbers)
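

# Illustrative usage sketch (added for exposition). The 5-tuples below are
# made-up VERSION values, not statements about real Django releases.
if __name__ == '__main__':
    print(get_version((4, 0, 1, 'final', 0)))    # '4.0.1'
    print(get_version((4, 1, 0, 'beta', 2)))     # '4.1b2'
    print(get_main_version((4, 0, 0, 'rc', 1)))  # '4.0' -- a micro of 0 is dropped
    print(get_version_tuple('2.8.6b1'))          # (2, 8, 6) -- stops at 'b'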
7d395a8b7b6e8b46bf1cd168682bf171ecb720836510746718a2dda2e96de728
""" Timezone-related classes and functions. """ import functools from contextlib import ContextDecorator from datetime import datetime, timedelta, timezone, tzinfo import pytz from asgiref.local import Local from django.conf import settings __all__ = [ 'utc', 'get_fixed_timezone', 'get_default_timezone', 'get_default_timezone_name', 'get_current_timezone', 'get_current_timezone_name', 'activate', 'deactivate', 'override', 'localtime', 'now', 'is_aware', 'is_naive', 'make_aware', 'make_naive', ] # UTC time zone as a tzinfo instance. utc = pytz.utc _PYTZ_BASE_CLASSES = (pytz.tzinfo.BaseTzInfo, pytz._FixedOffset) # In releases prior to 2018.4, pytz.UTC was not a subclass of BaseTzInfo if not isinstance(pytz.UTC, pytz._FixedOffset): _PYTZ_BASE_CLASSES = _PYTZ_BASE_CLASSES + (type(pytz.UTC),) def get_fixed_timezone(offset): """Return a tzinfo instance with a fixed offset from UTC.""" if isinstance(offset, timedelta): offset = offset.total_seconds() // 60 sign = '-' if offset < 0 else '+' hhmm = '%02d%02d' % divmod(abs(offset), 60) name = sign + hhmm return timezone(timedelta(minutes=offset), name) # In order to avoid accessing settings at compile time, # wrap the logic in a function and cache the result. @functools.lru_cache() def get_default_timezone(): """ Return the default time zone as a tzinfo instance. This is the time zone defined by settings.TIME_ZONE. """ return pytz.timezone(settings.TIME_ZONE) # This function exists for consistency with get_current_timezone_name def get_default_timezone_name(): """Return the name of the default time zone.""" return _get_timezone_name(get_default_timezone()) _active = Local() def get_current_timezone(): """Return the currently active time zone as a tzinfo instance.""" return getattr(_active, "value", get_default_timezone()) def get_current_timezone_name(): """Return the name of the currently active time zone.""" return _get_timezone_name(get_current_timezone()) def _get_timezone_name(timezone): """Return the name of ``timezone``.""" return str(timezone) # Timezone selection functions. # These functions don't change os.environ['TZ'] and call time.tzset() # because it isn't thread safe. def activate(timezone): """ Set the time zone for the current thread. The ``timezone`` argument must be an instance of a tzinfo subclass or a time zone name. """ if isinstance(timezone, tzinfo): _active.value = timezone elif isinstance(timezone, str): _active.value = pytz.timezone(timezone) else: raise ValueError("Invalid timezone: %r" % timezone) def deactivate(): """ Unset the time zone for the current thread. Django will then use the time zone defined by settings.TIME_ZONE. """ if hasattr(_active, "value"): del _active.value class override(ContextDecorator): """ Temporarily set the time zone for the current thread. This is a context manager that uses django.utils.timezone.activate() to set the timezone on entry and restores the previously active timezone on exit. The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a time zone name, or ``None``. If it is ``None``, Django enables the default time zone. 
""" def __init__(self, timezone): self.timezone = timezone def __enter__(self): self.old_timezone = getattr(_active, 'value', None) if self.timezone is None: deactivate() else: activate(self.timezone) def __exit__(self, exc_type, exc_value, traceback): if self.old_timezone is None: deactivate() else: _active.value = self.old_timezone # Templates def template_localtime(value, use_tz=None): """ Check if value is a datetime and converts it to local time if necessary. If use_tz is provided and is not None, that will force the value to be converted (or not), overriding the value of settings.USE_TZ. This function is designed for use by the template engine. """ should_convert = ( isinstance(value, datetime) and (settings.USE_TZ if use_tz is None else use_tz) and not is_naive(value) and getattr(value, 'convert_to_local_time', True) ) return localtime(value) if should_convert else value # Utilities def localtime(value=None, timezone=None): """ Convert an aware datetime.datetime to local time. Only aware datetimes are allowed. When value is omitted, it defaults to now(). Local time is defined by the current time zone, unless another time zone is specified. """ if value is None: value = now() if timezone is None: timezone = get_current_timezone() # Emulate the behavior of astimezone() on Python < 3.6. if is_naive(value): raise ValueError("localtime() cannot be applied to a naive datetime") return value.astimezone(timezone) def localdate(value=None, timezone=None): """ Convert an aware datetime to local time and return the value's date. Only aware datetimes are allowed. When value is omitted, it defaults to now(). Local time is defined by the current time zone, unless another time zone is specified. """ return localtime(value, timezone).date() def now(): """ Return an aware or naive datetime.datetime, depending on settings.USE_TZ. """ return datetime.now(tz=utc if settings.USE_TZ else None) # By design, these four functions don't perform any checks on their arguments. # The caller should ensure that they don't receive an invalid value like None. def is_aware(value): """ Determine if a given datetime.datetime is aware. The concept is defined in Python's docs: https://docs.python.org/library/datetime.html#datetime.tzinfo Assuming value.tzinfo is either None or a proper datetime.tzinfo, value.utcoffset() implements the appropriate logic. """ return value.utcoffset() is not None def is_naive(value): """ Determine if a given datetime.datetime is naive. The concept is defined in Python's docs: https://docs.python.org/library/datetime.html#datetime.tzinfo Assuming value.tzinfo is either None or a proper datetime.tzinfo, value.utcoffset() implements the appropriate logic. """ return value.utcoffset() is None def make_aware(value, timezone=None, is_dst=None): """Make a naive datetime.datetime in a given time zone aware.""" if timezone is None: timezone = get_current_timezone() if _is_pytz_zone(timezone): # This method is available for pytz time zones. return timezone.localize(value, is_dst=is_dst) else: # Check that we won't overwrite the timezone of an aware datetime. if is_aware(value): raise ValueError( "make_aware expects a naive datetime, got %s" % value) # This may be wrong around DST changes! return value.replace(tzinfo=timezone) def make_naive(value, timezone=None): """Make an aware datetime.datetime naive in a given time zone.""" if timezone is None: timezone = get_current_timezone() # Emulate the behavior of astimezone() on Python < 3.6. 
if is_naive(value): raise ValueError("make_naive() cannot be applied to a naive datetime") return value.astimezone(timezone).replace(tzinfo=None) def _is_pytz_zone(tz): """Checks if a zone is a pytz zone.""" return isinstance(tz, _PYTZ_BASE_CLASSES) def _datetime_ambiguous_or_imaginary(dt, tz): if _is_pytz_zone(tz): try: tz.utcoffset(dt) except (pytz.AmbiguousTimeError, pytz.NonExistentTimeError): return True else: return False return tz.utcoffset(dt.replace(fold=not dt.fold)) != tz.utcoffset(dt)
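

# Illustrative usage sketch (added for exposition). Only helpers that don't
# read settings.TIME_ZONE are used, so this runs without configured settings
# (pytz and asgiref, Django's own dependencies, must be installed).
if __name__ == '__main__':
    ist = get_fixed_timezone(330)             # fixed +0530 offset
    naive = datetime(2021, 9, 7, 12, 0)
    aware = make_aware(naive, timezone=ist)   # non-pytz zone: tzinfo is attached
    print(is_naive(naive), is_aware(aware))   # True True
    print(aware.isoformat())                  # 2021-09-07T12:00:00+05:30
    print(make_naive(aware, timezone=utc))    # 2021-09-07 06:30:00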
d79f69a2634f9aa9b71897e3abdbe30dd1cd435ff2e70d181562eab5520ea73e
""" A class for storing a tree graph. Primarily used for filter constructs in the ORM. """ import copy from django.utils.hashable import make_hashable class Node: """ A single internal node in the tree graph. A Node should be viewed as a connection (the root) with the children being either leaf nodes or other Node instances. """ # Standard connector type. Clients usually won't use this at all and # subclasses will usually override the value. default = 'DEFAULT' def __init__(self, children=None, connector=None, negated=False): """Construct a new Node. If no connector is given, use the default.""" self.children = children[:] if children else [] self.connector = connector or self.default self.negated = negated # Required because django.db.models.query_utils.Q. Q. __init__() is # problematic, but it is a natural Node subclass in all other respects. @classmethod def _new_instance(cls, children=None, connector=None, negated=False): """ Create a new instance of this class when new Nodes (or subclasses) are needed in the internal code in this class. Normally, it just shadows __init__(). However, subclasses with an __init__ signature that aren't an extension of Node.__init__ might need to implement this method to allow a Node to create a new instance of them (if they have any extra setting up to do). """ obj = Node(children, connector, negated) obj.__class__ = cls return obj def __str__(self): template = '(NOT (%s: %s))' if self.negated else '(%s: %s)' return template % (self.connector, ', '.join(str(c) for c in self.children)) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def __deepcopy__(self, memodict): obj = Node(connector=self.connector, negated=self.negated) obj.__class__ = self.__class__ obj.children = copy.deepcopy(self.children, memodict) return obj def __len__(self): """Return the number of children this node has.""" return len(self.children) def __bool__(self): """Return whether or not this node has children.""" return bool(self.children) def __contains__(self, other): """Return True if 'other' is a direct child of this instance.""" return other in self.children def __eq__(self, other): return ( self.__class__ == other.__class__ and self.connector == other.connector and self.negated == other.negated and self.children == other.children ) def __hash__(self): return hash((self.__class__, self.connector, self.negated, *make_hashable(self.children))) def add(self, data, conn_type): """ Combine this tree and the data represented by data using the connector conn_type. The combine is done by squashing the node other away if possible. This tree (self) will never be pushed to a child node of the combined tree, nor will the connector or negated properties change. Return a node which can be used in place of data regardless if the node other got squashed or not. """ if self.connector != conn_type: obj = self._new_instance(self.children, self.connector, self.negated) self.connector = conn_type self.children = [obj, data] return data elif ( isinstance(data, Node) and not data.negated and (data.connector == conn_type or len(data) == 1) ): # We can squash the other node's children directly into this node. # We are just doing (AB)(CD) == (ABCD) here, with the addition that # if the length of the other node is 1 the connector doesn't # matter. However, for the len(self) == 1 case we don't want to do # the squashing, as it would alter self.connector. 
self.children.extend(data.children) return self else: # We could use perhaps additional logic here to see if some # children could be used for pushdown here. self.children.append(data) return data def negate(self): """Negate the sense of the root connector.""" self.negated = not self.negated
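

# Illustrative usage sketch (added for exposition; the leaf strings are
# arbitrary placeholders standing in for real lookup conditions).
if __name__ == '__main__':
    node = Node(['a', 'b'], connector='AND')
    node.add('c', 'AND')    # same connector: appended as a sibling
    print(node)             # (AND: a, b, c)
    node.add('d', 'OR')     # new connector: current children are pushed down
    print(node)             # (OR: (AND: a, b, c), d)
    node.negate()
    print(node)             # (NOT (OR: (AND: a, b, c), d))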
213c6dcd680b1156d26af317989df6779e3d55897944392b47b3c480b38cc590
import codecs import datetime import locale from decimal import Decimal from urllib.parse import quote from django.utils.functional import Promise class DjangoUnicodeDecodeError(UnicodeDecodeError): def __init__(self, obj, *args): self.obj = obj super().__init__(*args) def __str__(self): return '%s. You passed in %r (%s)' % (super().__str__(), self.obj, type(self.obj)) def smart_str(s, encoding='utf-8', strings_only=False, errors='strict'): """ Return a string representing 's'. Treat bytestrings using the 'encoding' codec. If strings_only is True, don't convert (some) non-string-like objects. """ if isinstance(s, Promise): # The input is the result of a gettext_lazy() call. return s return force_str(s, encoding, strings_only, errors) _PROTECTED_TYPES = ( type(None), int, float, Decimal, datetime.datetime, datetime.date, datetime.time, ) def is_protected_type(obj): """Determine if the object instance is of a protected type. Objects of protected types are preserved as-is when passed to force_str(strings_only=True). """ return isinstance(obj, _PROTECTED_TYPES) def force_str(s, encoding='utf-8', strings_only=False, errors='strict'): """ Similar to smart_str(), except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects. """ # Handle the common case first for performance reasons. if issubclass(type(s), str): return s if strings_only and is_protected_type(s): return s try: if isinstance(s, bytes): s = str(s, encoding, errors) else: s = str(s) except UnicodeDecodeError as e: raise DjangoUnicodeDecodeError(s, *e.args) return s def smart_bytes(s, encoding='utf-8', strings_only=False, errors='strict'): """ Return a bytestring version of 's', encoded as specified in 'encoding'. If strings_only is True, don't convert (some) non-string-like objects. """ if isinstance(s, Promise): # The input is the result of a gettext_lazy() call. return s return force_bytes(s, encoding, strings_only, errors) def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'): """ Similar to smart_bytes, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects. """ # Handle the common case first for performance reasons. if isinstance(s, bytes): if encoding == 'utf-8': return s else: return s.decode('utf-8', errors).encode(encoding, errors) if strings_only and is_protected_type(s): return s if isinstance(s, memoryview): return bytes(s) return str(s).encode(encoding, errors) def iri_to_uri(iri): """ Convert an Internationalized Resource Identifier (IRI) portion to a URI portion that is suitable for inclusion in a URL. This is the algorithm from section 3.1 of RFC 3987, slightly simplified since the input is assumed to be a string rather than an arbitrary byte stream. Take an IRI (string or UTF-8 bytes, e.g. '/I ♥ Django/' or b'/I \xe2\x99\xa5 Django/') and return a string containing the encoded result with ASCII chars only (e.g. '/I%20%E2%99%A5%20Django/'). """ # The list of safe characters here is constructed from the "reserved" and # "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986: # reserved = gen-delims / sub-delims # gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@" # sub-delims = "!" / "$" / "&" / "'" / "(" / ")" # / "*" / "+" / "," / ";" / "=" # unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" # Of the unreserved characters, urllib.parse.quote() already considers all # but the ~ safe. 
# The % character is also added to the list of safe characters here, as the # end of section 3.1 of RFC 3987 specifically mentions that % must not be # converted. if iri is None: return iri elif isinstance(iri, Promise): iri = str(iri) return quote(iri, safe="/#%[]=:;$&()+,!?*@'~") # List of byte values that uri_to_iri() decodes from percent encoding. # First, the unreserved characters from RFC 3986: _ascii_ranges = [[45, 46, 95, 126], range(65, 91), range(97, 123)] _hextobyte = { (fmt % char).encode(): bytes((char,)) for ascii_range in _ascii_ranges for char in ascii_range for fmt in ['%02x', '%02X'] } # And then everything above 128, because bytes ≥ 128 are part of multibyte # Unicode characters. _hexdig = '0123456789ABCDEFabcdef' _hextobyte.update({ (a + b).encode(): bytes.fromhex(a + b) for a in _hexdig[8:] for b in _hexdig }) def uri_to_iri(uri): """ Convert a Uniform Resource Identifier(URI) into an Internationalized Resource Identifier(IRI). This is the algorithm from section 3.2 of RFC 3987, excluding step 4. Take an URI in ASCII bytes (e.g. '/I%20%E2%99%A5%20Django/') and return a string containing the encoded result (e.g. '/I%20♥%20Django/'). """ if uri is None: return uri uri = force_bytes(uri) # Fast selective unquote: First, split on '%' and then starting with the # second block, decode the first 2 bytes if they represent a hex code to # decode. The rest of the block is the part after '%AB', not containing # any '%'. Add that to the output without further processing. bits = uri.split(b'%') if len(bits) == 1: iri = uri else: parts = [bits[0]] append = parts.append hextobyte = _hextobyte for item in bits[1:]: hex = item[:2] if hex in hextobyte: append(hextobyte[item[:2]]) append(item[2:]) else: append(b'%') append(item) iri = b''.join(parts) return repercent_broken_unicode(iri).decode() def escape_uri_path(path): """ Escape the unsafe characters from the path portion of a Uniform Resource Identifier (URI). """ # These are the "reserved" and "unreserved" characters specified in # sections 2.2 and 2.3 of RFC 2396: # reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" | "$" | "," # unreserved = alphanum | mark # mark = "-" | "_" | "." | "!" | "~" | "*" | "'" | "(" | ")" # The list of safe characters here is constructed subtracting ";", "=", # and "?" according to section 3.3 of RFC 2396. # The reason for not subtracting and escaping "/" is that we are escaping # the entire path, not a path segment. return quote(path, safe="/:@&+$,-_.!~*'()") def punycode(domain): """Return the Punycode of the given domain if it's non-ASCII.""" return domain.encode('idna').decode('ascii') def repercent_broken_unicode(path): """ As per section 3.2 of RFC 3987, step three of converting a URI into an IRI, repercent-encode any octet produced that is not part of a strictly legal UTF-8 octet sequence. """ while True: try: path.decode() except UnicodeDecodeError as e: # CVE-2019-14235: A recursion shouldn't be used since the exception # handling uses massive amounts of memory repercent = quote(path[e.start:e.end], safe=b"/#%[]=:;$&()+,!?*@'~") path = path[:e.start] + repercent.encode() + path[e.end:] else: return path def filepath_to_uri(path): """Convert a file system path to a URI portion that is suitable for inclusion in a URL. Encode certain chars that would normally be recognized as special chars for URIs. Do not encode the ' character, as it is a valid character within URIs. See the encodeURIComponent() JavaScript function for details. 
""" if path is None: return path # I know about `os.sep` and `os.altsep` but I want to leave # some flexibility for hardcoding separators. return quote(str(path).replace("\\", "/"), safe="/~!*()'") def get_system_encoding(): """ The encoding of the default system locale. Fallback to 'ascii' if the #encoding is unsupported by Python or could not be determined. See tickets #10335 and #5846. """ try: encoding = locale.getdefaultlocale()[1] or 'ascii' codecs.lookup(encoding) except Exception: encoding = 'ascii' return encoding DEFAULT_LOCALE_ENCODING = get_system_encoding()
4c2b83b70b5d167626578de6c8eee8f8b9a9af5014a209882ed8be7e879bf1e1
import copy from collections.abc import Mapping class OrderedSet: """ A set which keeps the ordering of the inserted items. """ def __init__(self, iterable=None): self.dict = dict.fromkeys(iterable or ()) def add(self, item): self.dict[item] = None def remove(self, item): del self.dict[item] def discard(self, item): try: self.remove(item) except KeyError: pass def __iter__(self): return iter(self.dict) def __reversed__(self): return reversed(self.dict) def __contains__(self, item): return item in self.dict def __bool__(self): return bool(self.dict) def __len__(self): return len(self.dict) def __repr__(self): data = repr(list(self.dict)) if self.dict else '' return f'{self.__class__.__qualname__}({data})' class MultiValueDictKeyError(KeyError): pass class MultiValueDict(dict): """ A subclass of dictionary customized to handle multiple values for the same key. >>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']}) >>> d['name'] 'Simon' >>> d.getlist('name') ['Adrian', 'Simon'] >>> d.getlist('doesnotexist') [] >>> d.getlist('doesnotexist', ['Adrian', 'Simon']) ['Adrian', 'Simon'] >>> d.get('lastname', 'nonexistent') 'nonexistent' >>> d.setlist('lastname', ['Holovaty', 'Willison']) This class exists to solve the irritating problem raised by cgi.parse_qs, which returns a list for every key, even though most web forms submit single name-value pairs. """ def __init__(self, key_to_list_mapping=()): super().__init__(key_to_list_mapping) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, super().__repr__()) def __getitem__(self, key): """ Return the last data value for this key, or [] if it's an empty list; raise KeyError if not found. """ try: list_ = super().__getitem__(key) except KeyError: raise MultiValueDictKeyError(key) try: return list_[-1] except IndexError: return [] def __setitem__(self, key, value): super().__setitem__(key, [value]) def __copy__(self): return self.__class__([ (k, v[:]) for k, v in self.lists() ]) def __deepcopy__(self, memo): result = self.__class__() memo[id(self)] = result for key, value in dict.items(self): dict.__setitem__(result, copy.deepcopy(key, memo), copy.deepcopy(value, memo)) return result def __getstate__(self): return {**self.__dict__, '_data': {k: self._getlist(k) for k in self}} def __setstate__(self, obj_dict): data = obj_dict.pop('_data', {}) for k, v in data.items(): self.setlist(k, v) self.__dict__.update(obj_dict) def get(self, key, default=None): """ Return the last data value for the passed key. If key doesn't exist or value is an empty list, return `default`. """ try: val = self[key] except KeyError: return default if val == []: return default return val def _getlist(self, key, default=None, force_list=False): """ Return a list of values for the key. Used internally to manipulate values list. If force_list is True, return a new copy of values. """ try: values = super().__getitem__(key) except KeyError: if default is None: return [] return default else: if force_list: values = list(values) if values is not None else None return values def getlist(self, key, default=None): """ Return the list of values for the key. If key doesn't exist, return a default value. """ return self._getlist(key, default, force_list=True) def setlist(self, key, list_): super().__setitem__(key, list_) def setdefault(self, key, default=None): if key not in self: self[key] = default # Do not return default here because __setitem__() may store # another value -- QueryDict.__setitem__() does. Look it up. 
return self[key] def setlistdefault(self, key, default_list=None): if key not in self: if default_list is None: default_list = [] self.setlist(key, default_list) # Do not return default_list here because setlist() may store # another value -- QueryDict.setlist() does. Look it up. return self._getlist(key) def appendlist(self, key, value): """Append an item to the internal list associated with key.""" self.setlistdefault(key).append(value) def items(self): """ Yield (key, value) pairs, where value is the last item in the list associated with the key. """ for key in self: yield key, self[key] def lists(self): """Yield (key, list) pairs.""" return iter(super().items()) def values(self): """Yield the last value on every key list.""" for key in self: yield self[key] def copy(self): """Return a shallow copy of this object.""" return copy.copy(self) def update(self, *args, **kwargs): """Extend rather than replace existing key lists.""" if len(args) > 1: raise TypeError("update expected at most 1 argument, got %d" % len(args)) if args: arg = args[0] if isinstance(arg, MultiValueDict): for key, value_list in arg.lists(): self.setlistdefault(key).extend(value_list) else: if isinstance(arg, Mapping): arg = arg.items() for key, value in arg: self.setlistdefault(key).append(value) for key, value in kwargs.items(): self.setlistdefault(key).append(value) def dict(self): """Return current object as a dict with singular values.""" return {key: self[key] for key in self} class ImmutableList(tuple): """ A tuple-like object that raises useful errors when it is asked to mutate. Example:: >>> a = ImmutableList(range(5), warning="You cannot mutate this.") >>> a[3] = '4' Traceback (most recent call last): ... AttributeError: You cannot mutate this. """ def __new__(cls, *args, warning='ImmutableList object is immutable.', **kwargs): self = tuple.__new__(cls, *args, **kwargs) self.warning = warning return self def complain(self, *args, **kwargs): raise AttributeError(self.warning) # All list mutation functions complain. __delitem__ = complain __delslice__ = complain __iadd__ = complain __imul__ = complain __setitem__ = complain __setslice__ = complain append = complain extend = complain insert = complain pop = complain remove = complain sort = complain reverse = complain class DictWrapper(dict): """ Wrap accesses to a dictionary so that certain values (those starting with the specified prefix) are passed through a function before being returned. The prefix is removed before looking up the real value. Used by the SQL construction code to ensure that values are correctly quoted before being used. """ def __init__(self, data, func, prefix): super().__init__(data) self.func = func self.prefix = prefix def __getitem__(self, key): """ Retrieve the real value after stripping the prefix string (if present). If the prefix is present, pass the value through self.func before returning, otherwise return the raw value. """ use_func = key.startswith(self.prefix) if use_func: key = key[len(self.prefix):] value = super().__getitem__(key) if use_func: return self.func(value) return value def _destruct_iterable_mapping_values(data): for i, elem in enumerate(data): if len(elem) != 2: raise ValueError( 'dictionary update sequence element #{} has ' 'length {}; 2 is required.'.format(i, len(elem)) ) if not isinstance(elem[0], str): raise ValueError('Element key %r invalid, only strings are allowed' % elem[0]) yield tuple(elem) class CaseInsensitiveMapping(Mapping): """ Mapping allowing case-insensitive key lookups. 
Original case of keys is preserved for iteration and string representation. Example:: >>> ci_map = CaseInsensitiveMapping({'name': 'Jane'}) >>> ci_map['Name'] Jane >>> ci_map['NAME'] Jane >>> ci_map['name'] Jane >>> ci_map # original case preserved {'name': 'Jane'} """ def __init__(self, data): if not isinstance(data, Mapping): data = {k: v for k, v in _destruct_iterable_mapping_values(data)} self._store = {k.lower(): (k, v) for k, v in data.items()} def __getitem__(self, key): return self._store[key.lower()][1] def __len__(self): return len(self._store) def __eq__(self, other): return isinstance(other, Mapping) and { k.lower(): v for k, v in self.items() } == { k.lower(): v for k, v in other.items() } def __iter__(self): return (original_key for original_key, value in self._store.values()) def __repr__(self): return repr({key: value for key, value in self._store.values()}) def copy(self): return self
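

# Illustrative usage sketch (added for exposition; the sample data is arbitrary).
if __name__ == '__main__':
    d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
    print(d['name'])            # 'Simon' -- item access returns the last value
    print(d.getlist('name'))    # ['Adrian', 'Simon']
    d.appendlist('name', 'Jacob')
    print(d.getlist('name'))    # ['Adrian', 'Simon', 'Jacob']

    s = OrderedSet([3, 1, 2, 1])
    s.add(5)
    print(list(s))              # [3, 1, 2, 5] -- insertion order, no duplicates

    headers = CaseInsensitiveMapping({'Accept': 'application/json'})
    print(headers['accept'], headers['ACCEPT'])  # same entry for either casing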
88e82d744fe0137d82f2d65affba68909174992bcb1387cabb0832da66529577
""" PHP date() style date formatting See http://www.php.net/date for format strings Usage: >>> import datetime >>> d = datetime.datetime.now() >>> df = DateFormat(d) >>> print(df.format('jS F Y H:i')) 7th October 2003 11:39 >>> """ import calendar import datetime from email.utils import format_datetime as format_datetime_rfc5322 from django.utils.dates import ( MONTHS, MONTHS_3, MONTHS_ALT, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR, ) from django.utils.regex_helper import _lazy_re_compile from django.utils.timezone import ( _datetime_ambiguous_or_imaginary, get_default_timezone, is_naive, make_aware, ) from django.utils.translation import gettext as _ re_formatchars = _lazy_re_compile(r'(?<!\\)([aAbcdDeEfFgGhHiIjlLmMnNoOPrsStTUuwWyYzZ])') re_escaped = _lazy_re_compile(r'\\(.)') class Formatter: def format(self, formatstr): pieces = [] for i, piece in enumerate(re_formatchars.split(str(formatstr))): if i % 2: if type(self.data) is datetime.date and hasattr(TimeFormat, piece): raise TypeError( "The format for date objects may not contain " "time-related format specifiers (found '%s')." % piece ) pieces.append(str(getattr(self, piece)())) elif piece: pieces.append(re_escaped.sub(r'\1', piece)) return ''.join(pieces) class TimeFormat(Formatter): def __init__(self, obj): self.data = obj self.timezone = None # We only support timezone when formatting datetime objects, # not date objects (timezone information not appropriate), # or time objects (against established django policy). if isinstance(obj, datetime.datetime): if is_naive(obj): self.timezone = get_default_timezone() else: self.timezone = obj.tzinfo @property def _no_timezone_or_datetime_is_ambiguous_or_imaginary(self): return ( not self.timezone or _datetime_ambiguous_or_imaginary(self.data, self.timezone) ) def a(self): "'a.m.' or 'p.m.'" if self.data.hour > 11: return _('p.m.') return _('a.m.') def A(self): "'AM' or 'PM'" if self.data.hour > 11: return _('PM') return _('AM') def e(self): """ Timezone name. If timezone information is not available, return an empty string. """ if not self.timezone: return "" try: if hasattr(self.data, 'tzinfo') and self.data.tzinfo: return self.data.tzname() or '' except NotImplementedError: pass return "" def f(self): """ Time, in 12-hour hours and minutes, with minutes left off if they're zero. Examples: '1', '1:30', '2:05', '2' Proprietary extension. """ hour = self.data.hour % 12 or 12 minute = self.data.minute return '%d:%02d' % (hour, minute) if minute else hour def g(self): "Hour, 12-hour format without leading zeros; i.e. '1' to '12'" return self.data.hour % 12 or 12 def G(self): "Hour, 24-hour format without leading zeros; i.e. '0' to '23'" return self.data.hour def h(self): "Hour, 12-hour format; i.e. '01' to '12'" return '%02d' % (self.data.hour % 12 or 12) def H(self): "Hour, 24-hour format; i.e. '00' to '23'" return '%02d' % self.data.hour def i(self): "Minutes; i.e. '00' to '59'" return '%02d' % self.data.minute def O(self): # NOQA: E743, E741 """ Difference to Greenwich time in hours; e.g. '+0200', '-0430'. If timezone information is not available, return an empty string. """ if self._no_timezone_or_datetime_is_ambiguous_or_imaginary: return "" seconds = self.Z() sign = '-' if seconds < 0 else '+' seconds = abs(seconds) return "%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60) def P(self): """ Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off if they're zero and the strings 'midnight' and 'noon' if appropriate. 
Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.' Proprietary extension. """ if self.data.minute == 0 and self.data.hour == 0: return _('midnight') if self.data.minute == 0 and self.data.hour == 12: return _('noon') return '%s %s' % (self.f(), self.a()) def s(self): "Seconds; i.e. '00' to '59'" return '%02d' % self.data.second def T(self): """ Time zone of this machine; e.g. 'EST' or 'MDT'. If timezone information is not available, return an empty string. """ if self._no_timezone_or_datetime_is_ambiguous_or_imaginary: return "" return str(self.timezone.tzname(self.data)) def u(self): "Microseconds; i.e. '000000' to '999999'" return '%06d' % self.data.microsecond def Z(self): """ Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for timezones west of UTC is always negative, and for those east of UTC is always positive. If timezone information is not available, return an empty string. """ if self._no_timezone_or_datetime_is_ambiguous_or_imaginary: return "" offset = self.timezone.utcoffset(self.data) # `offset` is a datetime.timedelta. For negative values (to the west of # UTC) only days can be negative (days=-1) and seconds are always # positive. e.g. UTC-1 -> timedelta(days=-1, seconds=82800, microseconds=0) # Positive offsets have days=0 return offset.days * 86400 + offset.seconds class DateFormat(TimeFormat): def b(self): "Month, textual, 3 letters, lowercase; e.g. 'jan'" return MONTHS_3[self.data.month] def c(self): """ ISO 8601 Format Example : '2008-01-02T10:30:00.000123' """ return self.data.isoformat() def d(self): "Day of the month, 2 digits with leading zeros; i.e. '01' to '31'" return '%02d' % self.data.day def D(self): "Day of the week, textual, 3 letters; e.g. 'Fri'" return WEEKDAYS_ABBR[self.data.weekday()] def E(self): "Alternative month names as required by some locales. Proprietary extension." return MONTHS_ALT[self.data.month] def F(self): "Month, textual, long; e.g. 'January'" return MONTHS[self.data.month] def I(self): # NOQA: E743, E741 "'1' if Daylight Savings Time, '0' otherwise." if self._no_timezone_or_datetime_is_ambiguous_or_imaginary: return '' return '1' if self.timezone.dst(self.data) else '0' def j(self): "Day of the month without leading zeros; i.e. '1' to '31'" return self.data.day def l(self): # NOQA: E743, E741 "Day of the week, textual, long; e.g. 'Friday'" return WEEKDAYS[self.data.weekday()] def L(self): "Boolean for whether it is a leap year; i.e. True or False" return calendar.isleap(self.data.year) def m(self): "Month; i.e. '01' to '12'" return '%02d' % self.data.month def M(self): "Month, textual, 3 letters; e.g. 'Jan'" return MONTHS_3[self.data.month].title() def n(self): "Month without leading zeros; i.e. '1' to '12'" return self.data.month def N(self): "Month abbreviation in Associated Press style. Proprietary extension." return MONTHS_AP[self.data.month] def o(self): "ISO 8601 year number matching the ISO week number (W)" return self.data.isocalendar()[0] def r(self): "RFC 5322 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'" if type(self.data) is datetime.date: raise TypeError( "The format for date objects may not contain time-related " "format specifiers (found 'r')." ) if is_naive(self.data): dt = make_aware(self.data, timezone=self.timezone) else: dt = self.data return format_datetime_rfc5322(dt) def S(self): "English ordinal suffix for the day of the month, 2 characters; i.e. 
'st', 'nd', 'rd' or 'th'" if self.data.day in (11, 12, 13): # Special case return 'th' last = self.data.day % 10 if last == 1: return 'st' if last == 2: return 'nd' if last == 3: return 'rd' return 'th' def t(self): "Number of days in the given month; i.e. '28' to '31'" return '%02d' % calendar.monthrange(self.data.year, self.data.month)[1] def U(self): "Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)" value = self.data if not isinstance(value, datetime.datetime): value = datetime.datetime.combine(value, datetime.time.min) return int(value.timestamp()) def w(self): "Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)" return (self.data.weekday() + 1) % 7 def W(self): "ISO-8601 week number of year, weeks starting on Monday" return self.data.isocalendar()[1] def y(self): """Year, 2 digits with leading zeros; e.g. '99'.""" return '%02d' % (self.data.year % 100) def Y(self): """Year, 4 digits with leading zeros; e.g. '1999'.""" return '%04d' % self.data.year def z(self): """Day of the year, i.e. 1 to 366.""" return self.data.timetuple().tm_yday def format(value, format_string): "Convenience function" df = DateFormat(value) return df.format(format_string) def time_format(value, format_string): "Convenience function" tf = TimeFormat(value) return tf.format(format_string)
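

# Illustrative usage sketch (added for exposition). Only locale-independent
# specifiers are used, and the datetime is made aware explicitly, so the demo
# needs neither translations nor settings.TIME_ZONE.
if __name__ == '__main__':
    d = datetime.date(2021, 9, 7)
    print(format(d, 'Y-m-d'))   # '2021-09-07'
    print(format(d, 'jS'))      # '7th' -- day plus English ordinal suffix
    aware = datetime.datetime(2021, 9, 7, 16, 30, 5, tzinfo=datetime.timezone.utc)
    print(format(aware, 'Y-m-d H:i:s e'))                  # '2021-09-07 16:30:05 UTC'
    print(time_format(aware, 'G'), time_format(aware, 'O'))  # '16' '+0000'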
f3b48257489e9b6ffee2cade6cef8cee49613e6e9092f287e517ab6b66b42d4f
# RemovedInDjango50Warning # Copyright (c) 2010 Guilherme Gondim. All rights reserved. # Copyright (c) 2009 Simon Willison. All rights reserved. # Copyright (c) 2002 Drew Perttula. All rights reserved. # # License: # Python Software Foundation License version 2 # # See the file "LICENSE" for terms & conditions for usage, and a DISCLAIMER OF # ALL WARRANTIES. # # This Baseconv distribution contains no GNU General Public Licensed (GPLed) # code so it may be used in proprietary projects just like prior ``baseconv`` # distributions. # # All trademarks referenced herein are property of their respective holders. # """ Convert numbers from base 10 integers to base X strings and back again. Sample usage:: >>> base20 = BaseConverter('0123456789abcdefghij') >>> base20.encode(1234) '31e' >>> base20.decode('31e') 1234 >>> base20.encode(-1234) '-31e' >>> base20.decode('-31e') -1234 >>> base11 = BaseConverter('0123456789-', sign='$') >>> base11.encode(-1234) '$-22' >>> base11.decode('$-22') -1234 """ import warnings from django.utils.deprecation import RemovedInDjango50Warning warnings.warn( 'The django.utils.baseconv module is deprecated.', category=RemovedInDjango50Warning, stacklevel=2, ) BASE2_ALPHABET = '01' BASE16_ALPHABET = '0123456789ABCDEF' BASE56_ALPHABET = '23456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnpqrstuvwxyz' BASE36_ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyz' BASE62_ALPHABET = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' BASE64_ALPHABET = BASE62_ALPHABET + '-_' class BaseConverter: decimal_digits = '0123456789' def __init__(self, digits, sign='-'): self.sign = sign self.digits = digits if sign in self.digits: raise ValueError('Sign character found in converter base digits.') def __repr__(self): return "<%s: base%s (%s)>" % (self.__class__.__name__, len(self.digits), self.digits) def encode(self, i): neg, value = self.convert(i, self.decimal_digits, self.digits, '-') if neg: return self.sign + value return value def decode(self, s): neg, value = self.convert(s, self.digits, self.decimal_digits, self.sign) if neg: value = '-' + value return int(value) def convert(self, number, from_digits, to_digits, sign): if str(number)[0] == sign: number = str(number)[1:] neg = 1 else: neg = 0 # make an integer out of the number x = 0 for digit in str(number): x = x * len(from_digits) + from_digits.index(digit) # create the result in base 'len(to_digits)' if x == 0: res = to_digits[0] else: res = '' while x > 0: digit = x % len(to_digits) res = to_digits[digit] + res x = int(x // len(to_digits)) return neg, res base2 = BaseConverter(BASE2_ALPHABET) base16 = BaseConverter(BASE16_ALPHABET) base36 = BaseConverter(BASE36_ALPHABET) base56 = BaseConverter(BASE56_ALPHABET) base62 = BaseConverter(BASE62_ALPHABET) base64 = BaseConverter(BASE64_ALPHABET, sign='$')
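

# Illustrative usage sketch (added for exposition). The module is deprecated,
# as warned above; this only demonstrates the API it still ships.
if __name__ == '__main__':
    print(base36.encode(86400))   # '1uo0'
    print(base36.decode('1uo0'))  # 86400
    print(base62.encode(-1234))   # '-Ju'
    print(base62.decode('-Ju'))   # -1234
    hex_conv = BaseConverter('0123456789abcdef')
    print(hex_conv.encode(255))   # 'ff'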
11ff9ac7b40e4fe25a64576ebf7b5cba8a12f7d40580fb6cb5cb56f80b53e9ac
"""HTML utilities suitable for global use.""" import html import json import re from html.parser import HTMLParser from urllib.parse import ( parse_qsl, quote, unquote, urlencode, urlsplit, urlunsplit, ) from django.utils.encoding import punycode from django.utils.functional import Promise, keep_lazy, keep_lazy_text from django.utils.http import RFC3986_GENDELIMS, RFC3986_SUBDELIMS from django.utils.regex_helper import _lazy_re_compile from django.utils.safestring import SafeData, SafeString, mark_safe from django.utils.text import normalize_newlines # Configuration for urlize() function. TRAILING_PUNCTUATION_CHARS = '.,:;!' WRAPPING_PUNCTUATION = [('(', ')'), ('[', ']')] # List of possible strings used for bullets in bulleted lists. DOTS = ['&middot;', '*', '\u2022', '&#149;', '&bull;', '&#8226;'] word_split_re = _lazy_re_compile(r'''([\s<>"']+)''') simple_url_re = _lazy_re_compile(r'^https?://\[?\w', re.IGNORECASE) simple_url_2_re = _lazy_re_compile( r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)($|/.*)$', re.IGNORECASE ) @keep_lazy(str, SafeString) def escape(text): """ Return the given text with ampersands, quotes and angle brackets encoded for use in HTML. Always escape input, even if it's already escaped and marked as such. This may result in double-escaping. If this is a concern, use conditional_escape() instead. """ return mark_safe(html.escape(str(text))) _js_escapes = { ord('\\'): '\\u005C', ord('\''): '\\u0027', ord('"'): '\\u0022', ord('>'): '\\u003E', ord('<'): '\\u003C', ord('&'): '\\u0026', ord('='): '\\u003D', ord('-'): '\\u002D', ord(';'): '\\u003B', ord('`'): '\\u0060', ord('\u2028'): '\\u2028', ord('\u2029'): '\\u2029' } # Escape every ASCII character with a value less than 32. _js_escapes.update((ord('%c' % z), '\\u%04X' % z) for z in range(32)) @keep_lazy(str, SafeString) def escapejs(value): """Hex encode characters for use in JavaScript strings.""" return mark_safe(str(value).translate(_js_escapes)) _json_script_escapes = { ord('>'): '\\u003E', ord('<'): '\\u003C', ord('&'): '\\u0026', } def json_script(value, element_id): """ Escape all the HTML/XML special characters with their unicode escapes, so value is safe to be output anywhere except for inside a tag attribute. Wrap the escaped JSON in a script tag. """ from django.core.serializers.json import DjangoJSONEncoder json_str = json.dumps(value, cls=DjangoJSONEncoder).translate(_json_script_escapes) return format_html( '<script id="{}" type="application/json">{}</script>', element_id, mark_safe(json_str) ) def conditional_escape(text): """ Similar to escape(), except that it doesn't operate on pre-escaped strings. This function relies on the __html__ convention used both by Django's SafeData class and by third-party libraries like markupsafe. """ if isinstance(text, Promise): text = str(text) if hasattr(text, '__html__'): return text.__html__() else: return escape(text) def format_html(format_string, *args, **kwargs): """ Similar to str.format, but pass all arguments through conditional_escape(), and call mark_safe() on the result. This function should be used instead of str.format or % interpolation to build up small HTML fragments. 
""" args_safe = map(conditional_escape, args) kwargs_safe = {k: conditional_escape(v) for (k, v) in kwargs.items()} return mark_safe(format_string.format(*args_safe, **kwargs_safe)) def format_html_join(sep, format_string, args_generator): """ A wrapper of format_html, for the common case of a group of arguments that need to be formatted using the same format string, and then joined using 'sep'. 'sep' is also passed through conditional_escape. 'args_generator' should be an iterator that returns the sequence of 'args' that will be passed to format_html. Example: format_html_join('\n', "<li>{} {}</li>", ((u.first_name, u.last_name) for u in users)) """ return mark_safe(conditional_escape(sep).join( format_html(format_string, *args) for args in args_generator )) @keep_lazy_text def linebreaks(value, autoescape=False): """Convert newlines into <p> and <br>s.""" value = normalize_newlines(value) paras = re.split('\n{2,}', str(value)) if autoescape: paras = ['<p>%s</p>' % escape(p).replace('\n', '<br>') for p in paras] else: paras = ['<p>%s</p>' % p.replace('\n', '<br>') for p in paras] return '\n\n'.join(paras) class MLStripper(HTMLParser): def __init__(self): super().__init__(convert_charrefs=False) self.reset() self.fed = [] def handle_data(self, d): self.fed.append(d) def handle_entityref(self, name): self.fed.append('&%s;' % name) def handle_charref(self, name): self.fed.append('&#%s;' % name) def get_data(self): return ''.join(self.fed) def _strip_once(value): """ Internal tag stripping utility used by strip_tags. """ s = MLStripper() s.feed(value) s.close() return s.get_data() @keep_lazy_text def strip_tags(value): """Return the given HTML with all tags stripped.""" # Note: in typical case this loop executes _strip_once once. Loop condition # is redundant, but helps to reduce number of executions of _strip_once. value = str(value) while '<' in value and '>' in value: new_value = _strip_once(value) if value.count('<') == new_value.count('<'): # _strip_once wasn't able to detect more tags. break value = new_value return value @keep_lazy_text def strip_spaces_between_tags(value): """Return the given HTML with spaces between tags removed.""" return re.sub(r'>\s+<', '><', str(value)) def smart_urlquote(url): """Quote a URL if it isn't already quoted.""" def unquote_quote(segment): segment = unquote(segment) # Tilde is part of RFC3986 Unreserved Characters # https://tools.ietf.org/html/rfc3986#section-2.3 # See also https://bugs.python.org/issue16285 return quote(segment, safe=RFC3986_SUBDELIMS + RFC3986_GENDELIMS + '~') # Handle IDN before quoting. try: scheme, netloc, path, query, fragment = urlsplit(url) except ValueError: # invalid IPv6 URL (normally square brackets in hostname part). return unquote_quote(url) try: netloc = punycode(netloc) # IDN -> ACE except UnicodeError: # invalid domain part return unquote_quote(url) if query: # Separately unquoting key/value, so as to not mix querystring separators # included in query values. See #22267. query_parts = [(unquote(q[0]), unquote(q[1])) for q in parse_qsl(query, keep_blank_values=True)] # urlencode will take care of quoting query = urlencode(query_parts) path = unquote_quote(path) fragment = unquote_quote(fragment) return urlunsplit((scheme, netloc, path, query, fragment)) @keep_lazy_text def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False): """ Convert any URLs in text into clickable links. Works on http://, https://, www. 
links, and also on links ending in one of the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org). Links can have trailing punctuation (periods, commas, close-parens) and leading punctuation (opening parens) and it'll still do the right thing. If trim_url_limit is not None, truncate the URLs in the link text longer than this limit to trim_url_limit - 1 characters and append an ellipsis. If nofollow is True, give the links a rel="nofollow" attribute. If autoescape is True, autoescape the link text and URLs. """ safe_input = isinstance(text, SafeData) def trim_url(x, limit=trim_url_limit): if limit is None or len(x) <= limit: return x return '%s…' % x[:max(0, limit - 1)] def trim_punctuation(lead, middle, trail): """ Trim trailing and wrapping punctuation from `middle`. Return the items of the new state. """ # Continue trimming until middle remains unchanged. trimmed_something = True while trimmed_something: trimmed_something = False # Trim wrapping punctuation. for opening, closing in WRAPPING_PUNCTUATION: if middle.startswith(opening): middle = middle[len(opening):] lead += opening trimmed_something = True # Keep parentheses at the end only if they're balanced. if (middle.endswith(closing) and middle.count(closing) == middle.count(opening) + 1): middle = middle[:-len(closing)] trail = closing + trail trimmed_something = True # Trim trailing punctuation (after trimming wrapping punctuation, # as encoded entities contain ';'). Unescape entities to avoid # breaking them by removing ';'. middle_unescaped = html.unescape(middle) stripped = middle_unescaped.rstrip(TRAILING_PUNCTUATION_CHARS) if middle_unescaped != stripped: punctuation_count = len(middle_unescaped) - len(stripped) trail = middle[-punctuation_count:] + trail middle = middle[:-punctuation_count] trimmed_something = True return lead, middle, trail def is_email_simple(value): """Return True if value looks like an email address.""" # An @ must be in the middle of the value. if '@' not in value or value.startswith('@') or value.endswith('@'): return False try: p1, p2 = value.split('@') except ValueError: # value contains more than one @. return False # Dot must be in p2 (e.g. example.com) if '.' not in p2 or p2.startswith('.'): return False return True words = word_split_re.split(str(text)) for i, word in enumerate(words): if '.' in word or '@' in word or ':' in word: # lead: Current punctuation trimmed from the beginning of the word. # middle: Current state of the word. # trail: Current punctuation trimmed from the end of the word. lead, middle, trail = '', word, '' # Deal with punctuation. lead, middle, trail = trim_punctuation(lead, middle, trail) # Make URL we want to point to. url = None nofollow_attr = ' rel="nofollow"' if nofollow else '' if simple_url_re.match(middle): url = smart_urlquote(html.unescape(middle)) elif simple_url_2_re.match(middle): url = smart_urlquote('http://%s' % html.unescape(middle)) elif ':' not in middle and is_email_simple(middle): local, domain = middle.rsplit('@', 1) try: domain = punycode(domain) except UnicodeError: continue url = 'mailto:%s@%s' % (local, domain) nofollow_attr = '' # Make link. 
if url: trimmed = trim_url(middle) if autoescape and not safe_input: lead, trail = escape(lead), escape(trail) trimmed = escape(trimmed) middle = '<a href="%s"%s>%s</a>' % (escape(url), nofollow_attr, trimmed) words[i] = mark_safe('%s%s%s' % (lead, middle, trail)) else: if safe_input: words[i] = mark_safe(word) elif autoescape: words[i] = escape(word) elif safe_input: words[i] = mark_safe(word) elif autoescape: words[i] = escape(word) return ''.join(words) def avoid_wrapping(value): """ Avoid text wrapping in the middle of a phrase by adding non-breaking spaces where there previously were normal spaces. """ return value.replace(" ", "\xa0") def html_safe(klass): """ A decorator that defines the __html__ method. This helps non-Django templates to detect classes whose __str__ methods return SafeString. """ if '__html__' in klass.__dict__: raise ValueError( "can't apply @html_safe to %s because it defines " "__html__()." % klass.__name__ ) if '__str__' not in klass.__dict__: raise ValueError( "can't apply @html_safe to %s because it doesn't " "define __str__()." % klass.__name__ ) klass_str = klass.__str__ klass.__str__ = lambda self: mark_safe(klass_str(self)) klass.__html__ = lambda self: str(self) return klass
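

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above): how the escaping
# helpers defined here compose. The literal inputs are made-up examples.
from django.utils.html import format_html, json_script, urlize

# format_html() escapes interpolated values but trusts the format string.
bold = format_html('<b>{}</b>', '<i>unsafe</i>')
assert bold == '<b>&lt;i&gt;unsafe&lt;/i&gt;</b>'

# urlize() linkifies bare URLs, www. links, and email addresses; trailing
# punctuation stays outside the anchor.
linked = urlize('See www.example.com.', nofollow=True)
assert linked == 'See <a href="http://www.example.com" rel="nofollow">www.example.com</a>.'

# json_script() emits JSON that is safe to embed directly in an HTML page.
tag = json_script({'user': '<admin>'}, 'init-data')
assert tag.startswith('<script id="init-data" type="application/json">')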
import logging import logging.config # needed when logging_config doesn't start with logging.config from copy import copy from django.conf import settings from django.core import mail from django.core.mail import get_connection from django.core.management.color import color_style from django.utils.module_loading import import_string request_logger = logging.getLogger('django.request') # Default logging for Django. This sends an email to the site admins on every # HTTP 500 error. Depending on DEBUG, all other log records are either sent to # the console (DEBUG=True) or discarded (DEBUG=False) by means of the # require_debug_true filter. This configuration is quoted in # docs/ref/logging.txt; please amend it there if edited here. DEFAULT_LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse', }, 'require_debug_true': { '()': 'django.utils.log.RequireDebugTrue', }, }, 'formatters': { 'django.server': { '()': 'django.utils.log.ServerFormatter', 'format': '[{server_time}] {message}', 'style': '{', } }, 'handlers': { 'console': { 'level': 'INFO', 'filters': ['require_debug_true'], 'class': 'logging.StreamHandler', }, 'django.server': { 'level': 'INFO', 'class': 'logging.StreamHandler', 'formatter': 'django.server', }, 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django': { 'handlers': ['console', 'mail_admins'], 'level': 'INFO', }, 'django.server': { 'handlers': ['django.server'], 'level': 'INFO', 'propagate': False, }, } } def configure_logging(logging_config, logging_settings): if logging_config: # First find the logging configuration function ... logging_config_func = import_string(logging_config) logging.config.dictConfig(DEFAULT_LOGGING) # ... then invoke it with the logging settings if logging_settings: logging_config_func(logging_settings) class AdminEmailHandler(logging.Handler): """An exception log handler that emails log entries to site admins. If the request is passed as the first argument to the log record, request data will be provided in the email report. """ def __init__(self, include_html=False, email_backend=None, reporter_class=None): super().__init__() self.include_html = include_html self.email_backend = email_backend self.reporter_class = import_string(reporter_class or settings.DEFAULT_EXCEPTION_REPORTER) def emit(self, record): try: request = record.request subject = '%s (%s IP): %s' % ( record.levelname, ('internal' if request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS else 'EXTERNAL'), record.getMessage() ) except Exception: subject = '%s: %s' % ( record.levelname, record.getMessage() ) request = None subject = self.format_subject(subject) # Since we add a nicely formatted traceback on our own, create a copy # of the log record without the exception data. 
no_exc_record = copy(record) no_exc_record.exc_info = None no_exc_record.exc_text = None if record.exc_info: exc_info = record.exc_info else: exc_info = (None, record.getMessage(), None) reporter = self.reporter_class(request, is_email=True, *exc_info) message = "%s\n\n%s" % (self.format(no_exc_record), reporter.get_traceback_text()) html_message = reporter.get_traceback_html() if self.include_html else None self.send_mail(subject, message, fail_silently=True, html_message=html_message) def send_mail(self, subject, message, *args, **kwargs): mail.mail_admins(subject, message, *args, connection=self.connection(), **kwargs) def connection(self): return get_connection(backend=self.email_backend, fail_silently=True) def format_subject(self, subject): """ Escape CR and LF characters. """ return subject.replace('\n', '\\n').replace('\r', '\\r') class CallbackFilter(logging.Filter): """ A logging filter that checks the return value of a given callable (which takes the record-to-be-logged as its only parameter) to decide whether to log a record. """ def __init__(self, callback): self.callback = callback def filter(self, record): if self.callback(record): return 1 return 0 class RequireDebugFalse(logging.Filter): def filter(self, record): return not settings.DEBUG class RequireDebugTrue(logging.Filter): def filter(self, record): return settings.DEBUG class ServerFormatter(logging.Formatter): default_time_format = '%d/%b/%Y %H:%M:%S' def __init__(self, *args, **kwargs): self.style = color_style() super().__init__(*args, **kwargs) def format(self, record): msg = record.msg status_code = getattr(record, 'status_code', None) if status_code: if 200 <= status_code < 300: # Put 2XX first, since it should be the common case msg = self.style.HTTP_SUCCESS(msg) elif 100 <= status_code < 200: msg = self.style.HTTP_INFO(msg) elif status_code == 304: msg = self.style.HTTP_NOT_MODIFIED(msg) elif 300 <= status_code < 400: msg = self.style.HTTP_REDIRECT(msg) elif status_code == 404: msg = self.style.HTTP_NOT_FOUND(msg) elif 400 <= status_code < 500: msg = self.style.HTTP_BAD_REQUEST(msg) else: # Any 5XX, or any other status code msg = self.style.HTTP_SERVER_ERROR(msg) if self.uses_server_time() and not hasattr(record, 'server_time'): record.server_time = self.formatTime(record, self.datefmt) record.msg = msg return super().format(record) def uses_server_time(self): return self._fmt.find('{server_time}') >= 0 def log_response(message, *args, response=None, request=None, logger=request_logger, level=None, exc_info=None): """ Log errors based on HttpResponse status. Log 5xx responses as errors and 4xx responses as warnings (unless a level is given as a keyword argument). The HttpResponse status_code and the request are passed to the logger's extra parameter. """ # Check if the response has already been logged. Multiple requests to log # the same response can be received in some cases, e.g., when the # response is the result of an exception and is logged at the time the # exception is caught so that the exc_info can be recorded. if getattr(response, '_has_been_logged', False): return if level is None: if response.status_code >= 500: level = 'error' elif response.status_code >= 400: level = 'warning' else: level = 'info' getattr(logger, level)( message, *args, extra={ 'status_code': response.status_code, 'request': request, }, exc_info=exc_info, ) response._has_been_logged = True
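

# ---------------------------------------------------------------------------
# Illustrative configuration sketch (not part of the module above): wiring
# AdminEmailHandler and CallbackFilter into a LOGGING setting. The filter
# callback and the choice of logger are assumptions made for the example.
EXAMPLE_LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse',
        },
        # CallbackFilter: drop noisy "Invalid HTTP_HOST" reports.
        'skip_invalid_host': {
            '()': 'django.utils.log.CallbackFilter',
            'callback': lambda record: 'Invalid HTTP_HOST' not in record.getMessage(),
        },
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false', 'skip_invalid_host'],
            'class': 'django.utils.log.AdminEmailHandler',
            'include_html': True,  # attach the HTML debug page to the email
        },
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': False,
        },
    },
}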
""" Syndication feed generation library -- used for generating RSS, etc. Sample usage: >>> from django.utils import feedgenerator >>> feed = feedgenerator.Rss201rev2Feed( ... title="Poynter E-Media Tidbits", ... link="http://www.poynter.org/column.asp?id=31", ... description="A group blog by the sharpest minds in online media/journalism/publishing.", ... language="en", ... ) >>> feed.add_item( ... title="Hello", ... link="http://www.holovaty.com/test/", ... description="Testing." ... ) >>> with open('test.rss', 'w') as fp: ... feed.write(fp, 'utf-8') For definitions of the different versions of RSS, see: https://web.archive.org/web/20110718035220/http://diveintomark.org/archives/2004/02/04/incompatible-rss """ import datetime import email from io import StringIO from urllib.parse import urlparse from django.utils.encoding import iri_to_uri from django.utils.timezone import utc from django.utils.xmlutils import SimplerXMLGenerator def rfc2822_date(date): if not isinstance(date, datetime.datetime): date = datetime.datetime.combine(date, datetime.time()) return email.utils.format_datetime(date) def rfc3339_date(date): if not isinstance(date, datetime.datetime): date = datetime.datetime.combine(date, datetime.time()) return date.isoformat() + ('Z' if date.utcoffset() is None else '') def get_tag_uri(url, date): """ Create a TagURI. See https://web.archive.org/web/20110514113830/http://diveintomark.org/archives/2004/05/28/howto-atom-id """ bits = urlparse(url) d = '' if date is not None: d = ',%s' % date.strftime('%Y-%m-%d') return 'tag:%s%s:%s/%s' % (bits.hostname, d, bits.path, bits.fragment) class SyndicationFeed: "Base class for all syndication feeds. Subclasses should provide write()" def __init__(self, title, link, description, language=None, author_email=None, author_name=None, author_link=None, subtitle=None, categories=None, feed_url=None, feed_copyright=None, feed_guid=None, ttl=None, **kwargs): def to_str(s): return str(s) if s is not None else s categories = categories and [str(c) for c in categories] self.feed = { 'title': to_str(title), 'link': iri_to_uri(link), 'description': to_str(description), 'language': to_str(language), 'author_email': to_str(author_email), 'author_name': to_str(author_name), 'author_link': iri_to_uri(author_link), 'subtitle': to_str(subtitle), 'categories': categories or (), 'feed_url': iri_to_uri(feed_url), 'feed_copyright': to_str(feed_copyright), 'id': feed_guid or link, 'ttl': to_str(ttl), **kwargs, } self.items = [] def add_item(self, title, link, description, author_email=None, author_name=None, author_link=None, pubdate=None, comments=None, unique_id=None, unique_id_is_permalink=None, categories=(), item_copyright=None, ttl=None, updateddate=None, enclosures=None, **kwargs): """ Add an item to the feed. All args are expected to be strings except pubdate and updateddate, which are datetime.datetime objects, and enclosures, which is an iterable of instances of the Enclosure class. 
""" def to_str(s): return str(s) if s is not None else s categories = categories and [to_str(c) for c in categories] self.items.append({ 'title': to_str(title), 'link': iri_to_uri(link), 'description': to_str(description), 'author_email': to_str(author_email), 'author_name': to_str(author_name), 'author_link': iri_to_uri(author_link), 'pubdate': pubdate, 'updateddate': updateddate, 'comments': to_str(comments), 'unique_id': to_str(unique_id), 'unique_id_is_permalink': unique_id_is_permalink, 'enclosures': enclosures or (), 'categories': categories or (), 'item_copyright': to_str(item_copyright), 'ttl': to_str(ttl), **kwargs, }) def num_items(self): return len(self.items) def root_attributes(self): """ Return extra attributes to place on the root (i.e. feed/channel) element. Called from write(). """ return {} def add_root_elements(self, handler): """ Add elements in the root (i.e. feed/channel) element. Called from write(). """ pass def item_attributes(self, item): """ Return extra attributes to place on each item (i.e. item/entry) element. """ return {} def add_item_elements(self, handler, item): """ Add elements on each item (i.e. item/entry) element. """ pass def write(self, outfile, encoding): """ Output the feed in the given encoding to outfile, which is a file-like object. Subclasses should override this. """ raise NotImplementedError('subclasses of SyndicationFeed must provide a write() method') def writeString(self, encoding): """ Return the feed in the given encoding as a string. """ s = StringIO() self.write(s, encoding) return s.getvalue() def latest_post_date(self): """ Return the latest item's pubdate or updateddate. If no items have either of these attributes this return the current UTC date/time. """ latest_date = None date_keys = ('updateddate', 'pubdate') for item in self.items: for date_key in date_keys: item_date = item.get(date_key) if item_date: if latest_date is None or item_date > latest_date: latest_date = item_date return latest_date or datetime.datetime.now(tz=utc) class Enclosure: """An RSS enclosure""" def __init__(self, url, length, mime_type): "All args are expected to be strings" self.length, self.mime_type = length, mime_type self.url = iri_to_uri(url) class RssFeed(SyndicationFeed): content_type = 'application/rss+xml; charset=utf-8' def write(self, outfile, encoding): handler = SimplerXMLGenerator(outfile, encoding, short_empty_elements=True) handler.startDocument() handler.startElement("rss", self.rss_attributes()) handler.startElement("channel", self.root_attributes()) self.add_root_elements(handler) self.write_items(handler) self.endChannelElement(handler) handler.endElement("rss") def rss_attributes(self): return { 'version': self._version, 'xmlns:atom': 'http://www.w3.org/2005/Atom', } def write_items(self, handler): for item in self.items: handler.startElement('item', self.item_attributes(item)) self.add_item_elements(handler, item) handler.endElement("item") def add_root_elements(self, handler): handler.addQuickElement("title", self.feed['title']) handler.addQuickElement("link", self.feed['link']) handler.addQuickElement("description", self.feed['description']) if self.feed['feed_url'] is not None: handler.addQuickElement("atom:link", None, {"rel": "self", "href": self.feed['feed_url']}) if self.feed['language'] is not None: handler.addQuickElement("language", self.feed['language']) for cat in self.feed['categories']: handler.addQuickElement("category", cat) if self.feed['feed_copyright'] is not None: handler.addQuickElement("copyright", 
self.feed['feed_copyright']) handler.addQuickElement("lastBuildDate", rfc2822_date(self.latest_post_date())) if self.feed['ttl'] is not None: handler.addQuickElement("ttl", self.feed['ttl']) def endChannelElement(self, handler): handler.endElement("channel") class RssUserland091Feed(RssFeed): _version = "0.91" def add_item_elements(self, handler, item): handler.addQuickElement("title", item['title']) handler.addQuickElement("link", item['link']) if item['description'] is not None: handler.addQuickElement("description", item['description']) class Rss201rev2Feed(RssFeed): # Spec: https://cyber.harvard.edu/rss/rss.html _version = "2.0" def add_item_elements(self, handler, item): handler.addQuickElement("title", item['title']) handler.addQuickElement("link", item['link']) if item['description'] is not None: handler.addQuickElement("description", item['description']) # Author information. if item["author_name"] and item["author_email"]: handler.addQuickElement("author", "%s (%s)" % (item['author_email'], item['author_name'])) elif item["author_email"]: handler.addQuickElement("author", item["author_email"]) elif item["author_name"]: handler.addQuickElement( "dc:creator", item["author_name"], {"xmlns:dc": "http://purl.org/dc/elements/1.1/"} ) if item['pubdate'] is not None: handler.addQuickElement("pubDate", rfc2822_date(item['pubdate'])) if item['comments'] is not None: handler.addQuickElement("comments", item['comments']) if item['unique_id'] is not None: guid_attrs = {} if isinstance(item.get('unique_id_is_permalink'), bool): guid_attrs['isPermaLink'] = str(item['unique_id_is_permalink']).lower() handler.addQuickElement("guid", item['unique_id'], guid_attrs) if item['ttl'] is not None: handler.addQuickElement("ttl", item['ttl']) # Enclosure. if item['enclosures']: enclosures = list(item['enclosures']) if len(enclosures) > 1: raise ValueError( "RSS feed items may only have one enclosure, see " "http://www.rssboard.org/rss-profile#element-channel-item-enclosure" ) enclosure = enclosures[0] handler.addQuickElement('enclosure', '', { 'url': enclosure.url, 'length': enclosure.length, 'type': enclosure.mime_type, }) # Categories. 
for cat in item['categories']: handler.addQuickElement("category", cat) class Atom1Feed(SyndicationFeed): # Spec: https://tools.ietf.org/html/rfc4287 content_type = 'application/atom+xml; charset=utf-8' ns = "http://www.w3.org/2005/Atom" def write(self, outfile, encoding): handler = SimplerXMLGenerator(outfile, encoding, short_empty_elements=True) handler.startDocument() handler.startElement('feed', self.root_attributes()) self.add_root_elements(handler) self.write_items(handler) handler.endElement("feed") def root_attributes(self): if self.feed['language'] is not None: return {"xmlns": self.ns, "xml:lang": self.feed['language']} else: return {"xmlns": self.ns} def add_root_elements(self, handler): handler.addQuickElement("title", self.feed['title']) handler.addQuickElement("link", "", {"rel": "alternate", "href": self.feed['link']}) if self.feed['feed_url'] is not None: handler.addQuickElement("link", "", {"rel": "self", "href": self.feed['feed_url']}) handler.addQuickElement("id", self.feed['id']) handler.addQuickElement("updated", rfc3339_date(self.latest_post_date())) if self.feed['author_name'] is not None: handler.startElement("author", {}) handler.addQuickElement("name", self.feed['author_name']) if self.feed['author_email'] is not None: handler.addQuickElement("email", self.feed['author_email']) if self.feed['author_link'] is not None: handler.addQuickElement("uri", self.feed['author_link']) handler.endElement("author") if self.feed['subtitle'] is not None: handler.addQuickElement("subtitle", self.feed['subtitle']) for cat in self.feed['categories']: handler.addQuickElement("category", "", {"term": cat}) if self.feed['feed_copyright'] is not None: handler.addQuickElement("rights", self.feed['feed_copyright']) def write_items(self, handler): for item in self.items: handler.startElement("entry", self.item_attributes(item)) self.add_item_elements(handler, item) handler.endElement("entry") def add_item_elements(self, handler, item): handler.addQuickElement("title", item['title']) handler.addQuickElement("link", "", {"href": item['link'], "rel": "alternate"}) if item['pubdate'] is not None: handler.addQuickElement('published', rfc3339_date(item['pubdate'])) if item['updateddate'] is not None: handler.addQuickElement('updated', rfc3339_date(item['updateddate'])) # Author information. if item['author_name'] is not None: handler.startElement("author", {}) handler.addQuickElement("name", item['author_name']) if item['author_email'] is not None: handler.addQuickElement("email", item['author_email']) if item['author_link'] is not None: handler.addQuickElement("uri", item['author_link']) handler.endElement("author") # Unique ID. if item['unique_id'] is not None: unique_id = item['unique_id'] else: unique_id = get_tag_uri(item['link'], item['pubdate']) handler.addQuickElement("id", unique_id) # Summary. if item['description'] is not None: handler.addQuickElement("summary", item['description'], {"type": "html"}) # Enclosures. for enclosure in item['enclosures']: handler.addQuickElement('link', '', { 'rel': 'enclosure', 'href': enclosure.url, 'length': enclosure.length, 'type': enclosure.mime_type, }) # Categories. for cat in item['categories']: handler.addQuickElement("category", "", {"term": cat}) # Rights. if item['item_copyright'] is not None: handler.addQuickElement("rights", item['item_copyright']) # This isolates the decision of what the system default is, so calling code can # do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed". DefaultFeed = Rss201rev2Feed
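

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above): the Atom variant
# of the docstring example. The URLs and the date are made up.
import datetime

from django.utils import feedgenerator

feed = feedgenerator.Atom1Feed(
    title='Example entries',
    link='https://example.com/',
    description='The latest entries.',
    language='en',
    feed_url='https://example.com/atom.xml',
)
feed.add_item(
    title='Hello',
    link='https://example.com/entries/1/',
    description='Testing.',
    pubdate=datetime.datetime(2021, 1, 1, tzinfo=datetime.timezone.utc),
)
xml = feed.writeString('utf-8')
assert xml.startswith('<?xml version="1.0" encoding="utf-8"?>')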
import asyncio import functools import os from django.core.exceptions import SynchronousOnlyOperation def async_unsafe(message): """ Decorator to mark functions as async-unsafe. Someone trying to access the function while in an async context will get an error message. """ def decorator(func): @functools.wraps(func) def inner(*args, **kwargs): if not os.environ.get('DJANGO_ALLOW_ASYNC_UNSAFE'): # Detect a running event loop in this thread. try: asyncio.get_running_loop() except RuntimeError: pass else: raise SynchronousOnlyOperation(message) # Pass onward. return func(*args, **kwargs) return inner # If the message is actually a function, then be a no-arguments decorator. if callable(message): func = message message = 'You cannot call this from an async context - use a thread or sync_to_async.' return decorator(func) else: return decorator
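

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above); `read_blob` is a
# made-up example of blocking work.
from django.core.exceptions import SynchronousOnlyOperation
from django.utils.asyncio import async_unsafe

@async_unsafe
def read_blob():
    """Blocking I/O that must never run inside a running event loop."""
    return b'...'

read_blob()  # Fine from synchronous code.

async def handler():
    # Inside a coroutine this raises SynchronousOnlyOperation (unless the
    # DJANGO_ALLOW_ASYNC_UNSAFE environment variable is set).
    try:
        read_blob()
    except SynchronousOnlyOperation:
        pass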
import base64 import datetime import re import unicodedata from binascii import Error as BinasciiError from email.utils import formatdate from urllib.parse import ( ParseResult, SplitResult, _coerce_args, _splitnetloc, _splitparams, scheme_chars, urlencode as original_urlencode, uses_params, ) from django.utils.datastructures import MultiValueDict from django.utils.regex_helper import _lazy_re_compile # based on RFC 7232, Appendix C ETAG_MATCH = _lazy_re_compile(r''' \A( # start of string and capture group (?:W/)? # optional weak indicator " # opening quote [^"]* # any sequence of non-quote characters " # end quote )\Z # end of string and capture group ''', re.X) MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split() __D = r'(?P<day>\d{2})' __D2 = r'(?P<day>[ \d]\d)' __M = r'(?P<mon>\w{3})' __Y = r'(?P<year>\d{4})' __Y2 = r'(?P<year>\d{2})' __T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})' RFC1123_DATE = _lazy_re_compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T)) RFC850_DATE = _lazy_re_compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T)) ASCTIME_DATE = _lazy_re_compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y)) RFC3986_GENDELIMS = ":/?#[]@" RFC3986_SUBDELIMS = "!$&'()*+,;=" def urlencode(query, doseq=False): """ A version of Python's urllib.parse.urlencode() function that can operate on MultiValueDict and non-string values. """ if isinstance(query, MultiValueDict): query = query.lists() elif hasattr(query, 'items'): query = query.items() query_params = [] for key, value in query: if value is None: raise TypeError( "Cannot encode None for key '%s' in a query string. Did you " "mean to pass an empty string or omit the value?" % key ) elif not doseq or isinstance(value, (str, bytes)): query_val = value else: try: itr = iter(value) except TypeError: query_val = value else: # Consume generators and iterators, when doseq=True, to # work around https://bugs.python.org/issue31706. query_val = [] for item in itr: if item is None: raise TypeError( "Cannot encode None for key '%s' in a query " "string. Did you mean to pass an empty string or " "omit the value?" % key ) elif not isinstance(item, bytes): item = str(item) query_val.append(item) query_params.append((key, query_val)) return original_urlencode(query_params, doseq) def http_date(epoch_seconds=None): """ Format the time to match the RFC1123 date format as specified by HTTP RFC7231 section 7.1.1.1. `epoch_seconds` is a floating point number expressed in seconds since the epoch, in UTC - such as that outputted by time.time(). If set to None, it defaults to the current time. Output a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'. """ return formatdate(epoch_seconds, usegmt=True) def parse_http_date(date): """ Parse a date format as specified by HTTP RFC7231 section 7.1.1.1. The three formats allowed by the RFC are accepted, even if only the first one is still in widespread use. Return an integer expressed in seconds since the epoch, in UTC. """ # email.utils.parsedate() does the job for RFC1123 dates; unfortunately # RFC7231 makes it mandatory to support RFC850 dates too. So we roll # our own RFC-compliant parsing. 
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE: m = regex.match(date) if m is not None: break else: raise ValueError("%r is not in a valid HTTP date format" % date) try: tz = datetime.timezone.utc year = int(m['year']) if year < 100: current_year = datetime.datetime.now(tz=tz).year current_century = current_year - (current_year % 100) if year - (current_year % 100) > 50: # year that appears to be more than 50 years in the future are # interpreted as representing the past. year += current_century - 100 else: year += current_century month = MONTHS.index(m['mon'].lower()) + 1 day = int(m['day']) hour = int(m['hour']) min = int(m['min']) sec = int(m['sec']) result = datetime.datetime(year, month, day, hour, min, sec, tzinfo=tz) return int(result.timestamp()) except Exception as exc: raise ValueError("%r is not a valid date" % date) from exc def parse_http_date_safe(date): """ Same as parse_http_date, but return None if the input is invalid. """ try: return parse_http_date(date) except Exception: pass # Base 36 functions: useful for generating compact URLs def base36_to_int(s): """ Convert a base 36 string to an int. Raise ValueError if the input won't fit into an int. """ # To prevent overconsumption of server resources, reject any # base36 string that is longer than 13 base36 digits (13 digits # is sufficient to base36-encode any 64-bit integer) if len(s) > 13: raise ValueError("Base36 input too large") return int(s, 36) def int_to_base36(i): """Convert an integer to a base36 string.""" char_set = '0123456789abcdefghijklmnopqrstuvwxyz' if i < 0: raise ValueError("Negative base36 conversion input.") if i < 36: return char_set[i] b36 = '' while i != 0: i, n = divmod(i, 36) b36 = char_set[n] + b36 return b36 def urlsafe_base64_encode(s): """ Encode a bytestring to a base64 string for use in URLs. Strip any trailing equal signs. """ return base64.urlsafe_b64encode(s).rstrip(b'\n=').decode('ascii') def urlsafe_base64_decode(s): """ Decode a base64 encoded string. Add back any trailing equal signs that might have been stripped. """ s = s.encode() try: return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'=')) except (LookupError, BinasciiError) as e: raise ValueError(e) def parse_etags(etag_str): """ Parse a string of ETags given in an If-None-Match or If-Match header as defined by RFC 7232. Return a list of quoted ETags, or ['*'] if all ETags should be matched. """ if etag_str.strip() == '*': return ['*'] else: # Parse each ETag individually, and return any that are valid. etag_matches = (ETAG_MATCH.match(etag.strip()) for etag in etag_str.split(',')) return [match[1] for match in etag_matches if match] def quote_etag(etag_str): """ If the provided string is already a quoted ETag, return it. Otherwise, wrap the string in quotes, making it a strong ETag. """ if ETAG_MATCH.match(etag_str): return etag_str else: return '"%s"' % etag_str def is_same_domain(host, pattern): """ Return ``True`` if the host is either an exact match or a match to the wildcard pattern. Any pattern beginning with a period matches a domain and all of its subdomains. (e.g. ``.example.com`` matches ``example.com`` and ``foo.example.com``). Anything else is an exact string match. """ if not pattern: return False pattern = pattern.lower() return ( pattern[0] == '.' and (host.endswith(pattern) or host == pattern[1:]) or pattern == host ) def url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False): """ Return ``True`` if the url uses an allowed host and a safe scheme. 
Always return ``False`` on an empty url. If ``require_https`` is ``True``, only 'https' will be considered a valid scheme, as opposed to 'http' and 'https' with the default, ``False``. Note: "True" doesn't entail that a URL is "safe". It may still be e.g. quoted incorrectly. Ensure to also use django.utils.encoding.iri_to_uri() on the path component of untrusted URLs. """ if url is not None: url = url.strip() if not url: return False if allowed_hosts is None: allowed_hosts = set() elif isinstance(allowed_hosts, str): allowed_hosts = {allowed_hosts} # Chrome treats \ completely as / in paths but it could be part of some # basic auth credentials so we need to check both URLs. return ( _url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=require_https) and _url_has_allowed_host_and_scheme(url.replace('\\', '/'), allowed_hosts, require_https=require_https) ) # Copied from urllib.parse.urlparse() but uses fixed urlsplit() function. def _urlparse(url, scheme='', allow_fragments=True): """Parse a URL into 6 components: <scheme>://<netloc>/<path>;<params>?<query>#<fragment> Return a 6-tuple: (scheme, netloc, path, params, query, fragment). Note that we don't break the components up in smaller bits (e.g. netloc is a single string) and we don't expand % escapes.""" url, scheme, _coerce_result = _coerce_args(url, scheme) splitresult = _urlsplit(url, scheme, allow_fragments) scheme, netloc, url, query, fragment = splitresult if scheme in uses_params and ';' in url: url, params = _splitparams(url) else: params = '' result = ParseResult(scheme, netloc, url, params, query, fragment) return _coerce_result(result) # Copied from urllib.parse.urlsplit() with # https://github.com/python/cpython/pull/661 applied. def _urlsplit(url, scheme='', allow_fragments=True): """Parse a URL into 5 components: <scheme>://<netloc>/<path>?<query>#<fragment> Return a 5-tuple: (scheme, netloc, path, query, fragment). Note that we don't break the components up in smaller bits (e.g. netloc is a single string) and we don't expand % escapes.""" url, scheme, _coerce_result = _coerce_args(url, scheme) netloc = query = fragment = '' i = url.find(':') if i > 0: for c in url[:i]: if c not in scheme_chars: break else: scheme, url = url[:i].lower(), url[i + 1:] if url[:2] == '//': netloc, url = _splitnetloc(url, 2) if (('[' in netloc and ']' not in netloc) or (']' in netloc and '[' not in netloc)): raise ValueError("Invalid IPv6 URL") if allow_fragments and '#' in url: url, fragment = url.split('#', 1) if '?' in url: url, query = url.split('?', 1) v = SplitResult(scheme, netloc, url, query, fragment) return _coerce_result(v) def _url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False): # Chrome considers any URL with more than two slashes to be absolute, but # urlparse is not so flexible. Treat any url with three slashes as unsafe. if url.startswith('///'): return False try: url_info = _urlparse(url) except ValueError: # e.g. invalid IPv6 addresses return False # Forbid URLs like http:///example.com - with a scheme, but without a hostname. # In that URL, example.com is not the hostname but, a path component. However, # Chrome will still consider example.com to be the hostname, so we must not # allow this syntax. if not url_info.netloc and url_info.scheme: return False # Forbid URLs that start with control characters. Some browsers (like # Chrome) ignore quite a few control characters at the start of a # URL and might consider the URL as scheme relative. 
if unicodedata.category(url[0])[0] == 'C': return False scheme = url_info.scheme # Consider URLs without a scheme (e.g. //example.com/p) to be http. if not url_info.scheme and url_info.netloc: scheme = 'http' valid_schemes = ['https'] if require_https else ['http', 'https'] return ((not url_info.netloc or url_info.netloc in allowed_hosts) and (not scheme or scheme in valid_schemes)) def escape_leading_slashes(url): """ If redirecting to an absolute path (two leading slashes), a slash must be escaped to prevent browsers from handling the path as schemaless and redirecting to another host. """ if url.startswith('//'): url = '/%2F{}'.format(url[2:]) return url
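

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above); the hosts and
# values are made-up examples.
from django.utils.http import (
    base36_to_int, http_date, int_to_base36, parse_http_date,
    url_has_allowed_host_and_scheme,
)

# RFC 1123 dates round-trip through the two helpers.
stamp = http_date(0)  # 'Thu, 01 Jan 1970 00:00:00 GMT'
assert parse_http_date(stamp) == 0

# Base 36 round-trip, as used for compact tokens in URLs.
assert base36_to_int(int_to_base36(86400)) == 86400

# Open-redirect protection: relative paths pass, foreign hosts don't.
assert url_has_allowed_host_and_scheme('/next/', allowed_hosts={'example.com'})
assert not url_has_allowed_host_and_scheme('//evil.test/', allowed_hosts={'example.com'})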
import datetime import decimal import functools import re import unicodedata from importlib import import_module from django.conf import settings from django.utils import dateformat, numberformat from django.utils.functional import lazy from django.utils.translation import ( check_for_language, get_language, to_locale, ) # format_cache is a mapping from (format_type, lang) to the format string. # By using the cache, it is possible to avoid running get_format_modules # repeatedly. _format_cache = {} _format_modules_cache = {} ISO_INPUT_FORMATS = { 'DATE_INPUT_FORMATS': ['%Y-%m-%d'], 'TIME_INPUT_FORMATS': ['%H:%M:%S', '%H:%M:%S.%f', '%H:%M'], 'DATETIME_INPUT_FORMATS': [ '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M', '%Y-%m-%d' ], } FORMAT_SETTINGS = frozenset([ 'DECIMAL_SEPARATOR', 'THOUSAND_SEPARATOR', 'NUMBER_GROUPING', 'FIRST_DAY_OF_WEEK', 'MONTH_DAY_FORMAT', 'TIME_FORMAT', 'DATE_FORMAT', 'DATETIME_FORMAT', 'SHORT_DATE_FORMAT', 'SHORT_DATETIME_FORMAT', 'YEAR_MONTH_FORMAT', 'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS', ]) def reset_format_cache(): """Clear any cached formats. This method is provided primarily for testing purposes, so that the effects of cached formats can be removed. """ global _format_cache, _format_modules_cache _format_cache = {} _format_modules_cache = {} def iter_format_modules(lang, format_module_path=None): """Find format modules.""" if not check_for_language(lang): return if format_module_path is None: format_module_path = settings.FORMAT_MODULE_PATH format_locations = [] if format_module_path: if isinstance(format_module_path, str): format_module_path = [format_module_path] for path in format_module_path: format_locations.append(path + '.%s') format_locations.append('django.conf.locale.%s') locale = to_locale(lang) locales = [locale] if '_' in locale: locales.append(locale.split('_')[0]) for location in format_locations: for loc in locales: try: yield import_module('%s.formats' % (location % loc)) except ImportError: pass def get_format_modules(lang=None): """Return a list of the format modules found.""" if lang is None: lang = get_language() if lang not in _format_modules_cache: _format_modules_cache[lang] = list(iter_format_modules(lang, settings.FORMAT_MODULE_PATH)) return _format_modules_cache[lang] def get_format(format_type, lang=None, use_l10n=None): """ For a specific format type, return the format for the current language (locale). Default to the format in the settings. format_type is the name of the format, e.g. 'DATE_FORMAT'. If use_l10n is provided and is not None, it forces the value to be localized (or not), overriding the value of settings.USE_L10N. """ use_l10n = use_l10n or (use_l10n is None and settings.USE_L10N) if use_l10n and lang is None: lang = get_language() cache_key = (format_type, lang) try: return _format_cache[cache_key] except KeyError: pass # The requested format_type has not been cached yet. Try to find it in any # of the format_modules for the given lang if l10n is enabled. If it's not # there or if l10n is disabled, fall back to the project settings. val = None if use_l10n: for module in get_format_modules(lang): val = getattr(module, format_type, None) if val is not None: break if val is None: if format_type not in FORMAT_SETTINGS: return format_type val = getattr(settings, format_type) elif format_type in ISO_INPUT_FORMATS: # If a list of input formats from one of the format_modules was # retrieved, make sure the ISO_INPUT_FORMATS are in this list. 
val = list(val) for iso_input in ISO_INPUT_FORMATS.get(format_type, ()): if iso_input not in val: val.append(iso_input) _format_cache[cache_key] = val return val get_format_lazy = lazy(get_format, str, list, tuple) def date_format(value, format=None, use_l10n=None): """ Format a datetime.date or datetime.datetime object using a localizable format. If use_l10n is provided and is not None, that will force the value to be localized (or not), overriding the value of settings.USE_L10N. """ return dateformat.format(value, get_format(format or 'DATE_FORMAT', use_l10n=use_l10n)) def time_format(value, format=None, use_l10n=None): """ Format a datetime.time object using a localizable format. If use_l10n is provided and is not None, it forces the value to be localized (or not), overriding the value of settings.USE_L10N. """ return dateformat.time_format(value, get_format(format or 'TIME_FORMAT', use_l10n=use_l10n)) def number_format(value, decimal_pos=None, use_l10n=None, force_grouping=False): """ Format a numeric value using localization settings. If use_l10n is provided and is not None, it forces the value to be localized (or not), overriding the value of settings.USE_L10N. """ use_l10n = use_l10n or (use_l10n is None and settings.USE_L10N) lang = get_language() if use_l10n else None return numberformat.format( value, get_format('DECIMAL_SEPARATOR', lang, use_l10n=use_l10n), decimal_pos, get_format('NUMBER_GROUPING', lang, use_l10n=use_l10n), get_format('THOUSAND_SEPARATOR', lang, use_l10n=use_l10n), force_grouping=force_grouping, use_l10n=use_l10n, ) def localize(value, use_l10n=None): """ Check if value is a localizable type (date, number...) and return it formatted as a string using current locale format. If use_l10n is provided and is not None, it forces the value to be localized (or not), overriding the value of settings.USE_L10N. """ if isinstance(value, str): # Handle strings first for performance reasons. return value elif isinstance(value, bool): # Make sure booleans don't get treated as numbers return str(value) elif isinstance(value, (decimal.Decimal, float, int)): if use_l10n is False: return str(value) return number_format(value, use_l10n=use_l10n) elif isinstance(value, datetime.datetime): return date_format(value, 'DATETIME_FORMAT', use_l10n=use_l10n) elif isinstance(value, datetime.date): return date_format(value, use_l10n=use_l10n) elif isinstance(value, datetime.time): return time_format(value, 'TIME_FORMAT', use_l10n=use_l10n) return value def localize_input(value, default=None): """ Check if an input value is a localizable type and return it formatted with the appropriate formatting string of the current locale. """ if isinstance(value, str): # Handle strings first for performance reasons. return value elif isinstance(value, bool): # Don't treat booleans as numbers. return str(value) elif isinstance(value, (decimal.Decimal, float, int)): return number_format(value) elif isinstance(value, datetime.datetime): format = default or get_format('DATETIME_INPUT_FORMATS')[0] format = sanitize_strftime_format(format) return value.strftime(format) elif isinstance(value, datetime.date): format = default or get_format('DATE_INPUT_FORMATS')[0] format = sanitize_strftime_format(format) return value.strftime(format) elif isinstance(value, datetime.time): format = default or get_format('TIME_INPUT_FORMATS')[0] return value.strftime(format) return value @functools.lru_cache() def sanitize_strftime_format(fmt): """ Ensure that certain specifiers are correctly padded with leading zeros. 
For years < 1000 specifiers %C, %F, %G, and %Y don't work as expected for strftime provided by glibc on Linux as they don't pad the year or century with leading zeros. Support for specifying the padding explicitly is available, however, which can be used to fix this issue. FreeBSD, macOS, and Windows do not support explicitly specifying the padding, but return four digit years (with leading zeros) as expected. This function checks whether the %Y produces a correctly padded string and, if not, makes the following substitutions: - %C → %02C - %F → %010F - %G → %04G - %Y → %04Y See https://bugs.python.org/issue13305 for more details. """ if datetime.date(1, 1, 1).strftime('%Y') == '0001': return fmt mapping = {'C': 2, 'F': 10, 'G': 4, 'Y': 4} return re.sub( r'((?:^|[^%])(?:%%)*)%([CFGY])', lambda m: r'%s%%0%s%s' % (m[1], mapping[m[2]], m[2]), fmt, ) def sanitize_separators(value): """ Sanitize a value according to the current decimal and thousand separator setting. Used with form field input. """ if isinstance(value, str): parts = [] decimal_separator = get_format('DECIMAL_SEPARATOR') if decimal_separator in value: value, decimals = value.split(decimal_separator, 1) parts.append(decimals) if settings.USE_THOUSAND_SEPARATOR: thousand_sep = get_format('THOUSAND_SEPARATOR') if thousand_sep == '.' and value.count('.') == 1 and len(value.split('.')[-1]) != 3: # Special case where we suspect a dot meant decimal separator (see #22171) pass else: for replacement in { thousand_sep, unicodedata.normalize('NFKD', thousand_sep)}: value = value.replace(replacement, '') parts.append(value) value = '.'.join(reversed(parts)) return value
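

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above). Most helpers in
# this module need configured settings; sanitize_strftime_format() works
# standalone.
import datetime

from django.utils.formats import sanitize_strftime_format

fmt = sanitize_strftime_format('%Y-%m-%d')
# On glibc, fmt becomes '%04Y-%m-%d' so that years < 1000 keep their leading
# zeros; on platforms that already pad, the format is returned unchanged.
assert datetime.date(999, 1, 2).strftime(fmt) == '0999-01-02'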
"""JsLex: a lexer for JavaScript""" # Originally from https://bitbucket.org/ned/jslex import re class Tok: """ A specification for a token class. """ num = 0 def __init__(self, name, regex, next=None): self.id = Tok.num Tok.num += 1 self.name = name self.regex = regex self.next = next def literals(choices, prefix="", suffix=""): """ Create a regex from a space-separated list of literal `choices`. If provided, `prefix` and `suffix` will be attached to each choice individually. """ return "|".join(prefix + re.escape(c) + suffix for c in choices.split()) class Lexer: """ A generic multi-state regex-based lexer. """ def __init__(self, states, first): self.regexes = {} self.toks = {} for state, rules in states.items(): parts = [] for tok in rules: groupid = "t%d" % tok.id self.toks[groupid] = tok parts.append("(?P<%s>%s)" % (groupid, tok.regex)) self.regexes[state] = re.compile("|".join(parts), re.MULTILINE | re.VERBOSE) self.state = first def lex(self, text): """ Lexically analyze `text`. Yield pairs (`name`, `tokentext`). """ end = len(text) state = self.state regexes = self.regexes toks = self.toks start = 0 while start < end: for match in regexes[state].finditer(text, start): name = match.lastgroup tok = toks[name] toktext = match[name] start += len(toktext) yield (tok.name, toktext) if tok.next: state = tok.next break self.state = state class JsLexer(Lexer): """ A JavaScript lexer >>> lexer = JsLexer() >>> list(lexer.lex("a = 1")) [('id', 'a'), ('ws', ' '), ('punct', '='), ('ws', ' '), ('dnum', '1')] This doesn't properly handle non-ASCII characters in the JavaScript source. """ # Because these tokens are matched as alternatives in a regex, longer # possibilities must appear in the list before shorter ones, for example, # '>>' before '>'. # # Note that we don't have to detect malformed JavaScript, only properly # lex correct JavaScript, so much of this is simplified. # Details of JavaScript lexical structure are taken from # http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-262.pdf # A useful explanation of automatic semicolon insertion is at # http://inimino.org/~inimino/blog/javascript_semicolons both_before = [ Tok("comment", r"/\*(.|\n)*?\*/"), Tok("linecomment", r"//.*?$"), Tok("ws", r"\s+"), Tok("keyword", literals(""" break case catch class const continue debugger default delete do else enum export extends finally for function if import in instanceof new return super switch this throw try typeof var void while with """, suffix=r"\b"), next='reg'), Tok("reserved", literals("null true false", suffix=r"\b"), next='div'), Tok("id", r""" ([a-zA-Z_$ ]|\\u[0-9a-fA-Z]{4}) # first char ([a-zA-Z_$0-9]|\\u[0-9a-fA-F]{4})* # rest chars """, next='div'), Tok("hnum", r"0[xX][0-9a-fA-F]+", next='div'), Tok("onum", r"0[0-7]+"), Tok("dnum", r""" ( (0|[1-9][0-9]*) # DecimalIntegerLiteral \. # dot [0-9]* # DecimalDigits-opt ([eE][-+]?[0-9]+)? # ExponentPart-opt | \. # dot [0-9]+ # DecimalDigits ([eE][-+]?[0-9]+)? # ExponentPart-opt | (0|[1-9][0-9]*) # DecimalIntegerLiteral ([eE][-+]?[0-9]+)? # ExponentPart-opt ) """, next='div'), Tok("punct", literals(""" >>>= === !== >>> <<= >>= <= >= == != << >> && || += -= *= %= &= |= ^= """), next="reg"), Tok("punct", literals("++ -- ) ]"), next='div'), Tok("punct", literals("{ } ( [ . ; , < > + - * % & | ^ ! ~ ? 
: ="), next='reg'), Tok("string", r'"([^"\\]|(\\(.|\n)))*?"', next='div'), Tok("string", r"'([^'\\]|(\\(.|\n)))*?'", next='div'), ] both_after = [ Tok("other", r"."), ] states = { # slash will mean division 'div': both_before + [ Tok("punct", literals("/= /"), next='reg'), ] + both_after, # slash will mean regex 'reg': both_before + [ Tok("regex", r""" / # opening slash # First character is.. ( [^*\\/[] # anything but * \ / or [ | \\. # or an escape sequence | \[ # or a class, which has ( [^\]\\] # anything but \ or ] | \\. # or an escape sequence )* # many times \] ) # Following characters are same, except for excluding a star ( [^\\/[] # anything but \ / or [ | \\. # or an escape sequence | \[ # or a class, which has ( [^\]\\] # anything but \ or ] | \\. # or an escape sequence )* # many times \] )* # many times / # closing slash [a-zA-Z0-9]* # trailing flags """, next='div'), ] + both_after, } def __init__(self): super().__init__(self.states, 'reg') def prepare_js_for_gettext(js): """ Convert the JavaScript source `js` into something resembling C for xgettext. What actually happens is that all the regex literals are replaced with "REGEX". """ def escape_quotes(m): """Used in a regex to properly escape double quotes.""" s = m[0] if s == '"': return r'\"' else: return s lexer = JsLexer() c = [] for name, tok in lexer.lex(js): if name == 'regex': # C doesn't grok regexes, and they aren't needed for gettext, # so just output a string instead. tok = '"REGEX"' elif name == 'string': # C doesn't have single-quoted strings, so make all strings # double-quoted. if tok.startswith("'"): guts = re.sub(r"\\.|.", escape_quotes, tok[1:-1]) tok = '"' + guts + '"' elif name == 'id': # C can't deal with Unicode escapes in identifiers. We don't # need them for gettext anyway, so replace them with something # innocuous tok = tok.replace("\\", "U") c.append(tok) return ''.join(c)
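

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above); the JavaScript
# snippets are made-up examples.
from django.utils.jslex import JsLexer, prepare_js_for_gettext

lexer = JsLexer()
tokens = [tok for tok in lexer.lex('x = /ab+c/.test(s);') if tok[0] != 'ws']
# The slash after '=' is lexed as a regex literal, not as division:
# [('id', 'x'), ('punct', '='), ('regex', '/ab+c/'), ('punct', '.'),
#  ('id', 'test'), ('punct', '('), ('id', 's'), ('punct', ')'), ('punct', ';')]

# For xgettext, regexes become "REGEX" and strings become double-quoted.
assert prepare_js_for_gettext("gettext('hi'); var re = /x+/;") == (
    'gettext("hi"); var re = "REGEX";'
)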
from pathlib import Path from django.dispatch import receiver from django.template import engines from django.template.backends.django import DjangoTemplates from django.utils._os import to_path from django.utils.autoreload import ( autoreload_started, file_changed, is_django_path, ) def get_template_directories(): # Iterate through each template backend and find # any template_loader that has a 'get_dirs' method. # Collect the directories, filtering out Django templates. items = set() for backend in engines.all(): if not isinstance(backend, DjangoTemplates): continue items.update(Path.cwd() / to_path(dir) for dir in backend.engine.dirs) for loader in backend.engine.template_loaders: if not hasattr(loader, 'get_dirs'): continue items.update( Path.cwd() / to_path(directory) for directory in loader.get_dirs() if not is_django_path(directory) ) return items def reset_loaders(): for backend in engines.all(): if not isinstance(backend, DjangoTemplates): continue for loader in backend.engine.template_loaders: loader.reset() @receiver(autoreload_started, dispatch_uid='template_loaders_watch_changes') def watch_for_template_changes(sender, **kwargs): for directory in get_template_directories(): sender.watch_dir(directory, '**/*') @receiver(file_changed, dispatch_uid='template_loaders_file_changed') def template_changed(sender, file_path, **kwargs): for template_dir in get_template_directories(): if template_dir in file_path.parents: reset_loaders() return True
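

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above): the same check that
# template_changed() performs, exposed as a plain helper for use outside the
# signal framework. Assumes configured settings, since
# get_template_directories() inspects the configured template engines.
from pathlib import Path

from django.template.autoreload import get_template_directories, reset_loaders

def maybe_reset_loaders(changed_file: Path) -> bool:
    """Reset cached template loaders if the changed file is a template."""
    if any(template_dir in changed_file.parents
           for template_dir in get_template_directories()):
        reset_loaders()
        return True
    return False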
import posixpath from collections import defaultdict from django.utils.safestring import mark_safe from .base import ( Node, Template, TemplateSyntaxError, TextNode, Variable, token_kwargs, ) from .library import Library register = Library() BLOCK_CONTEXT_KEY = 'block_context' class BlockContext: def __init__(self): # Dictionary of FIFO queues. self.blocks = defaultdict(list) def __repr__(self): return f'<{self.__class__.__qualname__}: blocks={self.blocks!r}>' def add_blocks(self, blocks): for name, block in blocks.items(): self.blocks[name].insert(0, block) def pop(self, name): try: return self.blocks[name].pop() except IndexError: return None def push(self, name, block): self.blocks[name].append(block) def get_block(self, name): try: return self.blocks[name][-1] except IndexError: return None class BlockNode(Node): def __init__(self, name, nodelist, parent=None): self.name, self.nodelist, self.parent = name, nodelist, parent def __repr__(self): return "<Block Node: %s. Contents: %r>" % (self.name, self.nodelist) def render(self, context): block_context = context.render_context.get(BLOCK_CONTEXT_KEY) with context.push(): if block_context is None: context['block'] = self result = self.nodelist.render(context) else: push = block = block_context.pop(self.name) if block is None: block = self # Create new block so we can store context without thread-safety issues. block = type(self)(block.name, block.nodelist) block.context = context context['block'] = block result = block.nodelist.render(context) if push is not None: block_context.push(self.name, push) return result def super(self): if not hasattr(self, 'context'): raise TemplateSyntaxError( "'%s' object has no attribute 'context'. Did you use " "{{ block.super }} in a base template?" % self.__class__.__name__ ) render_context = self.context.render_context if (BLOCK_CONTEXT_KEY in render_context and render_context[BLOCK_CONTEXT_KEY].get_block(self.name) is not None): return mark_safe(self.render(self.context)) return '' class ExtendsNode(Node): must_be_first = True context_key = 'extends_context' def __init__(self, nodelist, parent_name, template_dirs=None): self.nodelist = nodelist self.parent_name = parent_name self.template_dirs = template_dirs self.blocks = {n.name: n for n in nodelist.get_nodes_by_type(BlockNode)} def __repr__(self): return '<%s: extends %s>' % (self.__class__.__name__, self.parent_name.token) def find_template(self, template_name, context): """ This is a wrapper around engine.find_template(). A history is kept in the render_context attribute between successive extends calls and passed as the skip argument. This enables extends to work recursively without extending the same template twice. """ history = context.render_context.setdefault( self.context_key, [self.origin], ) template, origin = context.template.engine.find_template( template_name, skip=history, ) history.append(origin) return template def get_parent(self, context): parent = self.parent_name.resolve(context) if not parent: error_msg = "Invalid template name in 'extends' tag: %r." % parent if self.parent_name.filters or\ isinstance(self.parent_name.var, Variable): error_msg += " Got this from the '%s' variable." 
%\ self.parent_name.token raise TemplateSyntaxError(error_msg) if isinstance(parent, Template): # parent is a django.template.Template return parent if isinstance(getattr(parent, 'template', None), Template): # parent is a django.template.backends.django.Template return parent.template return self.find_template(parent, context) def render(self, context): compiled_parent = self.get_parent(context) if BLOCK_CONTEXT_KEY not in context.render_context: context.render_context[BLOCK_CONTEXT_KEY] = BlockContext() block_context = context.render_context[BLOCK_CONTEXT_KEY] # Add the block nodes from this node to the block context block_context.add_blocks(self.blocks) # If this block's parent doesn't have an extends node it is the root, # and its block nodes also need to be added to the block context. for node in compiled_parent.nodelist: # The ExtendsNode has to be the first non-text node. if not isinstance(node, TextNode): if not isinstance(node, ExtendsNode): blocks = {n.name: n for n in compiled_parent.nodelist.get_nodes_by_type(BlockNode)} block_context.add_blocks(blocks) break # Call Template._render explicitly so the parser context stays # the same. with context.render_context.push_state(compiled_parent, isolated_context=False): return compiled_parent._render(context) class IncludeNode(Node): context_key = '__include_context' def __init__(self, template, *args, extra_context=None, isolated_context=False, **kwargs): self.template = template self.extra_context = extra_context or {} self.isolated_context = isolated_context super().__init__(*args, **kwargs) def __repr__(self): return f'<{self.__class__.__qualname__}: template={self.template!r}>' def render(self, context): """ Render the specified template and context. Cache the template object in render_context to avoid reparsing and loading when used in a for loop. """ template = self.template.resolve(context) # Does this quack like a Template? if not callable(getattr(template, 'render', None)): # If not, try the cache and select_template(). template_name = template or () if isinstance(template_name, str): template_name = (construct_relative_path( self.origin.template_name, template_name, ),) else: template_name = tuple(template_name) cache = context.render_context.dicts[0].setdefault(self, {}) template = cache.get(template_name) if template is None: template = context.template.engine.select_template(template_name) cache[template_name] = template # Use the base.Template of a backends.django.Template. elif hasattr(template, 'template'): template = template.template values = { name: var.resolve(context) for name, var in self.extra_context.items() } if self.isolated_context: return template.render(context.new(values)) with context.push(**values): return template.render(context) @register.tag('block') def do_block(parser, token): """ Define a block that can be overridden by child templates. """ # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments bits = token.contents.split() if len(bits) != 2: raise TemplateSyntaxError("'%s' tag takes only one argument" % bits[0]) block_name = bits[1] # Keep track of the names of BlockNodes found in this template, so we can # check for duplication. 
try: if block_name in parser.__loaded_blocks: raise TemplateSyntaxError("'%s' tag with name '%s' appears more than once" % (bits[0], block_name)) parser.__loaded_blocks.append(block_name) except AttributeError: # parser.__loaded_blocks isn't a list yet parser.__loaded_blocks = [block_name] nodelist = parser.parse(('endblock',)) # This check is kept for backwards-compatibility. See #3100. endblock = parser.next_token() acceptable_endblocks = ('endblock', 'endblock %s' % block_name) if endblock.contents not in acceptable_endblocks: parser.invalid_block_tag(endblock, 'endblock', acceptable_endblocks) return BlockNode(block_name, nodelist) def construct_relative_path(current_template_name, relative_name): """ Convert a relative path (starting with './' or '../') to the full template name based on the current_template_name. """ has_quotes = ( (relative_name.startswith('"') and relative_name.endswith('"')) or (relative_name.startswith("'") and relative_name.endswith("'")) ) new_name = relative_name.strip('\'"') if not new_name.startswith(('./', '../')): # relative_name is a variable or a literal that doesn't contain a # relative path. return relative_name new_name = posixpath.normpath( posixpath.join( posixpath.dirname(current_template_name.lstrip('/')), new_name, ) ) if new_name.startswith('../'): raise TemplateSyntaxError( "The relative path '%s' points outside the file hierarchy that " "template '%s' is in." % (relative_name, current_template_name) ) if current_template_name.lstrip('/') == new_name: raise TemplateSyntaxError( "The relative path '%s' was translated to template name '%s', the " "same template in which the tag appears." % (relative_name, current_template_name) ) return f'"{new_name}"' if has_quotes else new_name @register.tag('extends') def do_extends(parser, token): """ Signal that this template extends a parent template. This tag may be used in two ways: ``{% extends "base" %}`` (with quotes) uses the literal value "base" as the name of the parent template to extend, or ``{% extends variable %}`` uses the value of ``variable`` as either the name of the parent template to extend (if it evaluates to a string) or as the parent template itself (if it evaluates to a Template object). """ bits = token.split_contents() if len(bits) != 2: raise TemplateSyntaxError("'%s' takes one argument" % bits[0]) bits[1] = construct_relative_path(parser.origin.template_name, bits[1]) parent_name = parser.compile_filter(bits[1]) nodelist = parser.parse() if nodelist.get_nodes_by_type(ExtendsNode): raise TemplateSyntaxError("'%s' cannot appear more than once in the same template" % bits[0]) return ExtendsNode(nodelist, parent_name) @register.tag('include') def do_include(parser, token): """ Load a template and render it with the current context. You can pass additional context using keyword arguments. Example:: {% include "foo/some_include" %} {% include "foo/some_include" with bar="BAZZ!" baz="BING!" %} Use the ``only`` argument to exclude the current context when rendering the included template:: {% include "foo/some_include" only %} {% include "foo/some_include" with bar="1" only %} """ bits = token.split_contents() if len(bits) < 2: raise TemplateSyntaxError( "%r tag takes at least one argument: the name of the template to " "be included." % bits[0] ) options = {} remaining_bits = bits[2:] while remaining_bits: option = remaining_bits.pop(0) if option in options: raise TemplateSyntaxError('The %r option was specified more ' 'than once.' 
% option) if option == 'with': value = token_kwargs(remaining_bits, parser, support_legacy=False) if not value: raise TemplateSyntaxError('"with" in %r tag needs at least ' 'one keyword argument.' % bits[0]) elif option == 'only': value = True else: raise TemplateSyntaxError('Unknown argument for %r tag: %r.' % (bits[0], option)) options[option] = value isolated_context = options.get('only', False) namemap = options.get('with', {}) bits[1] = construct_relative_path(parser.origin.template_name, bits[1]) return IncludeNode(parser.compile_filter(bits[1]), extra_context=namemap, isolated_context=isolated_context)
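
# Standalone usage sketch (not part of loader_tags itself): how BlockNode,
# ExtendsNode and BlockContext cooperate at render time. The template names
# and strings below are illustrative assumptions; locmem.Loader keeps the
# example filesystem-free.
from django.conf import settings
from django.template import Context, Engine

if not settings.configured:  # only needed when running outside a project
    settings.configure()

engine = Engine(loaders=[(
    'django.template.loaders.locmem.Loader',
    {
        'base.html': '{% block title %}Base{% endblock %}',
        'child.html': (
            '{% extends "base.html" %}'
            '{% block title %}{{ block.super }} / Child{% endblock %}'
        ),
    },
)])
# ExtendsNode.render() pushes the child's blocks into BLOCK_CONTEXT_KEY
# before rendering the parent, which is what lets {{ block.super }} reach
# the parent block's output ("Base") from inside the override.
print(engine.get_template('child.html').render(Context()))  # Base / Child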
""" This is the Django template system. How it works: The Lexer.tokenize() method converts a template string (i.e., a string containing markup with custom template tags) to tokens, which can be either plain text (TokenType.TEXT), variables (TokenType.VAR), or block statements (TokenType.BLOCK). The Parser() class takes a list of tokens in its constructor, and its parse() method returns a compiled template -- which is, under the hood, a list of Node objects. Each Node is responsible for creating some sort of output -- e.g. simple text (TextNode), variable values in a given context (VariableNode), results of basic logic (IfNode), results of looping (ForNode), or anything else. The core Node types are TextNode, VariableNode, IfNode and ForNode, but plugin modules can define their own custom node types. Each Node has a render() method, which takes a Context and returns a string of the rendered node. For example, the render() method of a Variable Node returns the variable's value as a string. The render() method of a ForNode returns the rendered output of whatever was inside the loop, recursively. The Template class is a convenient wrapper that takes care of template compilation and rendering. Usage: The only thing you should ever use directly in this file is the Template class. Create a compiled template object with a template_string, then call render() with a context. In the compilation stage, the TemplateSyntaxError exception will be raised if the template doesn't have proper syntax. Sample code: >>> from django import template >>> s = '<html>{% if test %}<h1>{{ varvalue }}</h1>{% endif %}</html>' >>> t = template.Template(s) (t is now a compiled template, and its render() method can be called multiple times with multiple contexts) >>> c = template.Context({'test':True, 'varvalue': 'Hello'}) >>> t.render(c) '<html><h1>Hello</h1></html>' >>> c = template.Context({'test':False, 'varvalue': 'Hello'}) >>> t.render(c) '<html></html>' """ import inspect import logging import re from enum import Enum from django.template.context import BaseContext from django.utils.formats import localize from django.utils.html import conditional_escape, escape from django.utils.regex_helper import _lazy_re_compile from django.utils.safestring import SafeData, SafeString, mark_safe from django.utils.text import ( get_text_list, smart_split, unescape_string_literal, ) from django.utils.timezone import template_localtime from django.utils.translation import gettext_lazy, pgettext_lazy from .exceptions import TemplateSyntaxError # template syntax constants FILTER_SEPARATOR = '|' FILTER_ARGUMENT_SEPARATOR = ':' VARIABLE_ATTRIBUTE_SEPARATOR = '.' BLOCK_TAG_START = '{%' BLOCK_TAG_END = '%}' VARIABLE_TAG_START = '{{' VARIABLE_TAG_END = '}}' COMMENT_TAG_START = '{#' COMMENT_TAG_END = '#}' TRANSLATOR_COMMENT_MARK = 'Translators' SINGLE_BRACE_START = '{' SINGLE_BRACE_END = '}' # what to report as the origin for templates that come from non-loader sources # (e.g. 
strings) UNKNOWN_SOURCE = '<unknown source>' # match a variable or block tag and capture the entire tag, including start/end # delimiters tag_re = (_lazy_re_compile('(%s.*?%s|%s.*?%s|%s.*?%s)' % (re.escape(BLOCK_TAG_START), re.escape(BLOCK_TAG_END), re.escape(VARIABLE_TAG_START), re.escape(VARIABLE_TAG_END), re.escape(COMMENT_TAG_START), re.escape(COMMENT_TAG_END)))) logger = logging.getLogger('django.template') class TokenType(Enum): TEXT = 0 VAR = 1 BLOCK = 2 COMMENT = 3 class VariableDoesNotExist(Exception): def __init__(self, msg, params=()): self.msg = msg self.params = params def __str__(self): return self.msg % self.params class Origin: def __init__(self, name, template_name=None, loader=None): self.name = name self.template_name = template_name self.loader = loader def __str__(self): return self.name def __repr__(self): return '<%s name=%r>' % (self.__class__.__qualname__, self.name) def __eq__(self, other): return ( isinstance(other, Origin) and self.name == other.name and self.loader == other.loader ) @property def loader_name(self): if self.loader: return '%s.%s' % ( self.loader.__module__, self.loader.__class__.__name__, ) class Template: def __init__(self, template_string, origin=None, name=None, engine=None): # If Template is instantiated directly rather than from an Engine and # exactly one Django template engine is configured, use that engine. # This is required to preserve backwards-compatibility for direct use # e.g. Template('...').render(Context({...})) if engine is None: from .engine import Engine engine = Engine.get_default() if origin is None: origin = Origin(UNKNOWN_SOURCE) self.name = name self.origin = origin self.engine = engine self.source = str(template_string) # May be lazy. self.nodelist = self.compile_nodelist() def __iter__(self): for node in self.nodelist: yield from node def __repr__(self): return '<%s template_string="%s...">' % ( self.__class__.__qualname__, self.source[:20].replace('\n', ''), ) def _render(self, context): return self.nodelist.render(context) def render(self, context): "Display stage -- can be called many times" with context.render_context.push_state(self): if context.template is None: with context.bind_template(self): context.template_name = self.name return self._render(context) else: return self._render(context) def compile_nodelist(self): """ Parse and compile the template source into a nodelist. If debug is True and an exception occurs during parsing, the exception is annotated with contextual line information where it occurred in the template source. """ if self.engine.debug: lexer = DebugLexer(self.source) else: lexer = Lexer(self.source) tokens = lexer.tokenize() parser = Parser( tokens, self.engine.template_libraries, self.engine.template_builtins, self.origin, ) try: return parser.parse() except Exception as e: if self.engine.debug: e.template_debug = self.get_exception_info(e, e.token) raise def get_exception_info(self, exception, token): """ Return a dictionary containing contextual line information of where the exception occurred in the template. The following information is provided: message The message of the exception raised. source_lines The lines before, after, and including the line the exception occurred on. line The line number the exception occurred on. before, during, after The line the exception occurred on split into three parts: 1. The content before the token that raised the error. 2. The token that raised the error. 3. The content after the token that raised the error. 
total The number of lines in source_lines. top The line number where source_lines starts. bottom The line number where source_lines ends. start The start position of the token in the template source. end The end position of the token in the template source. """ start, end = token.position context_lines = 10 line = 0 upto = 0 source_lines = [] before = during = after = "" for num, next in enumerate(linebreak_iter(self.source)): if start >= upto and end <= next: line = num before = escape(self.source[upto:start]) during = escape(self.source[start:end]) after = escape(self.source[end:next]) source_lines.append((num, escape(self.source[upto:next]))) upto = next total = len(source_lines) top = max(1, line - context_lines) bottom = min(total, line + 1 + context_lines) # In some rare cases exc_value.args can be empty or an invalid # string. try: message = str(exception.args[0]) except (IndexError, UnicodeDecodeError): message = '(Could not get exception message)' return { 'message': message, 'source_lines': source_lines[top:bottom], 'before': before, 'during': during, 'after': after, 'top': top, 'bottom': bottom, 'total': total, 'line': line, 'name': self.origin.name, 'start': start, 'end': end, } def linebreak_iter(template_source): yield 0 p = template_source.find('\n') while p >= 0: yield p + 1 p = template_source.find('\n', p + 1) yield len(template_source) + 1 class Token: def __init__(self, token_type, contents, position=None, lineno=None): """ A token representing a string from the template. token_type A TokenType, either .TEXT, .VAR, .BLOCK, or .COMMENT. contents The token source string. position An optional tuple containing the start and end index of the token in the template source. This is used for traceback information when debug is on. lineno The line number the token appears on in the template source. This is used for traceback information and gettext files. """ self.token_type, self.contents = token_type, contents self.lineno = lineno self.position = position def __repr__(self): token_name = self.token_type.name.capitalize() return ('<%s token: "%s...">' % (token_name, self.contents[:20].replace('\n', ''))) def split_contents(self): split = [] bits = smart_split(self.contents) for bit in bits: # Handle translation-marked template pieces if bit.startswith(('_("', "_('")): sentinel = bit[2] + ')' trans_bit = [bit] while not bit.endswith(sentinel): bit = next(bits) trans_bit.append(bit) bit = ' '.join(trans_bit) split.append(bit) return split class Lexer: def __init__(self, template_string): self.template_string = template_string self.verbatim = False def __repr__(self): return '<%s template_string="%s...", verbatim=%s>' % ( self.__class__.__qualname__, self.template_string[:20].replace('\n', ''), self.verbatim, ) def tokenize(self): """ Return a list of tokens from a given template_string. """ in_tag = False lineno = 1 result = [] for bit in tag_re.split(self.template_string): if bit: result.append(self.create_token(bit, None, lineno, in_tag)) in_tag = not in_tag lineno += bit.count('\n') return result def create_token(self, token_string, position, lineno, in_tag): """ Convert the given token string into a new Token object and return it. If in_tag is True, we are processing something that matched a tag, otherwise it should be treated as a literal string. """ token_start = token_string[0:2] if in_tag and token_start == BLOCK_TAG_START: # The [2:-2] ranges below strip off *_TAG_START and *_TAG_END. 
# We could do len(BLOCK_TAG_START) to be more "correct", but we've # hard-coded the 2s here for performance. And it's not like # the TAG_START values are going to change anytime, anyway. block_content = token_string[2:-2].strip() if self.verbatim and block_content == self.verbatim: self.verbatim = False if in_tag and not self.verbatim: if token_start == VARIABLE_TAG_START: return Token(TokenType.VAR, token_string[2:-2].strip(), position, lineno) elif token_start == BLOCK_TAG_START: if block_content[:9] in ('verbatim', 'verbatim '): self.verbatim = 'end%s' % block_content return Token(TokenType.BLOCK, block_content, position, lineno) elif token_start == COMMENT_TAG_START: content = '' if token_string.find(TRANSLATOR_COMMENT_MARK): content = token_string[2:-2].strip() return Token(TokenType.COMMENT, content, position, lineno) else: return Token(TokenType.TEXT, token_string, position, lineno) class DebugLexer(Lexer): def tokenize(self): """ Split a template string into tokens and annotates each token with its start and end position in the source. This is slower than the default lexer so only use it when debug is True. """ lineno = 1 result = [] upto = 0 for match in tag_re.finditer(self.template_string): start, end = match.span() if start > upto: token_string = self.template_string[upto:start] result.append(self.create_token(token_string, (upto, start), lineno, in_tag=False)) lineno += token_string.count('\n') token_string = self.template_string[start:end] result.append(self.create_token(token_string, (start, end), lineno, in_tag=True)) lineno += token_string.count('\n') upto = end last_bit = self.template_string[upto:] if last_bit: result.append(self.create_token(last_bit, (upto, upto + len(last_bit)), lineno, in_tag=False)) return result class Parser: def __init__(self, tokens, libraries=None, builtins=None, origin=None): # Reverse the tokens so delete_first_token(), prepend_token(), and # next_token() can operate at the end of the list in constant time. self.tokens = list(reversed(tokens)) self.tags = {} self.filters = {} self.command_stack = [] if libraries is None: libraries = {} if builtins is None: builtins = [] self.libraries = libraries for builtin in builtins: self.add_library(builtin) self.origin = origin def __repr__(self): return '<%s tokens=%r>' % (self.__class__.__qualname__, self.tokens) def parse(self, parse_until=None): """ Iterate through the parser tokens and compiles each one into a node. If parse_until is provided, parsing will stop once one of the specified tokens has been reached. This is formatted as a list of tokens, e.g. ['elif', 'else', 'endif']. If no matching token is reached, raise an exception with the unclosed block tag details. """ if parse_until is None: parse_until = [] nodelist = NodeList() while self.tokens: token = self.next_token() # Use the raw values here for TokenType.* for a tiny performance boost. 
token_type = token.token_type.value if token_type == 0: # TokenType.TEXT self.extend_nodelist(nodelist, TextNode(token.contents), token) elif token_type == 1: # TokenType.VAR if not token.contents: raise self.error(token, 'Empty variable tag on line %d' % token.lineno) try: filter_expression = self.compile_filter(token.contents) except TemplateSyntaxError as e: raise self.error(token, e) var_node = VariableNode(filter_expression) self.extend_nodelist(nodelist, var_node, token) elif token_type == 2: # TokenType.BLOCK try: command = token.contents.split()[0] except IndexError: raise self.error(token, 'Empty block tag on line %d' % token.lineno) if command in parse_until: # A matching token has been reached. Return control to # the caller. Put the token back on the token list so the # caller knows where it terminated. self.prepend_token(token) return nodelist # Add the token to the command stack. This is used for error # messages if further parsing fails due to an unclosed block # tag. self.command_stack.append((command, token)) # Get the tag callback function from the ones registered with # the parser. try: compile_func = self.tags[command] except KeyError: self.invalid_block_tag(token, command, parse_until) # Compile the callback into a node object and add it to # the node list. try: compiled_result = compile_func(self, token) except Exception as e: raise self.error(token, e) self.extend_nodelist(nodelist, compiled_result, token) # Compile success. Remove the token from the command stack. self.command_stack.pop() if parse_until: self.unclosed_block_tag(parse_until) return nodelist def skip_past(self, endtag): while self.tokens: token = self.next_token() if token.token_type == TokenType.BLOCK and token.contents == endtag: return self.unclosed_block_tag([endtag]) def extend_nodelist(self, nodelist, node, token): # Check that non-text nodes don't appear before an extends tag. if node.must_be_first and nodelist.contains_nontext: raise self.error( token, '%r must be the first tag in the template.' % node, ) if not isinstance(node, TextNode): nodelist.contains_nontext = True # Set origin and token here since we can't modify the node __init__() # method. node.token = token node.origin = self.origin nodelist.append(node) def error(self, token, e): """ Return an exception annotated with the originating token. Since the parser can be called recursively, check if a token is already set. This ensures the innermost token is highlighted if an exception occurs, e.g. a compile error within the body of an if statement. """ if not isinstance(e, Exception): e = TemplateSyntaxError(e) if not hasattr(e, 'token'): e.token = token return e def invalid_block_tag(self, token, command, parse_until=None): if parse_until: raise self.error( token, "Invalid block tag on line %d: '%s', expected %s. Did you " "forget to register or load this tag?" % ( token.lineno, command, get_text_list(["'%s'" % p for p in parse_until], 'or'), ), ) raise self.error( token, "Invalid block tag on line %d: '%s'. Did you forget to register " "or load this tag?" % (token.lineno, command) ) def unclosed_block_tag(self, parse_until): command, token = self.command_stack.pop() msg = "Unclosed tag on line %d: '%s'. Looking for one of: %s." 
% ( token.lineno, command, ', '.join(parse_until), ) raise self.error(token, msg) def next_token(self): return self.tokens.pop() def prepend_token(self, token): self.tokens.append(token) def delete_first_token(self): del self.tokens[-1] def add_library(self, lib): self.tags.update(lib.tags) self.filters.update(lib.filters) def compile_filter(self, token): """ Convenient wrapper for FilterExpression """ return FilterExpression(token, self) def find_filter(self, filter_name): if filter_name in self.filters: return self.filters[filter_name] else: raise TemplateSyntaxError("Invalid filter: '%s'" % filter_name) # This only matches constant *strings* (things in quotes or marked for # translation). Numbers are treated as variables for implementation reasons # (so that they retain their type when passed to filters). constant_string = r""" (?:%(i18n_open)s%(strdq)s%(i18n_close)s| %(i18n_open)s%(strsq)s%(i18n_close)s| %(strdq)s| %(strsq)s) """ % { 'strdq': r'"[^"\\]*(?:\\.[^"\\]*)*"', # double-quoted string 'strsq': r"'[^'\\]*(?:\\.[^'\\]*)*'", # single-quoted string 'i18n_open': re.escape("_("), 'i18n_close': re.escape(")"), } constant_string = constant_string.replace("\n", "") filter_raw_string = r""" ^(?P<constant>%(constant)s)| ^(?P<var>[%(var_chars)s]+|%(num)s)| (?:\s*%(filter_sep)s\s* (?P<filter_name>\w+) (?:%(arg_sep)s (?: (?P<constant_arg>%(constant)s)| (?P<var_arg>[%(var_chars)s]+|%(num)s) ) )? )""" % { 'constant': constant_string, 'num': r'[-+\.]?\d[\d\.e]*', 'var_chars': r'\w\.', 'filter_sep': re.escape(FILTER_SEPARATOR), 'arg_sep': re.escape(FILTER_ARGUMENT_SEPARATOR), } filter_re = _lazy_re_compile(filter_raw_string, re.VERBOSE) class FilterExpression: """ Parse a variable token and its optional filters (all as a single string), and return a list of tuples of the filter name and arguments. Sample:: >>> token = 'variable|default:"Default value"|date:"Y-m-d"' >>> p = Parser('') >>> fe = FilterExpression(token, p) >>> len(fe.filters) 2 >>> fe.var <Variable: 'variable'> """ def __init__(self, token, parser): self.token = token matches = filter_re.finditer(token) var_obj = None filters = [] upto = 0 for match in matches: start = match.start() if upto != start: raise TemplateSyntaxError("Could not parse some characters: " "%s|%s|%s" % (token[:upto], token[upto:start], token[start:])) if var_obj is None: var, constant = match['var'], match['constant'] if constant: try: var_obj = Variable(constant).resolve({}) except VariableDoesNotExist: var_obj = None elif var is None: raise TemplateSyntaxError("Could not find variable at " "start of %s." 
% token) else: var_obj = Variable(var) else: filter_name = match['filter_name'] args = [] constant_arg, var_arg = match['constant_arg'], match['var_arg'] if constant_arg: args.append((False, Variable(constant_arg).resolve({}))) elif var_arg: args.append((True, Variable(var_arg))) filter_func = parser.find_filter(filter_name) self.args_check(filter_name, filter_func, args) filters.append((filter_func, args)) upto = match.end() if upto != len(token): raise TemplateSyntaxError("Could not parse the remainder: '%s' " "from '%s'" % (token[upto:], token)) self.filters = filters self.var = var_obj def resolve(self, context, ignore_failures=False): if isinstance(self.var, Variable): try: obj = self.var.resolve(context) except VariableDoesNotExist: if ignore_failures: obj = None else: string_if_invalid = context.template.engine.string_if_invalid if string_if_invalid: if '%s' in string_if_invalid: return string_if_invalid % self.var else: return string_if_invalid else: obj = string_if_invalid else: obj = self.var for func, args in self.filters: arg_vals = [] for lookup, arg in args: if not lookup: arg_vals.append(mark_safe(arg)) else: arg_vals.append(arg.resolve(context)) if getattr(func, 'expects_localtime', False): obj = template_localtime(obj, context.use_tz) if getattr(func, 'needs_autoescape', False): new_obj = func(obj, autoescape=context.autoescape, *arg_vals) else: new_obj = func(obj, *arg_vals) if getattr(func, 'is_safe', False) and isinstance(obj, SafeData): obj = mark_safe(new_obj) else: obj = new_obj return obj def args_check(name, func, provided): provided = list(provided) # First argument, filter input, is implied. plen = len(provided) + 1 # Check to see if a decorator is providing the real function. func = inspect.unwrap(func) args, _, _, defaults, _, _, _ = inspect.getfullargspec(func) alen = len(args) dlen = len(defaults or []) # Not enough OR Too many if plen < (alen - dlen) or plen > alen: raise TemplateSyntaxError("%s requires %d arguments, %d provided" % (name, alen - dlen, plen)) return True args_check = staticmethod(args_check) def __str__(self): return self.token def __repr__(self): return "<%s %r>" % (self.__class__.__qualname__, self.token) class Variable: """ A template variable, resolvable against a given context. The variable may be a hard-coded string (if it begins and ends with single or double quote marks):: >>> c = {'article': {'section':'News'}} >>> Variable('article.section').resolve(c) 'News' >>> Variable('article').resolve(c) {'section': 'News'} >>> class AClass: pass >>> c = AClass() >>> c.article = AClass() >>> c.article.section = 'News' (The example assumes VARIABLE_ATTRIBUTE_SEPARATOR is '.') """ def __init__(self, var): self.var = var self.literal = None self.lookups = None self.translate = False self.message_context = None if not isinstance(var, str): raise TypeError( "Variable must be a string or number, got %s" % type(var)) try: # First try to treat this variable as a number. # # Note that this could cause an OverflowError here that we're not # catching. Since this should only happen at compile time, that's # probably OK. # Try to interpret values containing a period or an 'e'/'E' # (possibly scientific notation) as a float; otherwise, try int. if '.' in var or 'e' in var.lower(): self.literal = float(var) # "2." is invalid if var[-1] == '.': raise ValueError else: self.literal = int(var) except ValueError: # A ValueError means that the variable isn't a number. 
if var[0:2] == '_(' and var[-1] == ')': # The result of the lookup should be translated at rendering # time. self.translate = True var = var[2:-1] # If it's wrapped with quotes (single or double), then # we're also dealing with a literal. try: self.literal = mark_safe(unescape_string_literal(var)) except ValueError: # Otherwise we'll set self.lookups so that resolve() knows we're # dealing with a bonafide variable if VARIABLE_ATTRIBUTE_SEPARATOR + '_' in var or var[0] == '_': raise TemplateSyntaxError("Variables and attributes may " "not begin with underscores: '%s'" % var) self.lookups = tuple(var.split(VARIABLE_ATTRIBUTE_SEPARATOR)) def resolve(self, context): """Resolve this variable against a given context.""" if self.lookups is not None: # We're dealing with a variable that needs to be resolved value = self._resolve_lookup(context) else: # We're dealing with a literal, so it's already been "resolved" value = self.literal if self.translate: is_safe = isinstance(value, SafeData) msgid = value.replace('%', '%%') msgid = mark_safe(msgid) if is_safe else msgid if self.message_context: return pgettext_lazy(self.message_context, msgid) else: return gettext_lazy(msgid) return value def __repr__(self): return "<%s: %r>" % (self.__class__.__name__, self.var) def __str__(self): return self.var def _resolve_lookup(self, context): """ Perform resolution of a real variable (i.e. not a literal) against the given context. As indicated by the method's name, this method is an implementation detail and shouldn't be called by external code. Use Variable.resolve() instead. """ current = context try: # catch-all for silent variable failures for bit in self.lookups: try: # dictionary lookup current = current[bit] # ValueError/IndexError are for numpy.array lookup on # numpy < 1.9 and 1.9+ respectively except (TypeError, AttributeError, KeyError, ValueError, IndexError): try: # attribute lookup # Don't return class attributes if the class is the context: if isinstance(current, BaseContext) and getattr(type(current), bit): raise AttributeError current = getattr(current, bit) except (TypeError, AttributeError): # Reraise if the exception was raised by a @property if not isinstance(current, BaseContext) and bit in dir(current): raise try: # list-index lookup current = current[int(bit)] except (IndexError, # list index out of range ValueError, # invalid literal for int() KeyError, # current is a dict without `int(bit)` key TypeError): # unsubscriptable object raise VariableDoesNotExist("Failed lookup for key " "[%s] in %r", (bit, current)) # missing attribute if callable(current): if getattr(current, 'do_not_call_in_templates', False): pass elif getattr(current, 'alters_data', False): current = context.template.engine.string_if_invalid else: try: # method call (assuming no args required) current = current() except TypeError: signature = inspect.signature(current) try: signature.bind() except TypeError: # arguments *were* required current = context.template.engine.string_if_invalid # invalid method call else: raise except Exception as e: template_name = getattr(context, 'template_name', None) or 'unknown' logger.debug( "Exception while resolving variable '%s' in template '%s'.", bit, template_name, exc_info=True, ) if getattr(e, 'silent_variable_failure', False): current = context.template.engine.string_if_invalid else: raise return current class Node: # Set this to True for nodes that must be first in the template (although # they can be preceded by text nodes. 
must_be_first = False child_nodelists = ('nodelist',) token = None def render(self, context): """ Return the node rendered as a string. """ pass def render_annotated(self, context): """ Render the node. If debug is True and an exception occurs during rendering, the exception is annotated with contextual line information where it occurred in the template. For internal usage this method is preferred over using the render method directly. """ try: return self.render(context) except Exception as e: if context.template.engine.debug: # Store the actual node that caused the exception. if not hasattr(e, '_culprit_node'): e._culprit_node = self if ( not hasattr(e, 'template_debug') and context.render_context.template.origin == e._culprit_node.origin ): e.template_debug = context.render_context.template.get_exception_info( e, e._culprit_node.token, ) raise def __iter__(self): yield self def get_nodes_by_type(self, nodetype): """ Return a list of all nodes (within this node and its nodelist) of the given type """ nodes = [] if isinstance(self, nodetype): nodes.append(self) for attr in self.child_nodelists: nodelist = getattr(self, attr, None) if nodelist: nodes.extend(nodelist.get_nodes_by_type(nodetype)) return nodes class NodeList(list): # Set to True the first time a non-TextNode is inserted by # extend_nodelist(). contains_nontext = False def render(self, context): return SafeString(''.join([ node.render_annotated(context) for node in self ])) def get_nodes_by_type(self, nodetype): "Return a list of all nodes of the given type" nodes = [] for node in self: nodes.extend(node.get_nodes_by_type(nodetype)) return nodes class TextNode(Node): child_nodelists = () def __init__(self, s): self.s = s def __repr__(self): return "<%s: %r>" % (self.__class__.__name__, self.s[:25]) def render(self, context): return self.s def render_annotated(self, context): """ Return the given value. The default implementation of this method handles exceptions raised during rendering, which is not necessary for text nodes. """ return self.s def render_value_in_context(value, context): """ Convert any value to a string to become part of a rendered template. This means escaping, if required, and conversion to a string. If value is a string, it's expected to already be translated. """ value = template_localtime(value, use_tz=context.use_tz) value = localize(value, use_l10n=context.use_l10n) if context.autoescape: if not issubclass(type(value), str): value = str(value) return conditional_escape(value) else: return str(value) class VariableNode(Node): child_nodelists = () def __init__(self, filter_expression): self.filter_expression = filter_expression def __repr__(self): return "<Variable Node: %s>" % self.filter_expression def render(self, context): try: output = self.filter_expression.resolve(context) except UnicodeDecodeError: # Unicode conversion can fail sometimes for reasons out of our # control (e.g. exception rendering). In that case, we fail # quietly. return '' return render_value_in_context(output, context) # Regex for token keyword arguments kwarg_re = _lazy_re_compile(r"(?:(\w+)=)?(.+)") def token_kwargs(bits, parser, support_legacy=False): """ Parse token keyword arguments and return a dictionary of the arguments retrieved from the ``bits`` token list. `bits` is a list containing the remainder of the token (split by spaces) that is to be checked for arguments. Valid arguments are removed from this list. `support_legacy` - if True, the legacy format ``1 as foo`` is accepted. 
Otherwise, only the standard ``foo=1`` format is allowed. There is no requirement for all remaining token ``bits`` to be keyword arguments, so return the dictionary as soon as an invalid argument format is reached. """ if not bits: return {} match = kwarg_re.match(bits[0]) kwarg_format = match and match[1] if not kwarg_format: if not support_legacy: return {} if len(bits) < 3 or bits[1] != 'as': return {} kwargs = {} while bits: if kwarg_format: match = kwarg_re.match(bits[0]) if not match or not match[1]: return kwargs key, value = match.groups() del bits[:1] else: if len(bits) < 3 or bits[1] != 'as': return kwargs key, value = bits[2], bits[0] del bits[:3] kwargs[key] = parser.compile_filter(value) if bits and not kwarg_format: if bits[0] != 'and': return kwargs del bits[:1] return kwargs
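
# Quick sketch of the compilation pipeline documented above (the template
# string is an illustrative assumption): Lexer.tokenize() produces the
# TEXT/VAR/BLOCK/COMMENT tokens that Parser.parse() later compiles into
# Nodes, and Variable resolves dotted lookups against a plain dict.
from django.template.base import Lexer, Variable

for token in Lexer('Hello {{ name }}{# note #}{% if ok %}!{% endif %}').tokenize():
    print(token.token_type.name, repr(token.contents))
# TEXT 'Hello '
# VAR 'name'
# COMMENT 'note'
# BLOCK 'if ok'
# TEXT '!'
# BLOCK 'endif'

print(Variable('article.section').resolve({'article': {'section': 'News'}}))  # News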
import functools from django.core.exceptions import ImproperlyConfigured from django.utils.functional import cached_property from django.utils.module_loading import import_string from .base import Template from .context import Context, _builtin_context_processors from .exceptions import TemplateDoesNotExist from .library import import_library class Engine: default_builtins = [ 'django.template.defaulttags', 'django.template.defaultfilters', 'django.template.loader_tags', ] def __init__(self, dirs=None, app_dirs=False, context_processors=None, debug=False, loaders=None, string_if_invalid='', file_charset='utf-8', libraries=None, builtins=None, autoescape=True): if dirs is None: dirs = [] if context_processors is None: context_processors = [] if loaders is None: loaders = ['django.template.loaders.filesystem.Loader'] if app_dirs: loaders += ['django.template.loaders.app_directories.Loader'] if not debug: loaders = [('django.template.loaders.cached.Loader', loaders)] else: if app_dirs: raise ImproperlyConfigured( "app_dirs must not be set when loaders is defined.") if libraries is None: libraries = {} if builtins is None: builtins = [] self.dirs = dirs self.app_dirs = app_dirs self.autoescape = autoescape self.context_processors = context_processors self.debug = debug self.loaders = loaders self.string_if_invalid = string_if_invalid self.file_charset = file_charset self.libraries = libraries self.template_libraries = self.get_template_libraries(libraries) self.builtins = self.default_builtins + builtins self.template_builtins = self.get_template_builtins(self.builtins) def __repr__(self): return ( '<%s:%s app_dirs=%s%s debug=%s loaders=%s string_if_invalid=%s ' 'file_charset=%s%s%s autoescape=%s>' ) % ( self.__class__.__qualname__, '' if not self.dirs else ' dirs=%s' % repr(self.dirs), self.app_dirs, '' if not self.context_processors else ' context_processors=%s' % repr(self.context_processors), self.debug, repr(self.loaders), repr(self.string_if_invalid), repr(self.file_charset), '' if not self.libraries else ' libraries=%s' % repr(self.libraries), '' if not self.builtins else ' builtins=%s' % repr(self.builtins), repr(self.autoescape), ) @staticmethod @functools.lru_cache() def get_default(): """ Return the first DjangoTemplates backend that's configured, or raise ImproperlyConfigured if none are configured. This is required for preserving historical APIs that rely on a globally available, implicitly configured engine such as: >>> from django.template import Context, Template >>> template = Template("Hello {{ name }}!") >>> context = Context({'name': "world"}) >>> template.render(context) 'Hello world!' """ # Since Engine is imported in django.template and since # DjangoTemplates is a wrapper around this Engine class, # local imports are required to avoid import loops. 
from django.template import engines from django.template.backends.django import DjangoTemplates for engine in engines.all(): if isinstance(engine, DjangoTemplates): return engine.engine raise ImproperlyConfigured('No DjangoTemplates backend is configured.') @cached_property def template_context_processors(self): context_processors = _builtin_context_processors context_processors += tuple(self.context_processors) return tuple(import_string(path) for path in context_processors) def get_template_builtins(self, builtins): return [import_library(x) for x in builtins] def get_template_libraries(self, libraries): loaded = {} for name, path in libraries.items(): loaded[name] = import_library(path) return loaded @cached_property def template_loaders(self): return self.get_template_loaders(self.loaders) def get_template_loaders(self, template_loaders): loaders = [] for template_loader in template_loaders: loader = self.find_template_loader(template_loader) if loader is not None: loaders.append(loader) return loaders def find_template_loader(self, loader): if isinstance(loader, (tuple, list)): loader, *args = loader else: args = [] if isinstance(loader, str): loader_class = import_string(loader) return loader_class(self, *args) else: raise ImproperlyConfigured( "Invalid value in template loaders configuration: %r" % loader) def find_template(self, name, dirs=None, skip=None): tried = [] for loader in self.template_loaders: try: template = loader.get_template(name, skip=skip) return template, template.origin except TemplateDoesNotExist as e: tried.extend(e.tried) raise TemplateDoesNotExist(name, tried=tried) def from_string(self, template_code): """ Return a compiled Template object for the given template code, handling template inheritance recursively. """ return Template(template_code, engine=self) def get_template(self, template_name): """ Return a compiled Template object for the given template name, handling template inheritance recursively. """ template, origin = self.find_template(template_name) if not hasattr(template, 'render'): # template needs to be compiled template = Template(template, origin, template_name, engine=self) return template def render_to_string(self, template_name, context=None): """ Render the template specified by template_name with the given context. For use in Django's test suite. """ if isinstance(template_name, (list, tuple)): t = self.select_template(template_name) else: t = self.get_template(template_name) # Django < 1.8 accepted a Context in `context` even though that's # unintended. Preserve this ability but don't rewrap `context`. if isinstance(context, Context): return t.render(context) else: return t.render(Context(context, autoescape=self.autoescape)) def select_template(self, template_name_list): """ Given a list of template names, return the first that can be loaded. """ if not template_name_list: raise TemplateDoesNotExist("No template names provided") not_found = [] for template_name in template_name_list: try: return self.get_template(template_name) except TemplateDoesNotExist as exc: if exc.args[0] not in not_found: not_found.append(exc.args[0]) continue # If we get here, none of the templates could be loaded raise TemplateDoesNotExist(', '.join(not_found))
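
# Minimal sketch of the Engine API above, runnable standalone (the loader
# configuration and template strings are illustrative assumptions).
# get_template() and select_template() go through find_template() and the
# configured loaders; from_string() bypasses the loaders entirely.
from django.conf import settings
from django.template import Context, Engine

if not settings.configured:  # only needed when running outside a project
    settings.configure()

engine = Engine(loaders=[(
    'django.template.loaders.locmem.Loader',
    {'hello.txt': 'Hello {{ name }}!'},
)])
print(engine.render_to_string('hello.txt', {'name': 'world'}))  # Hello world!
print(engine.from_string('{{ n }} item(s)').render(Context({'n': 3})))  # 3 item(s)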
"""Default tags used by the template system, available to all templates.""" import re import sys import warnings from collections import namedtuple from datetime import datetime from itertools import cycle as itertools_cycle, groupby from django.conf import settings from django.utils import timezone from django.utils.html import conditional_escape, format_html from django.utils.lorem_ipsum import paragraphs, words from django.utils.safestring import mark_safe from .base import ( BLOCK_TAG_END, BLOCK_TAG_START, COMMENT_TAG_END, COMMENT_TAG_START, FILTER_SEPARATOR, SINGLE_BRACE_END, SINGLE_BRACE_START, VARIABLE_ATTRIBUTE_SEPARATOR, VARIABLE_TAG_END, VARIABLE_TAG_START, Node, NodeList, TemplateSyntaxError, VariableDoesNotExist, kwarg_re, render_value_in_context, token_kwargs, ) from .context import Context from .defaultfilters import date from .library import Library from .smartif import IfParser, Literal register = Library() class AutoEscapeControlNode(Node): """Implement the actions of the autoescape tag.""" def __init__(self, setting, nodelist): self.setting, self.nodelist = setting, nodelist def render(self, context): old_setting = context.autoescape context.autoescape = self.setting output = self.nodelist.render(context) context.autoescape = old_setting if self.setting: return mark_safe(output) else: return output class CommentNode(Node): child_nodelists = () def render(self, context): return '' class CsrfTokenNode(Node): child_nodelists = () def render(self, context): csrf_token = context.get('csrf_token') if csrf_token: if csrf_token == 'NOTPROVIDED': return format_html("") else: return format_html('<input type="hidden" name="csrfmiddlewaretoken" value="{}">', csrf_token) else: # It's very probable that the token is missing because of # misconfiguration, so we raise a warning if settings.DEBUG: warnings.warn( "A {% csrf_token %} was used in a template, but the context " "did not provide the value. This is usually caused by not " "using RequestContext." ) return '' class CycleNode(Node): def __init__(self, cyclevars, variable_name=None, silent=False): self.cyclevars = cyclevars self.variable_name = variable_name self.silent = silent def render(self, context): if self not in context.render_context: # First time the node is rendered in template context.render_context[self] = itertools_cycle(self.cyclevars) cycle_iter = context.render_context[self] value = next(cycle_iter).resolve(context) if self.variable_name: context.set_upward(self.variable_name, value) if self.silent: return '' return render_value_in_context(value, context) def reset(self, context): """ Reset the cycle iteration back to the beginning. """ context.render_context[self] = itertools_cycle(self.cyclevars) class DebugNode(Node): def render(self, context): from pprint import pformat output = [pformat(val) for val in context] output.append('\n\n') output.append(pformat(sys.modules)) return ''.join(output) class FilterNode(Node): def __init__(self, filter_expr, nodelist): self.filter_expr, self.nodelist = filter_expr, nodelist def render(self, context): output = self.nodelist.render(context) # Apply filters. 
with context.push(var=output): return self.filter_expr.resolve(context) class FirstOfNode(Node): def __init__(self, variables, asvar=None): self.vars = variables self.asvar = asvar def render(self, context): first = '' for var in self.vars: value = var.resolve(context, ignore_failures=True) if value: first = render_value_in_context(value, context) break if self.asvar: context[self.asvar] = first return '' return first class ForNode(Node): child_nodelists = ('nodelist_loop', 'nodelist_empty') def __init__(self, loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty=None): self.loopvars, self.sequence = loopvars, sequence self.is_reversed = is_reversed self.nodelist_loop = nodelist_loop if nodelist_empty is None: self.nodelist_empty = NodeList() else: self.nodelist_empty = nodelist_empty def __repr__(self): reversed_text = ' reversed' if self.is_reversed else '' return '<%s: for %s in %s, tail_len: %d%s>' % ( self.__class__.__name__, ', '.join(self.loopvars), self.sequence, len(self.nodelist_loop), reversed_text, ) def render(self, context): if 'forloop' in context: parentloop = context['forloop'] else: parentloop = {} with context.push(): values = self.sequence.resolve(context, ignore_failures=True) if values is None: values = [] if not hasattr(values, '__len__'): values = list(values) len_values = len(values) if len_values < 1: return self.nodelist_empty.render(context) nodelist = [] if self.is_reversed: values = reversed(values) num_loopvars = len(self.loopvars) unpack = num_loopvars > 1 # Create a forloop value in the context. We'll update counters on each # iteration just below. loop_dict = context['forloop'] = {'parentloop': parentloop} for i, item in enumerate(values): # Shortcuts for current loop iteration number. loop_dict['counter0'] = i loop_dict['counter'] = i + 1 # Reverse counter iteration numbers. loop_dict['revcounter'] = len_values - i loop_dict['revcounter0'] = len_values - i - 1 # Boolean values designating first and last times through loop. loop_dict['first'] = (i == 0) loop_dict['last'] = (i == len_values - 1) pop_context = False if unpack: # If there are multiple loop variables, unpack the item into # them. try: len_item = len(item) except TypeError: # not an iterable len_item = 1 # Check loop variable count before unpacking if num_loopvars != len_item: raise ValueError( "Need {} values to unpack in for loop; got {}. " .format(num_loopvars, len_item), ) unpacked_vars = dict(zip(self.loopvars, item)) pop_context = True context.update(unpacked_vars) else: context[self.loopvars[0]] = item for node in self.nodelist_loop: nodelist.append(node.render_annotated(context)) if pop_context: # Pop the loop variables pushed on to the context to avoid # the context ending up in an inconsistent state when other # tags (e.g., include and with) push data to context. context.pop() return mark_safe(''.join(nodelist)) class IfChangedNode(Node): child_nodelists = ('nodelist_true', 'nodelist_false') def __init__(self, nodelist_true, nodelist_false, *varlist): self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false self._varlist = varlist def render(self, context): # Init state storage state_frame = self._get_context_stack_frame(context) state_frame.setdefault(self) nodelist_true_output = None if self._varlist: # Consider multiple parameters. This behaves like an OR evaluation # of the multiple variables. 
compare_to = [var.resolve(context, ignore_failures=True) for var in self._varlist] else: # The "{% ifchanged %}" syntax (without any variables) compares # the rendered output. compare_to = nodelist_true_output = self.nodelist_true.render(context) if compare_to != state_frame[self]: state_frame[self] = compare_to # render true block if not already rendered return nodelist_true_output or self.nodelist_true.render(context) elif self.nodelist_false: return self.nodelist_false.render(context) return '' def _get_context_stack_frame(self, context): # The Context object behaves like a stack where each template tag can create a new scope. # Find the place where to store the state to detect changes. if 'forloop' in context: # Ifchanged is bound to the local for loop. # When there is a loop-in-loop, the state is bound to the inner loop, # so it resets when the outer loop continues. return context['forloop'] else: # Using ifchanged outside loops. Effectively this is a no-op because the state is associated with 'self'. return context.render_context class IfNode(Node): def __init__(self, conditions_nodelists): self.conditions_nodelists = conditions_nodelists def __repr__(self): return '<%s>' % self.__class__.__name__ def __iter__(self): for _, nodelist in self.conditions_nodelists: yield from nodelist @property def nodelist(self): return NodeList(self) def render(self, context): for condition, nodelist in self.conditions_nodelists: if condition is not None: # if / elif clause try: match = condition.eval(context) except VariableDoesNotExist: match = None else: # else clause match = True if match: return nodelist.render(context) return '' class LoremNode(Node): def __init__(self, count, method, common): self.count, self.method, self.common = count, method, common def render(self, context): try: count = int(self.count.resolve(context)) except (ValueError, TypeError): count = 1 if self.method == 'w': return words(count, common=self.common) else: paras = paragraphs(count, common=self.common) if self.method == 'p': paras = ['<p>%s</p>' % p for p in paras] return '\n\n'.join(paras) GroupedResult = namedtuple('GroupedResult', ['grouper', 'list']) class RegroupNode(Node): def __init__(self, target, expression, var_name): self.target, self.expression = target, expression self.var_name = var_name def resolve_expression(self, obj, context): # This method is called for each object in self.target. See regroup() # for the reason why we temporarily put the object in the context. context[self.var_name] = obj return self.expression.resolve(context, ignore_failures=True) def render(self, context): obj_list = self.target.resolve(context, ignore_failures=True) if obj_list is None: # target variable wasn't found in context; fail silently. context[self.var_name] = [] return '' # List of dictionaries in the format: # {'grouper': 'key', 'list': [list of contents]}. 
context[self.var_name] = [ GroupedResult(grouper=key, list=list(val)) for key, val in groupby(obj_list, lambda obj: self.resolve_expression(obj, context)) ] return '' class LoadNode(Node): child_nodelists = () def render(self, context): return '' class NowNode(Node): def __init__(self, format_string, asvar=None): self.format_string = format_string self.asvar = asvar def render(self, context): tzinfo = timezone.get_current_timezone() if settings.USE_TZ else None formatted = date(datetime.now(tz=tzinfo), self.format_string) if self.asvar: context[self.asvar] = formatted return '' else: return formatted class ResetCycleNode(Node): def __init__(self, node): self.node = node def render(self, context): self.node.reset(context) return '' class SpacelessNode(Node): def __init__(self, nodelist): self.nodelist = nodelist def render(self, context): from django.utils.html import strip_spaces_between_tags return strip_spaces_between_tags(self.nodelist.render(context).strip()) class TemplateTagNode(Node): mapping = { 'openblock': BLOCK_TAG_START, 'closeblock': BLOCK_TAG_END, 'openvariable': VARIABLE_TAG_START, 'closevariable': VARIABLE_TAG_END, 'openbrace': SINGLE_BRACE_START, 'closebrace': SINGLE_BRACE_END, 'opencomment': COMMENT_TAG_START, 'closecomment': COMMENT_TAG_END, } def __init__(self, tagtype): self.tagtype = tagtype def render(self, context): return self.mapping.get(self.tagtype, '') class URLNode(Node): child_nodelists = () def __init__(self, view_name, args, kwargs, asvar): self.view_name = view_name self.args = args self.kwargs = kwargs self.asvar = asvar def __repr__(self): return "<%s view_name='%s' args=%s kwargs=%s as=%s>" % ( self.__class__.__qualname__, self.view_name, repr(self.args), repr(self.kwargs), repr(self.asvar), ) def render(self, context): from django.urls import NoReverseMatch, reverse args = [arg.resolve(context) for arg in self.args] kwargs = {k: v.resolve(context) for k, v in self.kwargs.items()} view_name = self.view_name.resolve(context) try: current_app = context.request.current_app except AttributeError: try: current_app = context.request.resolver_match.namespace except AttributeError: current_app = None # Try to look up the URL. If it fails, raise NoReverseMatch unless the # {% url ... as var %} construct is used, in which case return nothing. 
url = '' try: url = reverse(view_name, args=args, kwargs=kwargs, current_app=current_app) except NoReverseMatch: if self.asvar is None: raise if self.asvar: context[self.asvar] = url return '' else: if context.autoescape: url = conditional_escape(url) return url class VerbatimNode(Node): def __init__(self, content): self.content = content def render(self, context): return self.content class WidthRatioNode(Node): def __init__(self, val_expr, max_expr, max_width, asvar=None): self.val_expr = val_expr self.max_expr = max_expr self.max_width = max_width self.asvar = asvar def render(self, context): try: value = self.val_expr.resolve(context) max_value = self.max_expr.resolve(context) max_width = int(self.max_width.resolve(context)) except VariableDoesNotExist: return '' except (ValueError, TypeError): raise TemplateSyntaxError("widthratio final argument must be a number") try: value = float(value) max_value = float(max_value) ratio = (value / max_value) * max_width result = str(round(ratio)) except ZeroDivisionError: result = '0' except (ValueError, TypeError, OverflowError): result = '' if self.asvar: context[self.asvar] = result return '' else: return result class WithNode(Node): def __init__(self, var, name, nodelist, extra_context=None): self.nodelist = nodelist # var and name are legacy attributes, being left in case they are used # by third-party subclasses of this Node. self.extra_context = extra_context or {} if name: self.extra_context[name] = var def __repr__(self): return '<%s>' % self.__class__.__name__ def render(self, context): values = {key: val.resolve(context) for key, val in self.extra_context.items()} with context.push(**values): return self.nodelist.render(context) @register.tag def autoescape(parser, token): """ Force autoescape behavior for this block. """ # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments args = token.contents.split() if len(args) != 2: raise TemplateSyntaxError("'autoescape' tag requires exactly one argument.") arg = args[1] if arg not in ('on', 'off'): raise TemplateSyntaxError("'autoescape' argument should be 'on' or 'off'") nodelist = parser.parse(('endautoescape',)) parser.delete_first_token() return AutoEscapeControlNode((arg == 'on'), nodelist) @register.tag def comment(parser, token): """ Ignore everything between ``{% comment %}`` and ``{% endcomment %}``. """ parser.skip_past('endcomment') return CommentNode() @register.tag def cycle(parser, token): """ Cycle among the given strings each time this tag is encountered. Within a loop, cycles among the given strings each time through the loop:: {% for o in some_list %} <tr class="{% cycle 'row1' 'row2' %}"> ... </tr> {% endfor %} Outside of a loop, give the values a unique name the first time you call it, then use that name each successive time through:: <tr class="{% cycle 'row1' 'row2' 'row3' as rowcolors %}">...</tr> <tr class="{% cycle rowcolors %}">...</tr> <tr class="{% cycle rowcolors %}">...</tr> You can use any number of values, separated by spaces. Commas can also be used to separate values; if a comma is used, the cycle values are interpreted as literal strings. 
The optional flag "silent" can be used to prevent the cycle declaration from returning any value:: {% for o in some_list %} {% cycle 'row1' 'row2' as rowcolors silent %} <tr class="{{ rowcolors }}">{% include "subtemplate.html " %}</tr> {% endfor %} """ # Note: This returns the exact same node on each {% cycle name %} call; # that is, the node object returned from {% cycle a b c as name %} and the # one returned from {% cycle name %} are the exact same object. This # shouldn't cause problems (heh), but if it does, now you know. # # Ugly hack warning: This stuffs the named template dict into parser so # that names are only unique within each template (as opposed to using # a global variable, which would make cycle names have to be unique across # *all* templates. # # It keeps the last node in the parser to be able to reset it with # {% resetcycle %}. args = token.split_contents() if len(args) < 2: raise TemplateSyntaxError("'cycle' tag requires at least two arguments") if len(args) == 2: # {% cycle foo %} case. name = args[1] if not hasattr(parser, '_named_cycle_nodes'): raise TemplateSyntaxError("No named cycles in template. '%s' is not defined" % name) if name not in parser._named_cycle_nodes: raise TemplateSyntaxError("Named cycle '%s' does not exist" % name) return parser._named_cycle_nodes[name] as_form = False if len(args) > 4: # {% cycle ... as foo [silent] %} case. if args[-3] == "as": if args[-1] != "silent": raise TemplateSyntaxError("Only 'silent' flag is allowed after cycle's name, not '%s'." % args[-1]) as_form = True silent = True args = args[:-1] elif args[-2] == "as": as_form = True silent = False if as_form: name = args[-1] values = [parser.compile_filter(arg) for arg in args[1:-2]] node = CycleNode(values, name, silent=silent) if not hasattr(parser, '_named_cycle_nodes'): parser._named_cycle_nodes = {} parser._named_cycle_nodes[name] = node else: values = [parser.compile_filter(arg) for arg in args[1:]] node = CycleNode(values) parser._last_cycle_node = node return node @register.tag def csrf_token(parser, token): return CsrfTokenNode() @register.tag def debug(parser, token): """ Output a whole load of debugging information, including the current context and imported modules. Sample usage:: <pre> {% debug %} </pre> """ return DebugNode() @register.tag('filter') def do_filter(parser, token): """ Filter the contents of the block through variable filters. Filters can also be piped through each other, and they can have arguments -- just like in variable syntax. Sample usage:: {% filter force_escape|lower %} This text will be HTML-escaped, and will appear in lowercase. {% endfilter %} Note that the ``escape`` and ``safe`` filters are not acceptable arguments. Instead, use the ``autoescape`` tag to manage autoescaping for blocks of template code. """ # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments _, rest = token.contents.split(None, 1) filter_expr = parser.compile_filter("var|%s" % (rest)) for func, unused in filter_expr.filters: filter_name = getattr(func, '_filter_name', None) if filter_name in ('escape', 'safe'): raise TemplateSyntaxError('"filter %s" is not permitted. Use the "autoescape" tag instead.' % filter_name) nodelist = parser.parse(('endfilter',)) parser.delete_first_token() return FilterNode(filter_expr, nodelist) @register.tag def firstof(parser, token): """ Output the first variable passed that is not False. Output nothing if all the passed variables are False. 
Sample usage:: {% firstof var1 var2 var3 as myvar %} This is equivalent to:: {% if var1 %} {{ var1 }} {% elif var2 %} {{ var2 }} {% elif var3 %} {{ var3 }} {% endif %} but much cleaner! You can also use a literal string as a fallback value in case all passed variables are False:: {% firstof var1 var2 var3 "fallback value" %} If you want to disable auto-escaping of variables you can use:: {% autoescape off %} {% firstof var1 var2 var3 "<strong>fallback value</strong>" %} {% autoescape %} Or if only some variables should be escaped, you can use:: {% firstof var1 var2|safe var3 "<strong>fallback value</strong>"|safe %} """ bits = token.split_contents()[1:] asvar = None if not bits: raise TemplateSyntaxError("'firstof' statement requires at least one argument") if len(bits) >= 2 and bits[-2] == 'as': asvar = bits[-1] bits = bits[:-2] return FirstOfNode([parser.compile_filter(bit) for bit in bits], asvar) @register.tag('for') def do_for(parser, token): """ Loop over each item in an array. For example, to display a list of athletes given ``athlete_list``:: <ul> {% for athlete in athlete_list %} <li>{{ athlete.name }}</li> {% endfor %} </ul> You can loop over a list in reverse by using ``{% for obj in list reversed %}``. You can also unpack multiple values from a two-dimensional array:: {% for key,value in dict.items %} {{ key }}: {{ value }} {% endfor %} The ``for`` tag can take an optional ``{% empty %}`` clause that will be displayed if the given array is empty or could not be found:: <ul> {% for athlete in athlete_list %} <li>{{ athlete.name }}</li> {% empty %} <li>Sorry, no athletes in this list.</li> {% endfor %} <ul> The above is equivalent to -- but shorter, cleaner, and possibly faster than -- the following:: <ul> {% if athlete_list %} {% for athlete in athlete_list %} <li>{{ athlete.name }}</li> {% endfor %} {% else %} <li>Sorry, no athletes in this list.</li> {% endif %} </ul> The for loop sets a number of variables available within the loop: ========================== ================================================ Variable Description ========================== ================================================ ``forloop.counter`` The current iteration of the loop (1-indexed) ``forloop.counter0`` The current iteration of the loop (0-indexed) ``forloop.revcounter`` The number of iterations from the end of the loop (1-indexed) ``forloop.revcounter0`` The number of iterations from the end of the loop (0-indexed) ``forloop.first`` True if this is the first time through the loop ``forloop.last`` True if this is the last time through the loop ``forloop.parentloop`` For nested loops, this is the loop "above" the current one ========================== ================================================ """ bits = token.split_contents() if len(bits) < 4: raise TemplateSyntaxError("'for' statements should have at least four" " words: %s" % token.contents) is_reversed = bits[-1] == 'reversed' in_index = -3 if is_reversed else -2 if bits[in_index] != 'in': raise TemplateSyntaxError("'for' statements should use the format" " 'for x in y': %s" % token.contents) invalid_chars = frozenset((' ', '"', "'", FILTER_SEPARATOR)) loopvars = re.split(r' *, *', ' '.join(bits[1:in_index])) for var in loopvars: if not var or not invalid_chars.isdisjoint(var): raise TemplateSyntaxError("'for' tag received an invalid argument:" " %s" % token.contents) sequence = parser.compile_filter(bits[in_index + 1]) nodelist_loop = parser.parse(('empty', 'endfor',)) token = parser.next_token() if token.contents == 'empty': 
nodelist_empty = parser.parse(('endfor',)) parser.delete_first_token() else: nodelist_empty = None return ForNode(loopvars, sequence, is_reversed, nodelist_loop, nodelist_empty) class TemplateLiteral(Literal): def __init__(self, value, text): self.value = value self.text = text # for better error messages def display(self): return self.text def eval(self, context): return self.value.resolve(context, ignore_failures=True) class TemplateIfParser(IfParser): error_class = TemplateSyntaxError def __init__(self, parser, *args, **kwargs): self.template_parser = parser super().__init__(*args, **kwargs) def create_var(self, value): return TemplateLiteral(self.template_parser.compile_filter(value), value) @register.tag('if') def do_if(parser, token): """ Evaluate a variable, and if that variable is "true" (i.e., exists, is not empty, and is not a false boolean value), output the contents of the block: :: {% if athlete_list %} Number of athletes: {{ athlete_list|count }} {% elif athlete_in_locker_room_list %} Athletes should be out of the locker room soon! {% else %} No athletes. {% endif %} In the above, if ``athlete_list`` is not empty, the number of athletes will be displayed by the ``{{ athlete_list|count }}`` variable. The ``if`` tag may take one or several `` {% elif %}`` clauses, as well as an ``{% else %}`` clause that will be displayed if all previous conditions fail. These clauses are optional. ``if`` tags may use ``or``, ``and`` or ``not`` to test a number of variables or to negate a given variable:: {% if not athlete_list %} There are no athletes. {% endif %} {% if athlete_list or coach_list %} There are some athletes or some coaches. {% endif %} {% if athlete_list and coach_list %} Both athletes and coaches are available. {% endif %} {% if not athlete_list or coach_list %} There are no athletes, or there are some coaches. {% endif %} {% if athlete_list and not coach_list %} There are some athletes and absolutely no coaches. {% endif %} Comparison operators are also available, and the use of filters is also allowed, for example:: {% if articles|length >= 5 %}...{% endif %} Arguments and operators _must_ have a space between them, so ``{% if 1>2 %}`` is not a valid if tag. All supported operators are: ``or``, ``and``, ``in``, ``not in`` ``==``, ``!=``, ``>``, ``>=``, ``<`` and ``<=``. Operator precedence follows Python. """ # {% if ... %} bits = token.split_contents()[1:] condition = TemplateIfParser(parser, bits).parse() nodelist = parser.parse(('elif', 'else', 'endif')) conditions_nodelists = [(condition, nodelist)] token = parser.next_token() # {% elif ... %} (repeatable) while token.contents.startswith('elif'): bits = token.split_contents()[1:] condition = TemplateIfParser(parser, bits).parse() nodelist = parser.parse(('elif', 'else', 'endif')) conditions_nodelists.append((condition, nodelist)) token = parser.next_token() # {% else %} (optional) if token.contents == 'else': nodelist = parser.parse(('endif',)) conditions_nodelists.append((None, nodelist)) token = parser.next_token() # {% endif %} if token.contents != 'endif': raise TemplateSyntaxError('Malformed template tag at line {}: "{}"'.format(token.lineno, token.contents)) return IfNode(conditions_nodelists) @register.tag def ifchanged(parser, token): """ Check if a value has changed from the last iteration of a loop. The ``{% ifchanged %}`` block tag is used within a loop. It has two possible uses. 1. Check its own rendered contents against its previous state and only displays the content if it has changed. 
For example, this displays a list of days, only displaying the month if it changes:: <h1>Archive for {{ year }}</h1> {% for date in days %} {% ifchanged %}<h3>{{ date|date:"F" }}</h3>{% endifchanged %} <a href="{{ date|date:"M/d"|lower }}/">{{ date|date:"j" }}</a> {% endfor %} 2. If given one or more variables, check whether any variable has changed. For example, the following shows the date every time it changes, while showing the hour if either the hour or the date has changed:: {% for date in days %} {% ifchanged date.date %} {{ date.date }} {% endifchanged %} {% ifchanged date.hour date.date %} {{ date.hour }} {% endifchanged %} {% endfor %} """ bits = token.split_contents() nodelist_true = parser.parse(('else', 'endifchanged')) token = parser.next_token() if token.contents == 'else': nodelist_false = parser.parse(('endifchanged',)) parser.delete_first_token() else: nodelist_false = NodeList() values = [parser.compile_filter(bit) for bit in bits[1:]] return IfChangedNode(nodelist_true, nodelist_false, *values) def find_library(parser, name): try: return parser.libraries[name] except KeyError: raise TemplateSyntaxError( "'%s' is not a registered tag library. Must be one of:\n%s" % ( name, "\n".join(sorted(parser.libraries)), ), ) def load_from_library(library, label, names): """ Return a subset of tags and filters from a library. """ subset = Library() for name in names: found = False if name in library.tags: found = True subset.tags[name] = library.tags[name] if name in library.filters: found = True subset.filters[name] = library.filters[name] if found is False: raise TemplateSyntaxError( "'%s' is not a valid tag or filter in tag library '%s'" % ( name, label, ), ) return subset @register.tag def load(parser, token): """ Load a custom template tag library into the parser. For example, to load the template tags in ``django/templatetags/news/photos.py``:: {% load news.photos %} Can also be used to load an individual tag/filter from a library:: {% load byline from news %} """ # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments bits = token.contents.split() if len(bits) >= 4 and bits[-2] == "from": # from syntax is used; load individual tags from the library name = bits[-1] lib = find_library(parser, name) subset = load_from_library(lib, name, bits[1:-2]) parser.add_library(subset) else: # one or more libraries are specified; load and add them to the parser for name in bits[1:]: lib = find_library(parser, name) parser.add_library(lib) return LoadNode() @register.tag def lorem(parser, token): """ Create random Latin text useful for providing test data in templates. Usage format:: {% lorem [count] [method] [random] %} ``count`` is a number (or variable) containing the number of paragraphs or words to generate (default is 1). ``method`` is either ``w`` for words, ``p`` for HTML paragraphs, ``b`` for plain-text paragraph blocks (default is ``b``). ``random`` is the word ``random``, which if given, does not use the common paragraph (starting "Lorem ipsum dolor sit amet, consectetuer..."). 
Examples: * ``{% lorem %}`` outputs the common "lorem ipsum" paragraph * ``{% lorem 3 p %}`` outputs the common "lorem ipsum" paragraph and two random paragraphs each wrapped in HTML ``<p>`` tags * ``{% lorem 2 w random %}`` outputs two random latin words """ bits = list(token.split_contents()) tagname = bits[0] # Random bit common = bits[-1] != 'random' if not common: bits.pop() # Method bit if bits[-1] in ('w', 'p', 'b'): method = bits.pop() else: method = 'b' # Count bit if len(bits) > 1: count = bits.pop() else: count = '1' count = parser.compile_filter(count) if len(bits) != 1: raise TemplateSyntaxError("Incorrect format for %r tag" % tagname) return LoremNode(count, method, common) @register.tag def now(parser, token): """ Display the date, formatted according to the given string. Use the same format as PHP's ``date()`` function; see https://php.net/date for all the possible values. Sample usage:: It is {% now "jS F Y H:i" %} """ bits = token.split_contents() asvar = None if len(bits) == 4 and bits[-2] == 'as': asvar = bits[-1] bits = bits[:-2] if len(bits) != 2: raise TemplateSyntaxError("'now' statement takes one argument") format_string = bits[1][1:-1] return NowNode(format_string, asvar) @register.tag def regroup(parser, token): """ Regroup a list of alike objects by a common attribute. This complex tag is best illustrated by use of an example: say that ``musicians`` is a list of ``Musician`` objects that have ``name`` and ``instrument`` attributes, and you'd like to display a list that looks like: * Guitar: * Django Reinhardt * Emily Remler * Piano: * Lovie Austin * Bud Powell * Trumpet: * Duke Ellington The following snippet of template code would accomplish this dubious task:: {% regroup musicians by instrument as grouped %} <ul> {% for group in grouped %} <li>{{ group.grouper }} <ul> {% for musician in group.list %} <li>{{ musician.name }}</li> {% endfor %} </ul> {% endfor %} </ul> As you can see, ``{% regroup %}`` populates a variable with a list of objects with ``grouper`` and ``list`` attributes. ``grouper`` contains the item that was grouped by; ``list`` contains the list of objects that share that ``grouper``. In this case, ``grouper`` would be ``Guitar``, ``Piano`` and ``Trumpet``, and ``list`` is the list of musicians who play this instrument. Note that ``{% regroup %}`` does not work when the list to be grouped is not sorted by the key you are grouping by! This means that if your list of musicians was not sorted by instrument, you'd need to make sure it is sorted before using it, i.e.:: {% regroup musicians|dictsort:"instrument" by instrument as grouped %} """ bits = token.split_contents() if len(bits) != 6: raise TemplateSyntaxError("'regroup' tag takes five arguments") target = parser.compile_filter(bits[1]) if bits[2] != 'by': raise TemplateSyntaxError("second argument to 'regroup' tag must be 'by'") if bits[4] != 'as': raise TemplateSyntaxError("next-to-last argument to 'regroup' tag must" " be 'as'") var_name = bits[5] # RegroupNode will take each item in 'target', put it in the context under # 'var_name', evaluate 'var_name'.'expression' in the current context, and # group by the resulting value. After all items are processed, it will # save the final result in the context under 'var_name', thus clearing the # temporary values. This hack is necessary because the template engine # doesn't provide a context-aware equivalent of Python's getattr. 
expression = parser.compile_filter(var_name + VARIABLE_ATTRIBUTE_SEPARATOR + bits[3]) return RegroupNode(target, expression, var_name) @register.tag def resetcycle(parser, token): """ Reset a cycle tag. If an argument is given, reset the last rendered cycle tag whose name matches the argument, else reset the last rendered cycle tag (named or unnamed). """ args = token.split_contents() if len(args) > 2: raise TemplateSyntaxError("%r tag accepts at most one argument." % args[0]) if len(args) == 2: name = args[1] try: return ResetCycleNode(parser._named_cycle_nodes[name]) except (AttributeError, KeyError): raise TemplateSyntaxError("Named cycle '%s' does not exist." % name) try: return ResetCycleNode(parser._last_cycle_node) except AttributeError: raise TemplateSyntaxError("No cycles in template.") @register.tag def spaceless(parser, token): """ Remove whitespace between HTML tags, including tab and newline characters. Example usage:: {% spaceless %} <p> <a href="foo/">Foo</a> </p> {% endspaceless %} This example returns this HTML:: <p><a href="foo/">Foo</a></p> Only space between *tags* is normalized -- not space between tags and text. In this example, the space around ``Hello`` isn't stripped:: {% spaceless %} <strong> Hello </strong> {% endspaceless %} """ nodelist = parser.parse(('endspaceless',)) parser.delete_first_token() return SpacelessNode(nodelist) @register.tag def templatetag(parser, token): """ Output one of the bits used to compose template tags. Since the template system has no concept of "escaping", to display one of the bits used in template tags, you must use the ``{% templatetag %}`` tag. The argument tells which template bit to output: ================== ======= Argument Outputs ================== ======= ``openblock`` ``{%`` ``closeblock`` ``%}`` ``openvariable`` ``{{`` ``closevariable`` ``}}`` ``openbrace`` ``{`` ``closebrace`` ``}`` ``opencomment`` ``{#`` ``closecomment`` ``#}`` ================== ======= """ # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments bits = token.contents.split() if len(bits) != 2: raise TemplateSyntaxError("'templatetag' statement takes one argument") tag = bits[1] if tag not in TemplateTagNode.mapping: raise TemplateSyntaxError("Invalid templatetag argument: '%s'." " Must be one of: %s" % (tag, list(TemplateTagNode.mapping))) return TemplateTagNode(tag) @register.tag def url(parser, token): r""" Return an absolute URL matching the given view with its parameters. This is a way to define links that aren't tied to a particular URL configuration:: {% url "url_name" arg1 arg2 %} or {% url "url_name" name1=value1 name2=value2 %} The first argument is a URL pattern name. Other arguments are space-separated values that will be filled in place of positional and keyword arguments in the URL. Don't mix positional and keyword arguments. All arguments for the URL must be present. For example, if you have a view ``app_name.views.client_details`` taking the client's id and the corresponding line in a URLconf looks like this:: path('client/<int:id>/', views.client_details, name='client-detail-view') and this app's URLconf is included into the project's URLconf under some path:: path('clients/', include('app_name.urls')) then in a template you can create a link for a certain client like this:: {% url "client-detail-view" client.id %} The URL will look like ``/clients/client/123/``. 
The first argument may also be the name of a template variable that will be evaluated to obtain the view name or the URL name, e.g.:: {% with url_name="client-detail-view" %} {% url url_name client.id %} {% endwith %} """ bits = token.split_contents() if len(bits) < 2: raise TemplateSyntaxError("'%s' takes at least one argument, a URL pattern name." % bits[0]) viewname = parser.compile_filter(bits[1]) args = [] kwargs = {} asvar = None bits = bits[2:] if len(bits) >= 2 and bits[-2] == 'as': asvar = bits[-1] bits = bits[:-2] for bit in bits: match = kwarg_re.match(bit) if not match: raise TemplateSyntaxError("Malformed arguments to url tag") name, value = match.groups() if name: kwargs[name] = parser.compile_filter(value) else: args.append(parser.compile_filter(value)) return URLNode(viewname, args, kwargs, asvar) @register.tag def verbatim(parser, token): """ Stop the template engine from rendering the contents of this block tag. Usage:: {% verbatim %} {% don't process this %} {% endverbatim %} You can also designate a specific closing tag block (allowing the unrendered use of ``{% endverbatim %}``):: {% verbatim myblock %} ... {% endverbatim myblock %} """ nodelist = parser.parse(('endverbatim',)) parser.delete_first_token() return VerbatimNode(nodelist.render(Context())) @register.tag def widthratio(parser, token): """ For creating bar charts and such. Calculate the ratio of a given value to a maximum value, and then apply that ratio to a constant. For example:: <img src="bar.png" alt="Bar" height="10" width="{% widthratio this_value max_value max_width %}"> If ``this_value`` is 175, ``max_value`` is 200, and ``max_width`` is 100, the image in the above example will be 88 pixels wide (because 175/200 = .875; .875 * 100 = 87.5 which is rounded up to 88). In some cases you might want to capture the result of widthratio in a variable. It can be useful for instance in a blocktranslate like this:: {% widthratio this_value max_value max_width as width %} {% blocktranslate %}The width is: {{ width }}{% endblocktranslate %} """ bits = token.split_contents() if len(bits) == 4: tag, this_value_expr, max_value_expr, max_width = bits asvar = None elif len(bits) == 6: tag, this_value_expr, max_value_expr, max_width, as_, asvar = bits if as_ != 'as': raise TemplateSyntaxError("Invalid syntax in widthratio tag. Expecting 'as' keyword") else: raise TemplateSyntaxError("widthratio takes at least three arguments") return WidthRatioNode(parser.compile_filter(this_value_expr), parser.compile_filter(max_value_expr), parser.compile_filter(max_width), asvar=asvar) @register.tag('with') def do_with(parser, token): """ Add one or more values to the context (inside of this block) for caching and easy access. For example:: {% with total=person.some_sql_method %} {{ total }} object{{ total|pluralize }} {% endwith %} Multiple values can be added to the context:: {% with foo=1 bar=2 %} ... {% endwith %} The legacy format of ``{% with person.some_sql_method as total %}`` is still accepted. """ bits = token.split_contents() remaining_bits = bits[1:] extra_context = token_kwargs(remaining_bits, parser, support_legacy=True) if not extra_context: raise TemplateSyntaxError("%r expected at least one variable " "assignment" % bits[0]) if remaining_bits: raise TemplateSyntaxError("%r received an invalid token: %r" % (bits[0], remaining_bits[0])) nodelist = parser.parse(('endwith',)) parser.delete_first_token() return WithNode(None, None, nodelist, extra_context=extra_context)
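# --- usage sketch (not part of this module) ---
# A minimal, hedged example exercising the `with` and `widthratio` tags
# defined above through a standalone Engine. The template string and the
# variable names (`items`, `max_items`) are illustrative assumptions.
from django.template import Context, Engine

engine = Engine()  # standalone engine; no configured settings required
template = engine.from_string(
    '{% with total=items|length %}{% widthratio total max_items 100 %}%{% endwith %}'
)
# 2 items out of a maximum of 5 -> 2/5 * 100 = 40, rendered as "40%".
print(template.render(Context({'items': [1, 2], 'max_items': 5})))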
import functools from importlib import import_module from inspect import getfullargspec, unwrap from django.utils.html import conditional_escape from django.utils.itercompat import is_iterable from .base import Node, Template, token_kwargs from .exceptions import TemplateSyntaxError class InvalidTemplateLibrary(Exception): pass class Library: """ A class for registering template tags and filters. Compiled filter and template tag functions are stored in the filters and tags attributes. The filter, simple_tag, and inclusion_tag methods provide a convenient way to register callables as tags. """ def __init__(self): self.filters = {} self.tags = {} def tag(self, name=None, compile_function=None): if name is None and compile_function is None: # @register.tag() return self.tag_function elif name is not None and compile_function is None: if callable(name): # @register.tag return self.tag_function(name) else: # @register.tag('somename') or @register.tag(name='somename') def dec(func): return self.tag(name, func) return dec elif name is not None and compile_function is not None: # register.tag('somename', somefunc) self.tags[name] = compile_function return compile_function else: raise ValueError( "Unsupported arguments to Library.tag: (%r, %r)" % (name, compile_function), ) def tag_function(self, func): self.tags[getattr(func, "_decorated_function", func).__name__] = func return func def filter(self, name=None, filter_func=None, **flags): """ Register a callable as a template filter. Example: @register.filter def lower(value): return value.lower() """ if name is None and filter_func is None: # @register.filter() def dec(func): return self.filter_function(func, **flags) return dec elif name is not None and filter_func is None: if callable(name): # @register.filter return self.filter_function(name, **flags) else: # @register.filter('somename') or @register.filter(name='somename') def dec(func): return self.filter(name, func, **flags) return dec elif name is not None and filter_func is not None: # register.filter('somename', somefunc) self.filters[name] = filter_func for attr in ('expects_localtime', 'is_safe', 'needs_autoescape'): if attr in flags: value = flags[attr] # set the flag on the filter for FilterExpression.resolve setattr(filter_func, attr, value) # set the flag on the innermost decorated function # for decorators that need it, e.g. stringfilter if hasattr(filter_func, "_decorated_function"): setattr(filter_func._decorated_function, attr, value) filter_func._filter_name = name return filter_func else: raise ValueError( "Unsupported arguments to Library.filter: (%r, %r)" % (name, filter_func), ) def filter_function(self, func, **flags): name = getattr(func, "_decorated_function", func).__name__ return self.filter(name, func, **flags) def simple_tag(self, func=None, takes_context=None, name=None): """ Register a callable as a compiled template tag. 
Example: @register.simple_tag def hello(*args, **kwargs): return 'world' """ def dec(func): params, varargs, varkw, defaults, kwonly, kwonly_defaults, _ = getfullargspec(unwrap(func)) function_name = (name or getattr(func, '_decorated_function', func).__name__) @functools.wraps(func) def compile_func(parser, token): bits = token.split_contents()[1:] target_var = None if len(bits) >= 2 and bits[-2] == 'as': target_var = bits[-1] bits = bits[:-2] args, kwargs = parse_bits( parser, bits, params, varargs, varkw, defaults, kwonly, kwonly_defaults, takes_context, function_name, ) return SimpleNode(func, takes_context, args, kwargs, target_var) self.tag(function_name, compile_func) return func if func is None: # @register.simple_tag(...) return dec elif callable(func): # @register.simple_tag return dec(func) else: raise ValueError("Invalid arguments provided to simple_tag") def inclusion_tag(self, filename, func=None, takes_context=None, name=None): """ Register a callable as an inclusion tag: @register.inclusion_tag('results.html') def show_results(poll): choices = poll.choice_set.all() return {'choices': choices} """ def dec(func): params, varargs, varkw, defaults, kwonly, kwonly_defaults, _ = getfullargspec(unwrap(func)) function_name = (name or getattr(func, '_decorated_function', func).__name__) @functools.wraps(func) def compile_func(parser, token): bits = token.split_contents()[1:] args, kwargs = parse_bits( parser, bits, params, varargs, varkw, defaults, kwonly, kwonly_defaults, takes_context, function_name, ) return InclusionNode( func, takes_context, args, kwargs, filename, ) self.tag(function_name, compile_func) return func return dec class TagHelperNode(Node): """ Base class for tag helper nodes such as SimpleNode and InclusionNode. Manages the positional and keyword arguments to be passed to the decorated function. """ def __init__(self, func, takes_context, args, kwargs): self.func = func self.takes_context = takes_context self.args = args self.kwargs = kwargs def get_resolved_arguments(self, context): resolved_args = [var.resolve(context) for var in self.args] if self.takes_context: resolved_args = [context] + resolved_args resolved_kwargs = {k: v.resolve(context) for k, v in self.kwargs.items()} return resolved_args, resolved_kwargs class SimpleNode(TagHelperNode): child_nodelists = () def __init__(self, func, takes_context, args, kwargs, target_var): super().__init__(func, takes_context, args, kwargs) self.target_var = target_var def render(self, context): resolved_args, resolved_kwargs = self.get_resolved_arguments(context) output = self.func(*resolved_args, **resolved_kwargs) if self.target_var is not None: context[self.target_var] = output return '' if context.autoescape: output = conditional_escape(output) return output class InclusionNode(TagHelperNode): def __init__(self, func, takes_context, args, kwargs, filename): super().__init__(func, takes_context, args, kwargs) self.filename = filename def render(self, context): """ Render the specified template and context. Cache the template object in render_context to avoid reparsing and loading when used in a for loop. 
""" resolved_args, resolved_kwargs = self.get_resolved_arguments(context) _dict = self.func(*resolved_args, **resolved_kwargs) t = context.render_context.get(self) if t is None: if isinstance(self.filename, Template): t = self.filename elif isinstance(getattr(self.filename, 'template', None), Template): t = self.filename.template elif not isinstance(self.filename, str) and is_iterable(self.filename): t = context.template.engine.select_template(self.filename) else: t = context.template.engine.get_template(self.filename) context.render_context[self] = t new_context = context.new(_dict) # Copy across the CSRF token, if present, because inclusion tags are # often used for forms, and we need instructions for using CSRF # protection to be as simple as possible. csrf_token = context.get('csrf_token') if csrf_token is not None: new_context['csrf_token'] = csrf_token return t.render(new_context) def parse_bits(parser, bits, params, varargs, varkw, defaults, kwonly, kwonly_defaults, takes_context, name): """ Parse bits for template tag helpers simple_tag and inclusion_tag, in particular by detecting syntax errors and by extracting positional and keyword arguments. """ if takes_context: if params[0] == 'context': params = params[1:] else: raise TemplateSyntaxError( "'%s' is decorated with takes_context=True so it must " "have a first argument of 'context'" % name) args = [] kwargs = {} unhandled_params = list(params) unhandled_kwargs = [ kwarg for kwarg in kwonly if not kwonly_defaults or kwarg not in kwonly_defaults ] for bit in bits: # First we try to extract a potential kwarg from the bit kwarg = token_kwargs([bit], parser) if kwarg: # The kwarg was successfully extracted param, value = kwarg.popitem() if param not in params and param not in kwonly and varkw is None: # An unexpected keyword argument was supplied raise TemplateSyntaxError( "'%s' received unexpected keyword argument '%s'" % (name, param)) elif param in kwargs: # The keyword argument has already been supplied once raise TemplateSyntaxError( "'%s' received multiple values for keyword argument '%s'" % (name, param)) else: # All good, record the keyword argument kwargs[str(param)] = value if param in unhandled_params: # If using the keyword syntax for a positional arg, then # consume it. unhandled_params.remove(param) elif param in unhandled_kwargs: # Same for keyword-only arguments unhandled_kwargs.remove(param) else: if kwargs: raise TemplateSyntaxError( "'%s' received some positional argument(s) after some " "keyword argument(s)" % name) else: # Record the positional argument args.append(parser.compile_filter(bit)) try: # Consume from the list of expected positional arguments unhandled_params.pop(0) except IndexError: if varargs is None: raise TemplateSyntaxError( "'%s' received too many positional arguments" % name) if defaults is not None: # Consider the last n params handled, where n is the # number of defaults. unhandled_params = unhandled_params[:-len(defaults)] if unhandled_params or unhandled_kwargs: # Some positional arguments were not supplied raise TemplateSyntaxError( "'%s' did not receive value(s) for the argument(s): %s" % (name, ", ".join("'%s'" % p for p in unhandled_params + unhandled_kwargs))) return args, kwargs def import_library(name): """ Load a Library object from a template tag module. """ try: module = import_module(name) except ImportError as e: raise InvalidTemplateLibrary( "Invalid template library specified. 
ImportError raised when " "trying to load '%s': %s" % (name, e) ) try: return module.register except AttributeError: raise InvalidTemplateLibrary( "Module %s does not have a variable named 'register'" % name, )
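# --- usage sketch (not part of this module) ---
# A minimal example of the templatetags-module pattern Library supports;
# the tag and filter names below are illustrative assumptions. Note that
# simple_tag(takes_context=True) requires 'context' as the first
# parameter, as enforced by parse_bits() above.
from django import template

register = template.Library()

@register.filter(is_safe=True)
def shout(value):
    # Upper-case the value; flagged is_safe because no markup is added.
    return str(value).upper()

@register.simple_tag(takes_context=True)
def request_path(context):
    # Resolve the current request from the context, if one is present.
    request = context.get('request')
    return request.path if request is not None else ''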
import re from django.conf import settings from django.http import HttpResponsePermanentRedirect from django.utils.deprecation import MiddlewareMixin class SecurityMiddleware(MiddlewareMixin): def __init__(self, get_response): super().__init__(get_response) self.sts_seconds = settings.SECURE_HSTS_SECONDS self.sts_include_subdomains = settings.SECURE_HSTS_INCLUDE_SUBDOMAINS self.sts_preload = settings.SECURE_HSTS_PRELOAD self.content_type_nosniff = settings.SECURE_CONTENT_TYPE_NOSNIFF self.redirect = settings.SECURE_SSL_REDIRECT self.redirect_host = settings.SECURE_SSL_HOST self.redirect_exempt = [re.compile(r) for r in settings.SECURE_REDIRECT_EXEMPT] self.referrer_policy = settings.SECURE_REFERRER_POLICY self.cross_origin_opener_policy = settings.SECURE_CROSS_ORIGIN_OPENER_POLICY def process_request(self, request): path = request.path.lstrip("/") if (self.redirect and not request.is_secure() and not any(pattern.search(path) for pattern in self.redirect_exempt)): host = self.redirect_host or request.get_host() return HttpResponsePermanentRedirect( "https://%s%s" % (host, request.get_full_path()) ) def process_response(self, request, response): if (self.sts_seconds and request.is_secure() and 'Strict-Transport-Security' not in response): sts_header = "max-age=%s" % self.sts_seconds if self.sts_include_subdomains: sts_header = sts_header + "; includeSubDomains" if self.sts_preload: sts_header = sts_header + "; preload" response.headers['Strict-Transport-Security'] = sts_header if self.content_type_nosniff: response.headers.setdefault('X-Content-Type-Options', 'nosniff') if self.referrer_policy: # Support a comma-separated string or iterable of values to allow # fallback. response.headers.setdefault('Referrer-Policy', ','.join( [v.strip() for v in self.referrer_policy.split(',')] if isinstance(self.referrer_policy, str) else self.referrer_policy )) if self.cross_origin_opener_policy: response.setdefault( 'Cross-Origin-Opener-Policy', self.cross_origin_opener_policy, ) return response
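# --- configuration sketch (not part of this module) ---
# Illustrative settings (the values are assumptions, not recommendations)
# that exercise each branch of SecurityMiddleware above. Entries in
# SECURE_REDIRECT_EXEMPT are matched against request.path.lstrip('/').
SECURE_SSL_REDIRECT = True
SECURE_SSL_HOST = None                      # None: redirect to the request's own host
SECURE_REDIRECT_EXEMPT = [r'^healthz/$']
SECURE_HSTS_SECONDS = 31536000              # one year
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_PRELOAD = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_REFERRER_POLICY = 'same-origin'
SECURE_CROSS_ORIGIN_OPENER_POLICY = 'same-origin'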
""" Cross Site Request Forgery Middleware. This module provides a middleware that implements protection against request forgeries from other sites. """ import logging import string from collections import defaultdict from urllib.parse import urlparse from django.conf import settings from django.core.exceptions import DisallowedHost, ImproperlyConfigured from django.http import UnreadablePostError from django.http.request import HttpHeaders from django.urls import get_callable from django.utils.cache import patch_vary_headers from django.utils.crypto import constant_time_compare, get_random_string from django.utils.deprecation import MiddlewareMixin from django.utils.functional import cached_property from django.utils.http import is_same_domain from django.utils.log import log_response from django.utils.regex_helper import _lazy_re_compile logger = logging.getLogger('django.security.csrf') # This matches if any character is not in CSRF_ALLOWED_CHARS. invalid_token_chars_re = _lazy_re_compile('[^a-zA-Z0-9]') REASON_BAD_ORIGIN = "Origin checking failed - %s does not match any trusted origins." REASON_NO_REFERER = "Referer checking failed - no Referer." REASON_BAD_REFERER = "Referer checking failed - %s does not match any trusted origins." REASON_NO_CSRF_COOKIE = "CSRF cookie not set." REASON_CSRF_TOKEN_MISSING = 'CSRF token missing.' REASON_MALFORMED_REFERER = "Referer checking failed - Referer is malformed." REASON_INSECURE_REFERER = "Referer checking failed - Referer is insecure while host is secure." # The reason strings below are for passing to InvalidTokenFormat. They are # phrases without a subject because they can be in reference to either the CSRF # cookie or non-cookie token. REASON_INCORRECT_LENGTH = 'has incorrect length' REASON_INVALID_CHARACTERS = 'has invalid characters' CSRF_SECRET_LENGTH = 32 CSRF_TOKEN_LENGTH = 2 * CSRF_SECRET_LENGTH CSRF_ALLOWED_CHARS = string.ascii_letters + string.digits CSRF_SESSION_KEY = '_csrftoken' def _get_failure_view(): """Return the view to be used for CSRF rejections.""" return get_callable(settings.CSRF_FAILURE_VIEW) def _get_new_csrf_string(): return get_random_string(CSRF_SECRET_LENGTH, allowed_chars=CSRF_ALLOWED_CHARS) def _mask_cipher_secret(secret): """ Given a secret (assumed to be a string of CSRF_ALLOWED_CHARS), generate a token by adding a mask and applying it to the secret. """ mask = _get_new_csrf_string() chars = CSRF_ALLOWED_CHARS pairs = zip((chars.index(x) for x in secret), (chars.index(x) for x in mask)) cipher = ''.join(chars[(x + y) % len(chars)] for x, y in pairs) return mask + cipher def _unmask_cipher_token(token): """ Given a token (assumed to be a string of CSRF_ALLOWED_CHARS, of length CSRF_TOKEN_LENGTH, and that its first half is a mask), use it to decrypt the second half to produce the original secret. """ mask = token[:CSRF_SECRET_LENGTH] token = token[CSRF_SECRET_LENGTH:] chars = CSRF_ALLOWED_CHARS pairs = zip((chars.index(x) for x in token), (chars.index(x) for x in mask)) return ''.join(chars[x - y] for x, y in pairs) # Note negative values are ok def _get_new_csrf_token(): return _mask_cipher_secret(_get_new_csrf_string()) def get_token(request): """ Return the CSRF token required for a POST form. The token is an alphanumeric value. A new token is created if one is not already set. A side effect of calling this function is to make the csrf_protect decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie' header to the outgoing response. 
For this reason, you may need to use this function lazily, as is done by the csrf context processor. """ if "CSRF_COOKIE" not in request.META: csrf_secret = _get_new_csrf_string() request.META["CSRF_COOKIE"] = _mask_cipher_secret(csrf_secret) else: csrf_secret = _unmask_cipher_token(request.META["CSRF_COOKIE"]) # Since the cookie is being used, flag to send the cookie in # process_response() (even if the client already has it) in order to renew # the expiry timer. request.META['CSRF_COOKIE_NEEDS_UPDATE'] = True return _mask_cipher_secret(csrf_secret) def rotate_token(request): """ Change the CSRF token in use for a request - should be done on login for security purposes. """ request.META.update({ 'CSRF_COOKIE': _get_new_csrf_token(), 'CSRF_COOKIE_NEEDS_UPDATE': True, }) class InvalidTokenFormat(Exception): def __init__(self, reason): self.reason = reason def _sanitize_token(token): if len(token) not in (CSRF_TOKEN_LENGTH, CSRF_SECRET_LENGTH): raise InvalidTokenFormat(REASON_INCORRECT_LENGTH) # Make sure all characters are in CSRF_ALLOWED_CHARS. if invalid_token_chars_re.search(token): raise InvalidTokenFormat(REASON_INVALID_CHARACTERS) if len(token) == CSRF_SECRET_LENGTH: # Older Django versions set cookies to values of CSRF_SECRET_LENGTH # alphanumeric characters. For backwards compatibility, accept # such values as unmasked secrets. # It's easier to mask here and be consistent later, rather than add # different code paths in the checks, although that might be a tad more # efficient. return _mask_cipher_secret(token) return token def _does_token_match(request_csrf_token, csrf_token): # Assume both arguments are sanitized -- that is, strings of # length CSRF_TOKEN_LENGTH, all CSRF_ALLOWED_CHARS. return constant_time_compare( _unmask_cipher_token(request_csrf_token), _unmask_cipher_token(csrf_token), ) class RejectRequest(Exception): def __init__(self, reason): self.reason = reason class CsrfViewMiddleware(MiddlewareMixin): """ Require a present and correct csrfmiddlewaretoken for POST requests that have a CSRF cookie, and set an outgoing CSRF cookie. This middleware should be used in conjunction with the {% csrf_token %} template tag. """ @cached_property def csrf_trusted_origins_hosts(self): return [ urlparse(origin).netloc.lstrip('*') for origin in settings.CSRF_TRUSTED_ORIGINS ] @cached_property def allowed_origins_exact(self): return { origin for origin in settings.CSRF_TRUSTED_ORIGINS if '*' not in origin } @cached_property def allowed_origin_subdomains(self): """ A mapping of allowed schemes to list of allowed netlocs, where all subdomains of the netloc are allowed. """ allowed_origin_subdomains = defaultdict(list) for parsed in (urlparse(origin) for origin in settings.CSRF_TRUSTED_ORIGINS if '*' in origin): allowed_origin_subdomains[parsed.scheme].append(parsed.netloc.lstrip('*')) return allowed_origin_subdomains # The _accept and _reject methods currently only exist for the sake of the # requires_csrf_token decorator. def _accept(self, request): # Avoid checking the request twice by adding a custom attribute to # request. This will be relevant when both decorator and middleware # are used. 
request.csrf_processing_done = True return None def _reject(self, request, reason): response = _get_failure_view()(request, reason=reason) log_response( 'Forbidden (%s): %s', reason, request.path, response=response, request=request, logger=logger, ) return response def _get_token(self, request): if settings.CSRF_USE_SESSIONS: try: return request.session.get(CSRF_SESSION_KEY) except AttributeError: raise ImproperlyConfigured( 'CSRF_USE_SESSIONS is enabled, but request.session is not ' 'set. SessionMiddleware must appear before CsrfViewMiddleware ' 'in MIDDLEWARE.' ) else: try: cookie_token = request.COOKIES[settings.CSRF_COOKIE_NAME] except KeyError: return None # This can raise InvalidTokenFormat. csrf_token = _sanitize_token(cookie_token) if csrf_token != cookie_token: # Then the cookie token had length CSRF_SECRET_LENGTH, so flag # to replace it with the masked version. request.META['CSRF_COOKIE_NEEDS_UPDATE'] = True return csrf_token def _set_token(self, request, response): if settings.CSRF_USE_SESSIONS: if request.session.get(CSRF_SESSION_KEY) != request.META['CSRF_COOKIE']: request.session[CSRF_SESSION_KEY] = request.META['CSRF_COOKIE'] else: response.set_cookie( settings.CSRF_COOKIE_NAME, request.META['CSRF_COOKIE'], max_age=settings.CSRF_COOKIE_AGE, domain=settings.CSRF_COOKIE_DOMAIN, path=settings.CSRF_COOKIE_PATH, secure=settings.CSRF_COOKIE_SECURE, httponly=settings.CSRF_COOKIE_HTTPONLY, samesite=settings.CSRF_COOKIE_SAMESITE, ) # Set the Vary header since content varies with the CSRF cookie. patch_vary_headers(response, ('Cookie',)) def _origin_verified(self, request): request_origin = request.META['HTTP_ORIGIN'] try: good_host = request.get_host() except DisallowedHost: pass else: good_origin = '%s://%s' % ( 'https' if request.is_secure() else 'http', good_host, ) if request_origin == good_origin: return True if request_origin in self.allowed_origins_exact: return True try: parsed_origin = urlparse(request_origin) except ValueError: return False request_scheme = parsed_origin.scheme request_netloc = parsed_origin.netloc return any( is_same_domain(request_netloc, host) for host in self.allowed_origin_subdomains.get(request_scheme, ()) ) def _check_referer(self, request): referer = request.META.get('HTTP_REFERER') if referer is None: raise RejectRequest(REASON_NO_REFERER) try: referer = urlparse(referer) except ValueError: raise RejectRequest(REASON_MALFORMED_REFERER) # Make sure we have a valid URL for Referer. if '' in (referer.scheme, referer.netloc): raise RejectRequest(REASON_MALFORMED_REFERER) # Ensure that our Referer is also secure. if referer.scheme != 'https': raise RejectRequest(REASON_INSECURE_REFERER) if any( is_same_domain(referer.netloc, host) for host in self.csrf_trusted_origins_hosts ): return # Allow matching the configured cookie domain. good_referer = ( settings.SESSION_COOKIE_DOMAIN if settings.CSRF_USE_SESSIONS else settings.CSRF_COOKIE_DOMAIN ) if good_referer is None: # If no cookie domain is configured, allow matching the current # host:port exactly if it's permitted by ALLOWED_HOSTS. try: # request.get_host() includes the port. 
good_referer = request.get_host() except DisallowedHost: raise RejectRequest(REASON_BAD_REFERER % referer.geturl()) else: server_port = request.get_port() if server_port not in ('443', '80'): good_referer = '%s:%s' % (good_referer, server_port) if not is_same_domain(referer.netloc, good_referer): raise RejectRequest(REASON_BAD_REFERER % referer.geturl()) def _bad_token_message(self, reason, token_source): if token_source != 'POST': # Assume it is a settings.CSRF_HEADER_NAME value. header_name = HttpHeaders.parse_header_name(token_source) token_source = f'the {header_name!r} HTTP header' return f'CSRF token from {token_source} {reason}.' def _check_token(self, request): # Access csrf_token via self._get_token() as rotate_token() may have # been called by an authentication middleware during the # process_request() phase. try: csrf_token = self._get_token(request) except InvalidTokenFormat as exc: raise RejectRequest(f'CSRF cookie {exc.reason}.') if csrf_token is None: # No CSRF cookie. For POST requests, we insist on a CSRF cookie, # and in this way we can avoid all CSRF attacks, including login # CSRF. raise RejectRequest(REASON_NO_CSRF_COOKIE) # Check non-cookie token for match. request_csrf_token = '' if request.method == 'POST': try: request_csrf_token = request.POST.get('csrfmiddlewaretoken', '') except UnreadablePostError: # Handle a broken connection before we've completed reading the # POST data. process_view shouldn't raise any exceptions, so # we'll ignore and serve the user a 403 (assuming they're still # listening, which they probably aren't because of the error). pass if request_csrf_token == '': # Fall back to X-CSRFToken, to make things easier for AJAX, and # possible for PUT/DELETE. try: request_csrf_token = request.META[settings.CSRF_HEADER_NAME] except KeyError: raise RejectRequest(REASON_CSRF_TOKEN_MISSING) token_source = settings.CSRF_HEADER_NAME else: token_source = 'POST' try: request_csrf_token = _sanitize_token(request_csrf_token) except InvalidTokenFormat as exc: reason = self._bad_token_message(exc.reason, token_source) raise RejectRequest(reason) if not _does_token_match(request_csrf_token, csrf_token): reason = self._bad_token_message('incorrect', token_source) raise RejectRequest(reason) def process_request(self, request): try: csrf_token = self._get_token(request) except InvalidTokenFormat: csrf_token = _get_new_csrf_token() request.META["CSRF_COOKIE_NEEDS_UPDATE"] = True if csrf_token is not None: # Use same token next time. request.META['CSRF_COOKIE'] = csrf_token def process_view(self, request, callback, callback_args, callback_kwargs): if getattr(request, 'csrf_processing_done', False): return None # Wait until request.META["CSRF_COOKIE"] has been manipulated before # bailing out, so that get_token still works if getattr(callback, 'csrf_exempt', False): return None # Assume that anything not defined as 'safe' by RFC7231 needs protection if request.method in ('GET', 'HEAD', 'OPTIONS', 'TRACE'): return self._accept(request) if getattr(request, '_dont_enforce_csrf_checks', False): # Mechanism to turn off CSRF checks for test suite. It comes after # the creation of CSRF cookies, so that everything else continues # to work exactly the same (e.g. cookies are sent, etc.), but # before any branches that call the _reject method. return self._accept(request) # Reject the request if the Origin header doesn't match an allowed # value. 
if 'HTTP_ORIGIN' in request.META: if not self._origin_verified(request): return self._reject(request, REASON_BAD_ORIGIN % request.META['HTTP_ORIGIN']) elif request.is_secure(): # If the Origin header wasn't provided, reject HTTPS requests if # the Referer header doesn't match an allowed value. # # Suppose user visits http://example.com/ # An active network attacker (man-in-the-middle, MITM) sends a # POST form that targets https://example.com/detonate-bomb/ and # submits it via JavaScript. # # The attacker will need to provide a CSRF cookie and token, but # that's no problem for a MITM and the session-independent secret # we're using. So the MITM can circumvent the CSRF protection. This # is true for any HTTP connection, but anyone using HTTPS expects # better! For this reason, for https://example.com/ we need # additional protection that treats http://example.com/ as # completely untrusted. Under HTTPS, Barth et al. found that the # Referer header is missing for same-domain requests in only about # 0.2% of cases or less, so we can use strict Referer checking. try: self._check_referer(request) except RejectRequest as exc: return self._reject(request, exc.reason) try: self._check_token(request) except RejectRequest as exc: return self._reject(request, exc.reason) return self._accept(request) def process_response(self, request, response): if request.META.get('CSRF_COOKIE_NEEDS_UPDATE'): self._set_token(request, response) # Unset the flag to prevent _set_token() from being unnecessarily # called again in process_response() by other instances of # CsrfViewMiddleware. This can happen e.g. when both a decorator and # middleware are used. However, CSRF_COOKIE_NEEDS_UPDATE is still # respected in subsequent calls e.g. in case rotate_token() is # called in process_response() later by custom middleware but before # those subsequent calls. request.META['CSRF_COOKIE_NEEDS_UPDATE'] = False return response
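# --- usage sketch (not part of this module) ---
# A minimal, hedged example (the view name and response key are
# assumptions) of how a JSON client typically obtains the token:
# get_token() both returns a masked token and flags the cookie for
# (re)setting in process_response() above. The client then echoes the
# value back in the header named by CSRF_HEADER_NAME (X-CSRFToken by
# default) on unsafe requests.
from django.http import JsonResponse
from django.middleware.csrf import get_token

def csrf_bootstrap(request):
    # Calling get_token() has the side effect of setting
    # CSRF_COOKIE_NEEDS_UPDATE so the cookie's expiry is renewed.
    return JsonResponse({'csrfToken': get_token(request)})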
"""Functions for use in URLsconfs.""" from functools import partial from importlib import import_module from django.core.exceptions import ImproperlyConfigured from .resolvers import ( LocalePrefixPattern, RegexPattern, RoutePattern, URLPattern, URLResolver, ) def include(arg, namespace=None): app_name = None if isinstance(arg, tuple): # Callable returning a namespace hint. try: urlconf_module, app_name = arg except ValueError: if namespace: raise ImproperlyConfigured( 'Cannot override the namespace for a dynamic module that ' 'provides a namespace.' ) raise ImproperlyConfigured( 'Passing a %d-tuple to include() is not supported. Pass a ' '2-tuple containing the list of patterns and app_name, and ' 'provide the namespace argument to include() instead.' % len(arg) ) else: # No namespace hint - use manually provided namespace. urlconf_module = arg if isinstance(urlconf_module, str): urlconf_module = import_module(urlconf_module) patterns = getattr(urlconf_module, 'urlpatterns', urlconf_module) app_name = getattr(urlconf_module, 'app_name', app_name) if namespace and not app_name: raise ImproperlyConfigured( 'Specifying a namespace in include() without providing an app_name ' 'is not supported. Set the app_name attribute in the included ' 'module, or pass a 2-tuple containing the list of patterns and ' 'app_name instead.', ) namespace = namespace or app_name # Make sure the patterns can be iterated through (without this, some # testcases will break). if isinstance(patterns, (list, tuple)): for url_pattern in patterns: pattern = getattr(url_pattern, 'pattern', None) if isinstance(pattern, LocalePrefixPattern): raise ImproperlyConfigured( 'Using i18n_patterns in an included URLconf is not allowed.' ) return (urlconf_module, app_name, namespace) def _path(route, view, kwargs=None, name=None, Pattern=None): from django.views import View if isinstance(view, (list, tuple)): # For include(...) processing. pattern = Pattern(route, is_endpoint=False) urlconf_module, app_name, namespace = view return URLResolver( pattern, urlconf_module, kwargs, app_name=app_name, namespace=namespace, ) elif callable(view): pattern = Pattern(route, name=name, is_endpoint=True) return URLPattern(pattern, view, kwargs, name) elif isinstance(view, View): view_cls_name = view.__class__.__name__ raise TypeError( f'view must be a callable, pass {view_cls_name}.as_view(), not ' f'{view_cls_name}().' ) else: raise TypeError('view must be a callable or a list/tuple in the case of include().') path = partial(_path, Pattern=RoutePattern) re_path = partial(_path, Pattern=RegexPattern)
""" This module converts requested URLs to callback view functions. URLResolver is the main class here. Its resolve() method takes a URL (as a string) and returns a ResolverMatch object which provides access to all attributes of the resolved URL match. """ import functools import inspect import re import string from importlib import import_module from pickle import PicklingError from urllib.parse import quote from asgiref.local import Local from django.conf import settings from django.core.checks import Error, Warning from django.core.checks.urls import check_resolver from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist from django.utils.datastructures import MultiValueDict from django.utils.functional import cached_property from django.utils.http import RFC3986_SUBDELIMS, escape_leading_slashes from django.utils.regex_helper import _lazy_re_compile, normalize from django.utils.translation import get_language from .converters import get_converter from .exceptions import NoReverseMatch, Resolver404 from .utils import get_callable class ResolverMatch: def __init__(self, func, args, kwargs, url_name=None, app_names=None, namespaces=None, route=None, tried=None): self.func = func self.args = args self.kwargs = kwargs self.url_name = url_name self.route = route self.tried = tried # If a URLRegexResolver doesn't have a namespace or app_name, it passes # in an empty value. self.app_names = [x for x in app_names if x] if app_names else [] self.app_name = ':'.join(self.app_names) self.namespaces = [x for x in namespaces if x] if namespaces else [] self.namespace = ':'.join(self.namespaces) if not hasattr(func, '__name__'): # A class-based view self._func_path = func.__class__.__module__ + '.' + func.__class__.__name__ else: # A function-based view self._func_path = func.__module__ + '.' + func.__name__ view_path = url_name or self._func_path self.view_name = ':'.join(self.namespaces + [view_path]) def __getitem__(self, index): return (self.func, self.args, self.kwargs)[index] def __repr__(self): if isinstance(self.func, functools.partial): func = repr(self.func) else: func = self._func_path return ( 'ResolverMatch(func=%s, args=%r, kwargs=%r, url_name=%r, ' 'app_names=%r, namespaces=%r, route=%r)' % ( func, self.args, self.kwargs, self.url_name, self.app_names, self.namespaces, self.route, ) ) def __reduce_ex__(self, protocol): raise PicklingError(f'Cannot pickle {self.__class__.__qualname__}.') def get_resolver(urlconf=None): if urlconf is None: urlconf = settings.ROOT_URLCONF return _get_cached_resolver(urlconf) @functools.lru_cache(maxsize=None) def _get_cached_resolver(urlconf=None): return URLResolver(RegexPattern(r'^/'), urlconf) @functools.lru_cache(maxsize=None) def get_ns_resolver(ns_pattern, resolver, converters): # Build a namespaced resolver for the given parent URLconf pattern. # This makes it possible to have captured parameters in the parent # URLconf pattern. pattern = RegexPattern(ns_pattern) pattern.converters = dict(converters) ns_resolver = URLResolver(pattern, resolver.url_patterns) return URLResolver(RegexPattern(r'^/'), [ns_resolver]) class LocaleRegexDescriptor: def __init__(self, attr): self.attr = attr def __get__(self, instance, cls=None): """ Return a compiled regular expression based on the active language. """ if instance is None: return self # As a performance optimization, if the given regex string is a regular # string (not a lazily-translated string proxy), compile it once and # avoid per-language compilation. 
pattern = getattr(instance, self.attr) if isinstance(pattern, str): instance.__dict__['regex'] = instance._compile(pattern) return instance.__dict__['regex'] language_code = get_language() if language_code not in instance._regex_dict: instance._regex_dict[language_code] = instance._compile(str(pattern)) return instance._regex_dict[language_code] class CheckURLMixin: def describe(self): """ Format the URL pattern for display in warning messages. """ description = "'{}'".format(self) if self.name: description += " [name='{}']".format(self.name) return description def _check_pattern_startswith_slash(self): """ Check that the pattern does not begin with a forward slash. """ regex_pattern = self.regex.pattern if not settings.APPEND_SLASH: # Skip check as it can be useful to start a URL pattern with a slash # when APPEND_SLASH=False. return [] if regex_pattern.startswith(('/', '^/', '^\\/')) and not regex_pattern.endswith('/'): warning = Warning( "Your URL pattern {} has a route beginning with a '/'. Remove this " "slash as it is unnecessary. If this pattern is targeted in an " "include(), ensure the include() pattern has a trailing '/'.".format( self.describe() ), id="urls.W002", ) return [warning] else: return [] class RegexPattern(CheckURLMixin): regex = LocaleRegexDescriptor('_regex') def __init__(self, regex, name=None, is_endpoint=False): self._regex = regex self._regex_dict = {} self._is_endpoint = is_endpoint self.name = name self.converters = {} def match(self, path): match = self.regex.search(path) if match: # If there are any named groups, use those as kwargs, ignoring # non-named groups. Otherwise, pass all non-named arguments as # positional arguments. kwargs = match.groupdict() args = () if kwargs else match.groups() kwargs = {k: v for k, v in kwargs.items() if v is not None} return path[match.end():], args, kwargs return None def check(self): warnings = [] warnings.extend(self._check_pattern_startswith_slash()) if not self._is_endpoint: warnings.extend(self._check_include_trailing_dollar()) return warnings def _check_include_trailing_dollar(self): regex_pattern = self.regex.pattern if regex_pattern.endswith('$') and not regex_pattern.endswith(r'\$'): return [Warning( "Your URL pattern {} uses include with a route ending with a '$'. " "Remove the dollar from the route to avoid problems including " "URLs.".format(self.describe()), id='urls.W001', )] else: return [] def _compile(self, regex): """Compile and return the given regular expression.""" try: return re.compile(regex) except re.error as e: raise ImproperlyConfigured( '"%s" is not a valid regular expression: %s' % (regex, e) ) from e def __str__(self): return str(self._regex) _PATH_PARAMETER_COMPONENT_RE = _lazy_re_compile( r'<(?:(?P<converter>[^>:]+):)?(?P<parameter>[^>]+)>' ) def _route_to_regex(route, is_endpoint=False): """ Convert a path pattern into a regular expression. Return the regular expression and a dictionary mapping the capture names to the converters. For example, 'foo/<int:pk>' returns '^foo\\/(?P<pk>[0-9]+)' and {'pk': <django.urls.converters.IntConverter>}. """ original_route = route parts = ['^'] converters = {} while True: match = _PATH_PARAMETER_COMPONENT_RE.search(route) if not match: parts.append(re.escape(route)) break elif not set(match.group()).isdisjoint(string.whitespace): raise ImproperlyConfigured( "URL route '%s' cannot contain whitespace in angle brackets " "<…>." 
% original_route ) parts.append(re.escape(route[:match.start()])) route = route[match.end():] parameter = match['parameter'] if not parameter.isidentifier(): raise ImproperlyConfigured( "URL route '%s' uses parameter name %r which isn't a valid " "Python identifier." % (original_route, parameter) ) raw_converter = match['converter'] if raw_converter is None: # If a converter isn't specified, the default is `str`. raw_converter = 'str' try: converter = get_converter(raw_converter) except KeyError as e: raise ImproperlyConfigured( 'URL route %r uses invalid converter %r.' % (original_route, raw_converter) ) from e converters[parameter] = converter parts.append('(?P<' + parameter + '>' + converter.regex + ')') if is_endpoint: parts.append('$') return ''.join(parts), converters class RoutePattern(CheckURLMixin): regex = LocaleRegexDescriptor('_route') def __init__(self, route, name=None, is_endpoint=False): self._route = route self._regex_dict = {} self._is_endpoint = is_endpoint self.name = name self.converters = _route_to_regex(str(route), is_endpoint)[1] def match(self, path): match = self.regex.search(path) if match: # RoutePattern doesn't allow non-named groups so args are ignored. kwargs = match.groupdict() for key, value in kwargs.items(): converter = self.converters[key] try: kwargs[key] = converter.to_python(value) except ValueError: return None return path[match.end():], (), kwargs return None def check(self): warnings = self._check_pattern_startswith_slash() route = self._route if '(?P<' in route or route.startswith('^') or route.endswith('$'): warnings.append(Warning( "Your URL pattern {} has a route that contains '(?P<', begins " "with a '^', or ends with a '$'. This was likely an oversight " "when migrating to django.urls.path().".format(self.describe()), id='2_0.W001', )) return warnings def _compile(self, route): return re.compile(_route_to_regex(route, self._is_endpoint)[0]) def __str__(self): return str(self._route) class LocalePrefixPattern: def __init__(self, prefix_default_language=True): self.prefix_default_language = prefix_default_language self.converters = {} @property def regex(self): # This is only used by reverse() and cached in _reverse_dict. return re.compile(self.language_prefix) @property def language_prefix(self): language_code = get_language() or settings.LANGUAGE_CODE if language_code == settings.LANGUAGE_CODE and not self.prefix_default_language: return '' else: return '%s/' % language_code def match(self, path): language_prefix = self.language_prefix if path.startswith(language_prefix): return path[len(language_prefix):], (), {} return None def check(self): return [] def describe(self): return "'{}'".format(self) def __str__(self): return self.language_prefix class URLPattern: def __init__(self, pattern, callback, default_args=None, name=None): self.pattern = pattern self.callback = callback # the view self.default_args = default_args or {} self.name = name def __repr__(self): return '<%s %s>' % (self.__class__.__name__, self.pattern.describe()) def check(self): warnings = self._check_pattern_name() warnings.extend(self.pattern.check()) warnings.extend(self._check_callback()) return warnings def _check_pattern_name(self): """ Check that the pattern name does not contain a colon. """ if self.pattern.name is not None and ":" in self.pattern.name: warning = Warning( "Your URL pattern {} has a name including a ':'. 
Remove the colon, to " "avoid ambiguous namespace references.".format(self.pattern.describe()), id="urls.W003", ) return [warning] else: return [] def _check_callback(self): from django.views import View view = self.callback if inspect.isclass(view) and issubclass(view, View): return [Error( 'Your URL pattern %s has an invalid view, pass %s.as_view() ' 'instead of %s.' % ( self.pattern.describe(), view.__name__, view.__name__, ), id='urls.E009', )] return [] def resolve(self, path): match = self.pattern.match(path) if match: new_path, args, kwargs = match # Pass any extra_kwargs as **kwargs. kwargs.update(self.default_args) return ResolverMatch(self.callback, args, kwargs, self.pattern.name, route=str(self.pattern)) @cached_property def lookup_str(self): """ A string that identifies the view (e.g. 'path.to.view_function' or 'path.to.ClassBasedView'). """ callback = self.callback if isinstance(callback, functools.partial): callback = callback.func if hasattr(callback, 'view_class'): callback = callback.view_class elif not hasattr(callback, '__name__'): return callback.__module__ + "." + callback.__class__.__name__ return callback.__module__ + "." + callback.__qualname__ class URLResolver: def __init__(self, pattern, urlconf_name, default_kwargs=None, app_name=None, namespace=None): self.pattern = pattern # urlconf_name is the dotted Python path to the module defining # urlpatterns. It may also be an object with an urlpatterns attribute # or urlpatterns itself. self.urlconf_name = urlconf_name self.callback = None self.default_kwargs = default_kwargs or {} self.namespace = namespace self.app_name = app_name self._reverse_dict = {} self._namespace_dict = {} self._app_dict = {} # set of dotted paths to all functions and classes that are used in # urlpatterns self._callback_strs = set() self._populated = False self._local = Local() def __repr__(self): if isinstance(self.urlconf_name, list) and self.urlconf_name: # Don't bother to output the whole list, it can be huge urlconf_repr = '<%s list>' % self.urlconf_name[0].__class__.__name__ else: urlconf_repr = repr(self.urlconf_name) return '<%s %s (%s:%s) %s>' % ( self.__class__.__name__, urlconf_repr, self.app_name, self.namespace, self.pattern.describe(), ) def check(self): messages = [] for pattern in self.url_patterns: messages.extend(check_resolver(pattern)) messages.extend(self._check_custom_error_handlers()) return messages or self.pattern.check() def _check_custom_error_handlers(self): messages = [] # All handlers take (request, exception) arguments except handler500 # which takes (request). for status_code, num_parameters in [(400, 2), (403, 2), (404, 2), (500, 1)]: try: handler = self.resolve_error_handler(status_code) except (ImportError, ViewDoesNotExist) as e: path = getattr(self.urlconf_module, 'handler%s' % status_code) msg = ( "The custom handler{status_code} view '{path}' could not be imported." ).format(status_code=status_code, path=path) messages.append(Error(msg, hint=str(e), id='urls.E008')) continue signature = inspect.signature(handler) args = [None] * num_parameters try: signature.bind(*args) except TypeError: msg = ( "The custom handler{status_code} view '{path}' does not " "take the correct number of arguments ({args})." ).format( status_code=status_code, path=handler.__module__ + '.' 
+ handler.__qualname__, args='request, exception' if num_parameters == 2 else 'request', ) messages.append(Error(msg, id='urls.E007')) return messages def _populate(self): # Short-circuit if called recursively in this thread to prevent # infinite recursion. Concurrent threads may call this at the same # time and will need to continue, so set 'populating' on a # thread-local variable. if getattr(self._local, 'populating', False): return try: self._local.populating = True lookups = MultiValueDict() namespaces = {} apps = {} language_code = get_language() for url_pattern in reversed(self.url_patterns): p_pattern = url_pattern.pattern.regex.pattern if p_pattern.startswith('^'): p_pattern = p_pattern[1:] if isinstance(url_pattern, URLPattern): self._callback_strs.add(url_pattern.lookup_str) bits = normalize(url_pattern.pattern.regex.pattern) lookups.appendlist( url_pattern.callback, (bits, p_pattern, url_pattern.default_args, url_pattern.pattern.converters) ) if url_pattern.name is not None: lookups.appendlist( url_pattern.name, (bits, p_pattern, url_pattern.default_args, url_pattern.pattern.converters) ) else: # url_pattern is a URLResolver. url_pattern._populate() if url_pattern.app_name: apps.setdefault(url_pattern.app_name, []).append(url_pattern.namespace) namespaces[url_pattern.namespace] = (p_pattern, url_pattern) else: for name in url_pattern.reverse_dict: for matches, pat, defaults, converters in url_pattern.reverse_dict.getlist(name): new_matches = normalize(p_pattern + pat) lookups.appendlist( name, ( new_matches, p_pattern + pat, {**defaults, **url_pattern.default_kwargs}, {**self.pattern.converters, **url_pattern.pattern.converters, **converters} ) ) for namespace, (prefix, sub_pattern) in url_pattern.namespace_dict.items(): current_converters = url_pattern.pattern.converters sub_pattern.pattern.converters.update(current_converters) namespaces[namespace] = (p_pattern + prefix, sub_pattern) for app_name, namespace_list in url_pattern.app_dict.items(): apps.setdefault(app_name, []).extend(namespace_list) self._callback_strs.update(url_pattern._callback_strs) self._namespace_dict[language_code] = namespaces self._app_dict[language_code] = apps self._reverse_dict[language_code] = lookups self._populated = True finally: self._local.populating = False @property def reverse_dict(self): language_code = get_language() if language_code not in self._reverse_dict: self._populate() return self._reverse_dict[language_code] @property def namespace_dict(self): language_code = get_language() if language_code not in self._namespace_dict: self._populate() return self._namespace_dict[language_code] @property def app_dict(self): language_code = get_language() if language_code not in self._app_dict: self._populate() return self._app_dict[language_code] @staticmethod def _extend_tried(tried, pattern, sub_tried=None): if sub_tried is None: tried.append([pattern]) else: tried.extend([pattern, *t] for t in sub_tried) @staticmethod def _join_route(route1, route2): """Join two routes, without the starting ^ in the second route.""" if not route1: return route2 if route2.startswith('^'): route2 = route2[1:] return route1 + route2 def _is_callback(self, name): if not self._populated: self._populate() return name in self._callback_strs def resolve(self, path): path = str(path) # path may be a reverse_lazy object tried = [] match = self.pattern.match(path) if match: new_path, args, kwargs = match for pattern in self.url_patterns: try: sub_match = pattern.resolve(new_path) except Resolver404 as e: 
self._extend_tried(tried, pattern, e.args[0].get('tried')) else: if sub_match: # Merge captured arguments in match with submatch sub_match_dict = {**kwargs, **self.default_kwargs} # Update the sub_match_dict with the kwargs from the sub_match. sub_match_dict.update(sub_match.kwargs) # If there are *any* named groups, ignore all non-named groups. # Otherwise, pass all non-named arguments as positional arguments. sub_match_args = sub_match.args if not sub_match_dict: sub_match_args = args + sub_match.args current_route = '' if isinstance(pattern, URLPattern) else str(pattern.pattern) self._extend_tried(tried, pattern, sub_match.tried) return ResolverMatch( sub_match.func, sub_match_args, sub_match_dict, sub_match.url_name, [self.app_name] + sub_match.app_names, [self.namespace] + sub_match.namespaces, self._join_route(current_route, sub_match.route), tried, ) tried.append([pattern]) raise Resolver404({'tried': tried, 'path': new_path}) raise Resolver404({'path': path}) @cached_property def urlconf_module(self): if isinstance(self.urlconf_name, str): return import_module(self.urlconf_name) else: return self.urlconf_name @cached_property def url_patterns(self): # urlconf_module might be a valid set of patterns, so we default to it patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module) try: iter(patterns) except TypeError as e: msg = ( "The included URLconf '{name}' does not appear to have " "any patterns in it. If you see the 'urlpatterns' variable " "with valid patterns in the file then the issue is probably " "caused by a circular import." ) raise ImproperlyConfigured(msg.format(name=self.urlconf_name)) from e return patterns def resolve_error_handler(self, view_type): callback = getattr(self.urlconf_module, 'handler%s' % view_type, None) if not callback: # No handler specified in file; use lazy import, since # django.conf.urls imports this file. from django.conf import urls callback = getattr(urls, 'handler%s' % view_type) return get_callable(callback) def reverse(self, lookup_view, *args, **kwargs): return self._reverse_with_prefix(lookup_view, '', *args, **kwargs) def _reverse_with_prefix(self, lookup_view, _prefix, *args, **kwargs): if args and kwargs: raise ValueError("Don't mix *args and **kwargs in call to reverse()!") if not self._populated: self._populate() possibilities = self.reverse_dict.getlist(lookup_view) for possibility, pattern, defaults, converters in possibilities: for result, params in possibility: if args: if len(args) != len(params): continue candidate_subs = dict(zip(params, args)) else: if set(kwargs).symmetric_difference(params).difference(defaults): continue if any(kwargs.get(k, v) != v for k, v in defaults.items()): continue candidate_subs = kwargs # Convert the candidate subs to text using Converter.to_url(). text_candidate_subs = {} match = True for k, v in candidate_subs.items(): if k in converters: try: text_candidate_subs[k] = converters[k].to_url(v) except ValueError: match = False break else: text_candidate_subs[k] = str(v) if not match: continue # WSGI provides decoded URLs, without %xx escapes, and the URL # resolver operates on such URLs. First substitute arguments # without quoting to build a decoded URL and look for a match. # Then, if we have a match, redo the substitution with quoted # arguments in order to return a properly encoded URL. 
candidate_pat = _prefix.replace('%', '%%') + result if re.search('^%s%s' % (re.escape(_prefix), pattern), candidate_pat % text_candidate_subs): # safe characters from `pchar` definition of RFC 3986 url = quote(candidate_pat % text_candidate_subs, safe=RFC3986_SUBDELIMS + '/~:@') # Don't allow construction of scheme relative urls. return escape_leading_slashes(url) # lookup_view can be URL name or callable, but callables are not # friendly in error messages. m = getattr(lookup_view, '__module__', None) n = getattr(lookup_view, '__name__', None) if m is not None and n is not None: lookup_view_s = "%s.%s" % (m, n) else: lookup_view_s = lookup_view patterns = [pattern for (_, pattern, _, _) in possibilities] if patterns: if args: arg_msg = "arguments '%s'" % (args,) elif kwargs: arg_msg = "keyword arguments '%s'" % kwargs else: arg_msg = "no arguments" msg = ( "Reverse for '%s' with %s not found. %d pattern(s) tried: %s" % (lookup_view_s, arg_msg, len(patterns), patterns) ) else: msg = ( "Reverse for '%(view)s' not found. '%(view)s' is not " "a valid view function or pattern name." % {'view': lookup_view_s} ) raise NoReverseMatch(msg)
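

# ---------------------------------------------------------------------------
# Editor's sketch, not part of Django: a minimal, import-safe illustration of
# how _route_to_regex() (private API) turns a path() route into the regex and
# converter mapping that RoutePattern consumes. The helper name
# _demo_route_to_regex is hypothetical; it only runs if explicitly called.
def _demo_route_to_regex():
    regex, converters = _route_to_regex('articles/<int:year>/', is_endpoint=True)
    # The <int:year> segment becomes a named group built from
    # IntConverter.regex, and is_endpoint=True appends the terminating '$'.
    assert regex == '^articles/(?P<year>[0-9]+)/$'
    assert list(converters) == ['year']
    # RoutePattern.match() later runs each captured string through
    # converter.to_python() before the value reaches the view.
    assert converters['year'].to_python('2021') == 2021
    return regex, converters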
a260984e03c7ae94089b14100b7b847264648428087bd20a0531b6a565a24fe7
from django.core.exceptions import ValidationError from django.forms import Form from django.forms.fields import BooleanField, IntegerField from django.forms.utils import ErrorList from django.forms.widgets import CheckboxInput, HiddenInput, NumberInput from django.utils.functional import cached_property from django.utils.html import html_safe from django.utils.safestring import mark_safe from django.utils.translation import gettext_lazy as _, ngettext __all__ = ('BaseFormSet', 'formset_factory', 'all_valid') # special field names TOTAL_FORM_COUNT = 'TOTAL_FORMS' INITIAL_FORM_COUNT = 'INITIAL_FORMS' MIN_NUM_FORM_COUNT = 'MIN_NUM_FORMS' MAX_NUM_FORM_COUNT = 'MAX_NUM_FORMS' ORDERING_FIELD_NAME = 'ORDER' DELETION_FIELD_NAME = 'DELETE' # default minimum number of forms in a formset DEFAULT_MIN_NUM = 0 # default maximum number of forms in a formset, to prevent memory exhaustion DEFAULT_MAX_NUM = 1000 class ManagementForm(Form): """ Keep track of how many form instances are displayed on the page. If adding new forms via JavaScript, you should increment the count field of this form as well. """ def __init__(self, *args, **kwargs): self.base_fields[TOTAL_FORM_COUNT] = IntegerField(widget=HiddenInput) self.base_fields[INITIAL_FORM_COUNT] = IntegerField(widget=HiddenInput) # MIN_NUM_FORM_COUNT and MAX_NUM_FORM_COUNT are output with the rest of # the management form, but only for the convenience of client-side # code. The POST value of them returned from the client is not checked. self.base_fields[MIN_NUM_FORM_COUNT] = IntegerField(required=False, widget=HiddenInput) self.base_fields[MAX_NUM_FORM_COUNT] = IntegerField(required=False, widget=HiddenInput) super().__init__(*args, **kwargs) def clean(self): cleaned_data = super().clean() # When the management form is invalid, we don't know how many forms # were submitted. cleaned_data.setdefault(TOTAL_FORM_COUNT, 0) cleaned_data.setdefault(INITIAL_FORM_COUNT, 0) return cleaned_data @html_safe class BaseFormSet: """ A collection of instances of the same Form class. """ deletion_widget = CheckboxInput ordering_widget = NumberInput default_error_messages = { 'missing_management_form': _( 'ManagementForm data is missing or has been tampered with. Missing fields: ' '%(field_names)s. You may need to file a bug report if the issue persists.' ), } def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None, initial=None, error_class=ErrorList, form_kwargs=None, error_messages=None): self.is_bound = data is not None or files is not None self.prefix = prefix or self.get_default_prefix() self.auto_id = auto_id self.data = data or {} self.files = files or {} self.initial = initial self.form_kwargs = form_kwargs or {} self.error_class = error_class self._errors = None self._non_form_errors = None messages = {} for cls in reversed(type(self).__mro__): messages.update(getattr(cls, 'default_error_messages', {})) if error_messages is not None: messages.update(error_messages) self.error_messages = messages def __str__(self): return self.as_table() def __iter__(self): """Yield the forms in the order they should be rendered.""" return iter(self.forms) def __getitem__(self, index): """Return the form at the given index, based on the rendering order.""" return self.forms[index] def __len__(self): return len(self.forms) def __bool__(self): """ Return True since all formsets have a management form which is not included in the length. 
""" return True @cached_property def management_form(self): """Return the ManagementForm instance for this FormSet.""" if self.is_bound: form = ManagementForm(self.data, auto_id=self.auto_id, prefix=self.prefix) form.full_clean() else: form = ManagementForm(auto_id=self.auto_id, prefix=self.prefix, initial={ TOTAL_FORM_COUNT: self.total_form_count(), INITIAL_FORM_COUNT: self.initial_form_count(), MIN_NUM_FORM_COUNT: self.min_num, MAX_NUM_FORM_COUNT: self.max_num }) return form def total_form_count(self): """Return the total number of forms in this FormSet.""" if self.is_bound: # return absolute_max if it is lower than the actual total form # count in the data; this is DoS protection to prevent clients # from forcing the server to instantiate arbitrary numbers of # forms return min(self.management_form.cleaned_data[TOTAL_FORM_COUNT], self.absolute_max) else: initial_forms = self.initial_form_count() total_forms = max(initial_forms, self.min_num) + self.extra # Allow all existing related objects/inlines to be displayed, # but don't allow extra beyond max_num. if initial_forms > self.max_num >= 0: total_forms = initial_forms elif total_forms > self.max_num >= 0: total_forms = self.max_num return total_forms def initial_form_count(self): """Return the number of forms that are required in this FormSet.""" if self.is_bound: return self.management_form.cleaned_data[INITIAL_FORM_COUNT] else: # Use the length of the initial data if it's there, 0 otherwise. initial_forms = len(self.initial) if self.initial else 0 return initial_forms @cached_property def forms(self): """Instantiate forms at first property access.""" # DoS protection is included in total_form_count() return [ self._construct_form(i, **self.get_form_kwargs(i)) for i in range(self.total_form_count()) ] def get_form_kwargs(self, index): """ Return additional keyword arguments for each individual formset form. index will be None if the form being constructed is a new empty form. """ return self.form_kwargs.copy() def _construct_form(self, i, **kwargs): """Instantiate and return the i-th form instance in a formset.""" defaults = { 'auto_id': self.auto_id, 'prefix': self.add_prefix(i), 'error_class': self.error_class, # Don't render the HTML 'required' attribute as it may cause # incorrect validation for extra, optional, and deleted # forms in the formset. 'use_required_attribute': False, } if self.is_bound: defaults['data'] = self.data defaults['files'] = self.files if self.initial and 'initial' not in kwargs: try: defaults['initial'] = self.initial[i] except IndexError: pass # Allow extra forms to be empty, unless they're part of # the minimum forms. if i >= self.initial_form_count() and i >= self.min_num: defaults['empty_permitted'] = True defaults.update(kwargs) form = self.form(**defaults) self.add_fields(form, i) return form @property def initial_forms(self): """Return a list of all the initial forms in this formset.""" return self.forms[:self.initial_form_count()] @property def extra_forms(self): """Return a list of all the extra forms in this formset.""" return self.forms[self.initial_form_count():] @property def empty_form(self): form = self.form( auto_id=self.auto_id, prefix=self.add_prefix('__prefix__'), empty_permitted=True, use_required_attribute=False, **self.get_form_kwargs(None) ) self.add_fields(form, None) return form @property def cleaned_data(self): """ Return a list of form.cleaned_data dicts for every form in self.forms. 
""" if not self.is_valid(): raise AttributeError("'%s' object has no attribute 'cleaned_data'" % self.__class__.__name__) return [form.cleaned_data for form in self.forms] @property def deleted_forms(self): """Return a list of forms that have been marked for deletion.""" if not self.is_valid() or not self.can_delete: return [] # construct _deleted_form_indexes which is just a list of form indexes # that have had their deletion widget set to True if not hasattr(self, '_deleted_form_indexes'): self._deleted_form_indexes = [] for i, form in enumerate(self.forms): # if this is an extra form and hasn't changed, don't consider it if i >= self.initial_form_count() and not form.has_changed(): continue if self._should_delete_form(form): self._deleted_form_indexes.append(i) return [self.forms[i] for i in self._deleted_form_indexes] @property def ordered_forms(self): """ Return a list of form in the order specified by the incoming data. Raise an AttributeError if ordering is not allowed. """ if not self.is_valid() or not self.can_order: raise AttributeError("'%s' object has no attribute 'ordered_forms'" % self.__class__.__name__) # Construct _ordering, which is a list of (form_index, order_field_value) # tuples. After constructing this list, we'll sort it by order_field_value # so we have a way to get to the form indexes in the order specified # by the form data. if not hasattr(self, '_ordering'): self._ordering = [] for i, form in enumerate(self.forms): # if this is an extra form and hasn't changed, don't consider it if i >= self.initial_form_count() and not form.has_changed(): continue # don't add data marked for deletion to self.ordered_data if self.can_delete and self._should_delete_form(form): continue self._ordering.append((i, form.cleaned_data[ORDERING_FIELD_NAME])) # After we're done populating self._ordering, sort it. # A sort function to order things numerically ascending, but # None should be sorted below anything else. Allowing None as # a comparison value makes it so we can leave ordering fields # blank. def compare_ordering_key(k): if k[1] is None: return (1, 0) # +infinity, larger than any number return (0, k[1]) self._ordering.sort(key=compare_ordering_key) # Return a list of form.cleaned_data dicts in the order specified by # the form data. return [self.forms[i[0]] for i in self._ordering] @classmethod def get_default_prefix(cls): return 'form' @classmethod def get_deletion_widget(cls): return cls.deletion_widget @classmethod def get_ordering_widget(cls): return cls.ordering_widget def non_form_errors(self): """ Return an ErrorList of errors that aren't associated with a particular form -- i.e., from formset.clean(). Return an empty ErrorList if there are none. """ if self._non_form_errors is None: self.full_clean() return self._non_form_errors @property def errors(self): """Return a list of form.errors for every form in self.forms.""" if self._errors is None: self.full_clean() return self._errors def total_error_count(self): """Return the number of errors across all forms in the formset.""" return len(self.non_form_errors()) +\ sum(len(form_errors) for form_errors in self.errors) def _should_delete_form(self, form): """Return whether or not the form was marked for deletion.""" return form.cleaned_data.get(DELETION_FIELD_NAME, False) def is_valid(self): """Return True if every form in self.forms is valid.""" if not self.is_bound: return False # Accessing errors triggers a full clean the first time only. self.errors # List comprehension ensures is_valid() is called for all forms. 
# Forms due to be deleted shouldn't cause the formset to be invalid. forms_valid = all([ form.is_valid() for form in self.forms if not (self.can_delete and self._should_delete_form(form)) ]) return forms_valid and not self.non_form_errors() def full_clean(self): """ Clean all of self.data and populate self._errors and self._non_form_errors. """ self._errors = [] self._non_form_errors = self.error_class(error_class='nonform') empty_forms_count = 0 if not self.is_bound: # Stop further processing. return if not self.management_form.is_valid(): error = ValidationError( self.error_messages['missing_management_form'], params={ 'field_names': ', '.join( self.management_form.add_prefix(field_name) for field_name in self.management_form.errors ), }, code='missing_management_form', ) self._non_form_errors.append(error) for i, form in enumerate(self.forms): # Empty forms are unchanged forms beyond those with initial data. if not form.has_changed() and i >= self.initial_form_count(): empty_forms_count += 1 # Accessing errors calls full_clean() if necessary. # _should_delete_form() requires cleaned_data. form_errors = form.errors if self.can_delete and self._should_delete_form(form): continue self._errors.append(form_errors) try: if (self.validate_max and self.total_form_count() - len(self.deleted_forms) > self.max_num) or \ self.management_form.cleaned_data[TOTAL_FORM_COUNT] > self.absolute_max: raise ValidationError(ngettext( "Please submit at most %d form.", "Please submit at most %d forms.", self.max_num) % self.max_num, code='too_many_forms', ) if (self.validate_min and self.total_form_count() - len(self.deleted_forms) - empty_forms_count < self.min_num): raise ValidationError(ngettext( "Please submit at least %d form.", "Please submit at least %d forms.", self.min_num) % self.min_num, code='too_few_forms') # Give self.clean() a chance to do cross-form validation. self.clean() except ValidationError as e: self._non_form_errors = self.error_class( e.error_list, error_class='nonform' ) def clean(self): """ Hook for doing any extra formset-wide cleaning after Form.clean() has been called on every form. Any ValidationError raised by this method will not be associated with a particular form; it will be accessible via formset.non_form_errors() """ pass def has_changed(self): """Return True if data in any form differs from initial.""" return any(form.has_changed() for form in self) def add_fields(self, form, index): """A hook for adding extra fields on to each form instance.""" initial_form_count = self.initial_form_count() if self.can_order: # Only pre-fill the ordering field for initial forms. if index is not None and index < initial_form_count: form.fields[ORDERING_FIELD_NAME] = IntegerField( label=_('Order'), initial=index + 1, required=False, widget=self.get_ordering_widget(), ) else: form.fields[ORDERING_FIELD_NAME] = IntegerField( label=_('Order'), required=False, widget=self.get_ordering_widget(), ) if self.can_delete and (self.can_delete_extra or index < initial_form_count): form.fields[DELETION_FIELD_NAME] = BooleanField( label=_('Delete'), required=False, widget=self.get_deletion_widget(), ) def add_prefix(self, index): return '%s-%s' % (self.prefix, index) def is_multipart(self): """ Return True if the formset needs to be multipart, i.e. it has FileInput, or False otherwise. 
""" if self.forms: return self.forms[0].is_multipart() else: return self.empty_form.is_multipart() @property def media(self): # All the forms on a FormSet are the same, so you only need to # interrogate the first form for media. if self.forms: return self.forms[0].media else: return self.empty_form.media def as_table(self): "Return this formset rendered as HTML <tr>s -- excluding the <table></table>." # XXX: there is no semantic division between forms here, there # probably should be. It might make sense to render each form as a # table row with each field as a td. forms = ' '.join(form.as_table() for form in self) return mark_safe(str(self.management_form) + '\n' + forms) def as_p(self): "Return this formset rendered as HTML <p>s." forms = ' '.join(form.as_p() for form in self) return mark_safe(str(self.management_form) + '\n' + forms) def as_ul(self): "Return this formset rendered as HTML <li>s." forms = ' '.join(form.as_ul() for form in self) return mark_safe(str(self.management_form) + '\n' + forms) def formset_factory(form, formset=BaseFormSet, extra=1, can_order=False, can_delete=False, max_num=None, validate_max=False, min_num=None, validate_min=False, absolute_max=None, can_delete_extra=True): """Return a FormSet for the given form class.""" if min_num is None: min_num = DEFAULT_MIN_NUM if max_num is None: max_num = DEFAULT_MAX_NUM # absolute_max is a hard limit on forms instantiated, to prevent # memory-exhaustion attacks. Default to max_num + DEFAULT_MAX_NUM # (which is 2 * DEFAULT_MAX_NUM if max_num is None in the first place). if absolute_max is None: absolute_max = max_num + DEFAULT_MAX_NUM if max_num > absolute_max: raise ValueError( "'absolute_max' must be greater or equal to 'max_num'." ) attrs = { 'form': form, 'extra': extra, 'can_order': can_order, 'can_delete': can_delete, 'can_delete_extra': can_delete_extra, 'min_num': min_num, 'max_num': max_num, 'absolute_max': absolute_max, 'validate_min': validate_min, 'validate_max': validate_max, } return type(form.__name__ + 'FormSet', (formset,), attrs) def all_valid(formsets): """Validate every formset and return True if all are valid.""" # List comprehension ensures is_valid() is called for all formsets. return all([formset.is_valid() for formset in formsets])
af0a60022cc23b0a5473d7a8538ee568fbf189458688968ddc67c4941fac34b6
""" HTML Widget classes """ import copy import datetime import warnings from collections import defaultdict from itertools import chain from django.forms.utils import to_current_timezone from django.templatetags.static import static from django.utils import formats from django.utils.datastructures import OrderedSet from django.utils.dates import MONTHS from django.utils.formats import get_format from django.utils.html import format_html, html_safe from django.utils.regex_helper import _lazy_re_compile from django.utils.safestring import mark_safe from django.utils.topological_sort import ( CyclicDependencyError, stable_topological_sort, ) from django.utils.translation import gettext_lazy as _ from .renderers import get_default_renderer __all__ = ( 'Media', 'MediaDefiningClass', 'Widget', 'TextInput', 'NumberInput', 'EmailInput', 'URLInput', 'PasswordInput', 'HiddenInput', 'MultipleHiddenInput', 'FileInput', 'ClearableFileInput', 'Textarea', 'DateInput', 'DateTimeInput', 'TimeInput', 'CheckboxInput', 'Select', 'NullBooleanSelect', 'SelectMultiple', 'RadioSelect', 'CheckboxSelectMultiple', 'MultiWidget', 'SplitDateTimeWidget', 'SplitHiddenDateTimeWidget', 'SelectDateWidget', ) MEDIA_TYPES = ('css', 'js') class MediaOrderConflictWarning(RuntimeWarning): pass @html_safe class Media: def __init__(self, media=None, css=None, js=None): if media is not None: css = getattr(media, 'css', {}) js = getattr(media, 'js', []) else: if css is None: css = {} if js is None: js = [] self._css_lists = [css] self._js_lists = [js] def __repr__(self): return 'Media(css=%r, js=%r)' % (self._css, self._js) def __str__(self): return self.render() @property def _css(self): css = defaultdict(list) for css_list in self._css_lists: for medium, sublist in css_list.items(): css[medium].append(sublist) return {medium: self.merge(*lists) for medium, lists in css.items()} @property def _js(self): return self.merge(*self._js_lists) def render(self): return mark_safe('\n'.join(chain.from_iterable(getattr(self, 'render_' + name)() for name in MEDIA_TYPES))) def render_js(self): return [ format_html( '<script src="{}"></script>', self.absolute_path(path) ) for path in self._js ] def render_css(self): # To keep rendering order consistent, we can't just iterate over items(). # We need to sort the keys, and iterate over the sorted list. media = sorted(self._css) return chain.from_iterable([ format_html( '<link href="{}" type="text/css" media="{}" rel="stylesheet">', self.absolute_path(path), medium ) for path in self._css[medium] ] for medium in media) def absolute_path(self, path): """ Given a relative or absolute path to a static asset, return an absolute path. An absolute path will be returned unchanged while a relative path will be passed to django.templatetags.static.static(). """ if path.startswith(('http://', 'https://', '/')): return path return static(path) def __getitem__(self, name): """Return a Media object that only contains media of the given type.""" if name in MEDIA_TYPES: return Media(**{str(name): getattr(self, '_' + name)}) raise KeyError('Unknown media type "%s"' % name) @staticmethod def merge(*lists): """ Merge lists while trying to keep the relative order of the elements. Warn if the lists have the same elements in a different relative order. For static assets it can be important to have them included in the DOM in a certain order. In JavaScript you may not be able to reference a global or in CSS you might want to override a style. 
""" dependency_graph = defaultdict(set) all_items = OrderedSet() for list_ in filter(None, lists): head = list_[0] # The first items depend on nothing but have to be part of the # dependency graph to be included in the result. dependency_graph.setdefault(head, set()) for item in list_: all_items.add(item) # No self dependencies if head != item: dependency_graph[item].add(head) head = item try: return stable_topological_sort(all_items, dependency_graph) except CyclicDependencyError: warnings.warn( 'Detected duplicate Media files in an opposite order: {}'.format( ', '.join(repr(list_) for list_ in lists) ), MediaOrderConflictWarning, ) return list(all_items) def __add__(self, other): combined = Media() combined._css_lists = self._css_lists[:] combined._js_lists = self._js_lists[:] for item in other._css_lists: if item and item not in self._css_lists: combined._css_lists.append(item) for item in other._js_lists: if item and item not in self._js_lists: combined._js_lists.append(item) return combined def media_property(cls): def _media(self): # Get the media property of the superclass, if it exists sup_cls = super(cls, self) try: base = sup_cls.media except AttributeError: base = Media() # Get the media definition for this class definition = getattr(cls, 'Media', None) if definition: extend = getattr(definition, 'extend', True) if extend: if extend is True: m = base else: m = Media() for medium in extend: m = m + base[medium] return m + Media(definition) return Media(definition) return base return property(_media) class MediaDefiningClass(type): """ Metaclass for classes that can have media definitions. """ def __new__(mcs, name, bases, attrs): new_class = super().__new__(mcs, name, bases, attrs) if 'media' not in attrs: new_class.media = media_property(new_class) return new_class class Widget(metaclass=MediaDefiningClass): needs_multipart_form = False # Determines does this widget need multipart form is_localized = False is_required = False supports_microseconds = True def __init__(self, attrs=None): self.attrs = {} if attrs is None else attrs.copy() def __deepcopy__(self, memo): obj = copy.copy(self) obj.attrs = self.attrs.copy() memo[id(self)] = obj return obj @property def is_hidden(self): return self.input_type == 'hidden' if hasattr(self, 'input_type') else False def subwidgets(self, name, value, attrs=None): context = self.get_context(name, value, attrs) yield context['widget'] def format_value(self, value): """ Return a value as it should appear when rendered in a template. 
""" if value == '' or value is None: return None if self.is_localized: return formats.localize_input(value) return str(value) def get_context(self, name, value, attrs): return { 'widget': { 'name': name, 'is_hidden': self.is_hidden, 'required': self.is_required, 'value': self.format_value(value), 'attrs': self.build_attrs(self.attrs, attrs), 'template_name': self.template_name, }, } def render(self, name, value, attrs=None, renderer=None): """Render the widget as an HTML string.""" context = self.get_context(name, value, attrs) return self._render(self.template_name, context, renderer) def _render(self, template_name, context, renderer=None): if renderer is None: renderer = get_default_renderer() return mark_safe(renderer.render(template_name, context)) def build_attrs(self, base_attrs, extra_attrs=None): """Build an attribute dictionary.""" return {**base_attrs, **(extra_attrs or {})} def value_from_datadict(self, data, files, name): """ Given a dictionary of data and this widget's name, return the value of this widget or None if it's not provided. """ return data.get(name) def value_omitted_from_data(self, data, files, name): return name not in data def id_for_label(self, id_): """ Return the HTML ID attribute of this Widget for use by a <label>, given the ID of the field. Return None if no ID is available. This hook is necessary because some widgets have multiple HTML elements and, thus, multiple IDs. In that case, this method should return an ID value that corresponds to the first ID in the widget's tags. """ return id_ def use_required_attribute(self, initial): return not self.is_hidden class Input(Widget): """ Base class for all <input> widgets. """ input_type = None # Subclasses must define this. template_name = 'django/forms/widgets/input.html' def __init__(self, attrs=None): if attrs is not None: attrs = attrs.copy() self.input_type = attrs.pop('type', self.input_type) super().__init__(attrs) def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) context['widget']['type'] = self.input_type return context class TextInput(Input): input_type = 'text' template_name = 'django/forms/widgets/text.html' class NumberInput(Input): input_type = 'number' template_name = 'django/forms/widgets/number.html' class EmailInput(Input): input_type = 'email' template_name = 'django/forms/widgets/email.html' class URLInput(Input): input_type = 'url' template_name = 'django/forms/widgets/url.html' class PasswordInput(Input): input_type = 'password' template_name = 'django/forms/widgets/password.html' def __init__(self, attrs=None, render_value=False): super().__init__(attrs) self.render_value = render_value def get_context(self, name, value, attrs): if not self.render_value: value = None return super().get_context(name, value, attrs) class HiddenInput(Input): input_type = 'hidden' template_name = 'django/forms/widgets/hidden.html' class MultipleHiddenInput(HiddenInput): """ Handle <input type="hidden"> for fields that have a list of values. """ template_name = 'django/forms/widgets/multiple_hidden.html' def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) final_attrs = context['widget']['attrs'] id_ = context['widget']['attrs'].get('id') subwidgets = [] for index, value_ in enumerate(context['widget']['value']): widget_attrs = final_attrs.copy() if id_: # An ID attribute was given. Add a numeric index as a suffix # so that the inputs don't all have the same ID attribute. 
widget_attrs['id'] = '%s_%s' % (id_, index) widget = HiddenInput() widget.is_required = self.is_required subwidgets.append(widget.get_context(name, value_, widget_attrs)['widget']) context['widget']['subwidgets'] = subwidgets return context def value_from_datadict(self, data, files, name): try: getter = data.getlist except AttributeError: getter = data.get return getter(name) def format_value(self, value): return [] if value is None else value class FileInput(Input): input_type = 'file' needs_multipart_form = True template_name = 'django/forms/widgets/file.html' def format_value(self, value): """File input never renders a value.""" return def value_from_datadict(self, data, files, name): "File widgets take data from FILES, not POST" return files.get(name) def value_omitted_from_data(self, data, files, name): return name not in files def use_required_attribute(self, initial): return super().use_required_attribute(initial) and not initial FILE_INPUT_CONTRADICTION = object() class ClearableFileInput(FileInput): clear_checkbox_label = _('Clear') initial_text = _('Currently') input_text = _('Change') template_name = 'django/forms/widgets/clearable_file_input.html' def clear_checkbox_name(self, name): """ Given the name of the file input, return the name of the clear checkbox input. """ return name + '-clear' def clear_checkbox_id(self, name): """ Given the name of the clear checkbox input, return the HTML id for it. """ return name + '_id' def is_initial(self, value): """ Return whether value is considered to be initial value. """ return bool(value and getattr(value, 'url', False)) def format_value(self, value): """ Return the file object if it has a defined url attribute. """ if self.is_initial(value): return value def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) checkbox_name = self.clear_checkbox_name(name) checkbox_id = self.clear_checkbox_id(checkbox_name) context['widget'].update({ 'checkbox_name': checkbox_name, 'checkbox_id': checkbox_id, 'is_initial': self.is_initial(value), 'input_text': self.input_text, 'initial_text': self.initial_text, 'clear_checkbox_label': self.clear_checkbox_label, }) return context def value_from_datadict(self, data, files, name): upload = super().value_from_datadict(data, files, name) if not self.is_required and CheckboxInput().value_from_datadict( data, files, self.clear_checkbox_name(name)): if upload: # If the user contradicts themselves (uploads a new file AND # checks the "clear" checkbox), we return a unique marker # object that FileField will turn into a ValidationError. 
return FILE_INPUT_CONTRADICTION # False signals to clear any existing value, as opposed to just None return False return upload def value_omitted_from_data(self, data, files, name): return ( super().value_omitted_from_data(data, files, name) and self.clear_checkbox_name(name) not in data ) class Textarea(Widget): template_name = 'django/forms/widgets/textarea.html' def __init__(self, attrs=None): # Use slightly better defaults than HTML's 20x2 box default_attrs = {'cols': '40', 'rows': '10'} if attrs: default_attrs.update(attrs) super().__init__(default_attrs) class DateTimeBaseInput(TextInput): format_key = '' supports_microseconds = False def __init__(self, attrs=None, format=None): super().__init__(attrs) self.format = format or None def format_value(self, value): return formats.localize_input(value, self.format or formats.get_format(self.format_key)[0]) class DateInput(DateTimeBaseInput): format_key = 'DATE_INPUT_FORMATS' template_name = 'django/forms/widgets/date.html' class DateTimeInput(DateTimeBaseInput): format_key = 'DATETIME_INPUT_FORMATS' template_name = 'django/forms/widgets/datetime.html' class TimeInput(DateTimeBaseInput): format_key = 'TIME_INPUT_FORMATS' template_name = 'django/forms/widgets/time.html' # Defined at module level so that CheckboxInput is picklable (#17976) def boolean_check(v): return not (v is False or v is None or v == '') class CheckboxInput(Input): input_type = 'checkbox' template_name = 'django/forms/widgets/checkbox.html' def __init__(self, attrs=None, check_test=None): super().__init__(attrs) # check_test is a callable that takes a value and returns True # if the checkbox should be checked for that value. self.check_test = boolean_check if check_test is None else check_test def format_value(self, value): """Only return the 'value' attribute if value isn't empty.""" if value is True or value is False or value is None or value == '': return return str(value) def get_context(self, name, value, attrs): if self.check_test(value): attrs = {**(attrs or {}), 'checked': True} return super().get_context(name, value, attrs) def value_from_datadict(self, data, files, name): if name not in data: # A missing value means False because HTML form submission does not # send results for unselected checkboxes. return False value = data.get(name) # Translate true and false strings to boolean values. values = {'true': True, 'false': False} if isinstance(value, str): value = values.get(value.lower(), value) return bool(value) def value_omitted_from_data(self, data, files, name): # HTML checkboxes don't appear in POST data if not checked, so it's # never known if the value is actually omitted. return False class ChoiceWidget(Widget): allow_multiple_selected = False input_type = None template_name = None option_template_name = None add_id_index = True checked_attribute = {'checked': True} option_inherits_attrs = True def __init__(self, attrs=None, choices=()): super().__init__(attrs) # choices can be any iterable, but we may need to render this widget # multiple times. Thus, collapse it into a list so it can be consumed # more than once. self.choices = list(choices) def __deepcopy__(self, memo): obj = copy.copy(self) obj.attrs = self.attrs.copy() obj.choices = copy.copy(self.choices) memo[id(self)] = obj return obj def subwidgets(self, name, value, attrs=None): """ Yield all "subwidgets" of this widget. Used to enable iterating options from a BoundField for choice widgets. 
""" value = self.format_value(value) yield from self.options(name, value, attrs) def options(self, name, value, attrs=None): """Yield a flat list of options for this widgets.""" for group in self.optgroups(name, value, attrs): yield from group[1] def optgroups(self, name, value, attrs=None): """Return a list of optgroups for this widget.""" groups = [] has_selected = False for index, (option_value, option_label) in enumerate(self.choices): if option_value is None: option_value = '' subgroup = [] if isinstance(option_label, (list, tuple)): group_name = option_value subindex = 0 choices = option_label else: group_name = None subindex = None choices = [(option_value, option_label)] groups.append((group_name, subgroup, index)) for subvalue, sublabel in choices: selected = ( (not has_selected or self.allow_multiple_selected) and str(subvalue) in value ) has_selected |= selected subgroup.append(self.create_option( name, subvalue, sublabel, selected, index, subindex=subindex, attrs=attrs, )) if subindex is not None: subindex += 1 return groups def create_option(self, name, value, label, selected, index, subindex=None, attrs=None): index = str(index) if subindex is None else "%s_%s" % (index, subindex) option_attrs = self.build_attrs(self.attrs, attrs) if self.option_inherits_attrs else {} if selected: option_attrs.update(self.checked_attribute) if 'id' in option_attrs: option_attrs['id'] = self.id_for_label(option_attrs['id'], index) return { 'name': name, 'value': value, 'label': label, 'selected': selected, 'index': index, 'attrs': option_attrs, 'type': self.input_type, 'template_name': self.option_template_name, 'wrap_label': True, } def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) context['widget']['optgroups'] = self.optgroups(name, context['widget']['value'], attrs) return context def id_for_label(self, id_, index='0'): """ Use an incremented id for each option where the main widget references the zero index. """ if id_ and self.add_id_index: id_ = '%s_%s' % (id_, index) return id_ def value_from_datadict(self, data, files, name): getter = data.get if self.allow_multiple_selected: try: getter = data.getlist except AttributeError: pass return getter(name) def format_value(self, value): """Return selected values as a list.""" if value is None and self.allow_multiple_selected: return [] if not isinstance(value, (tuple, list)): value = [value] return [str(v) if v is not None else '' for v in value] class Select(ChoiceWidget): input_type = 'select' template_name = 'django/forms/widgets/select.html' option_template_name = 'django/forms/widgets/select_option.html' add_id_index = False checked_attribute = {'selected': True} option_inherits_attrs = False def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) if self.allow_multiple_selected: context['widget']['attrs']['multiple'] = True return context @staticmethod def _choice_has_empty_value(choice): """Return True if the choice's value is empty string or None.""" value, _ = choice return value is None or value == '' def use_required_attribute(self, initial): """ Don't render 'required' if the first <option> has a value, as that's invalid HTML. """ use_required_attribute = super().use_required_attribute(initial) # 'required' is always okay for <select multiple>. 
if self.allow_multiple_selected: return use_required_attribute first_choice = next(iter(self.choices), None) return use_required_attribute and first_choice is not None and self._choice_has_empty_value(first_choice) class NullBooleanSelect(Select): """ A Select Widget intended to be used with NullBooleanField. """ def __init__(self, attrs=None): choices = ( ('unknown', _('Unknown')), ('true', _('Yes')), ('false', _('No')), ) super().__init__(attrs, choices) def format_value(self, value): try: return { True: 'true', False: 'false', 'true': 'true', 'false': 'false', # For backwards compatibility with Django < 2.2. '2': 'true', '3': 'false', }[value] except KeyError: return 'unknown' def value_from_datadict(self, data, files, name): value = data.get(name) return { True: True, 'True': True, 'False': False, False: False, 'true': True, 'false': False, # For backwards compatibility with Django < 2.2. '2': True, '3': False, }.get(value) class SelectMultiple(Select): allow_multiple_selected = True def value_from_datadict(self, data, files, name): try: getter = data.getlist except AttributeError: getter = data.get return getter(name) def value_omitted_from_data(self, data, files, name): # An unselected <select multiple> doesn't appear in POST data, so it's # never known if the value is actually omitted. return False class RadioSelect(ChoiceWidget): input_type = 'radio' template_name = 'django/forms/widgets/radio.html' option_template_name = 'django/forms/widgets/radio_option.html' def id_for_label(self, id_, index=None): """ Don't include for="field_0" in <label> to improve accessibility when using a screen reader, in addition clicking such a label would toggle the first input. """ if index is None: return '' return super().id_for_label(id_, index) class CheckboxSelectMultiple(RadioSelect): allow_multiple_selected = True input_type = 'checkbox' template_name = 'django/forms/widgets/checkbox_select.html' option_template_name = 'django/forms/widgets/checkbox_option.html' def use_required_attribute(self, initial): # Don't use the 'required' attribute because browser validation would # require all checkboxes to be checked instead of at least one. return False def value_omitted_from_data(self, data, files, name): # HTML checkboxes don't appear in POST data if not checked, so it's # never known if the value is actually omitted. return False class MultiWidget(Widget): """ A widget that is composed of multiple widgets. In addition to the values added by Widget.get_context(), this widget adds a list of subwidgets to the context as widget['subwidgets']. These can be looped over and rendered like normal widgets. You'll probably want to use this class with MultiValueField. """ template_name = 'django/forms/widgets/multiwidget.html' def __init__(self, widgets, attrs=None): if isinstance(widgets, dict): self.widgets_names = [ ('_%s' % name) if name else '' for name in widgets ] widgets = widgets.values() else: self.widgets_names = ['_%s' % i for i in range(len(widgets))] self.widgets = [w() if isinstance(w, type) else w for w in widgets] super().__init__(attrs) @property def is_hidden(self): return all(w.is_hidden for w in self.widgets) def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) if self.is_localized: for widget in self.widgets: widget.is_localized = self.is_localized # value is a list of values, each corresponding to a widget # in self.widgets. 
if not isinstance(value, list): value = self.decompress(value) final_attrs = context['widget']['attrs'] input_type = final_attrs.pop('type', None) id_ = final_attrs.get('id') subwidgets = [] for i, (widget_name, widget) in enumerate(zip(self.widgets_names, self.widgets)): if input_type is not None: widget.input_type = input_type widget_name = name + widget_name try: widget_value = value[i] except IndexError: widget_value = None if id_: widget_attrs = final_attrs.copy() widget_attrs['id'] = '%s_%s' % (id_, i) else: widget_attrs = final_attrs subwidgets.append(widget.get_context(widget_name, widget_value, widget_attrs)['widget']) context['widget']['subwidgets'] = subwidgets return context def id_for_label(self, id_): if id_: id_ += '_0' return id_ def value_from_datadict(self, data, files, name): return [ widget.value_from_datadict(data, files, name + widget_name) for widget_name, widget in zip(self.widgets_names, self.widgets) ] def value_omitted_from_data(self, data, files, name): return all( widget.value_omitted_from_data(data, files, name + widget_name) for widget_name, widget in zip(self.widgets_names, self.widgets) ) def decompress(self, value): """ Return a list of decompressed values for the given compressed value. The given value can be assumed to be valid, but not necessarily non-empty. """ raise NotImplementedError('Subclasses must implement this method.') def _get_media(self): """ Media for a multiwidget is the combination of all media of the subwidgets. """ media = Media() for w in self.widgets: media = media + w.media return media media = property(_get_media) def __deepcopy__(self, memo): obj = super().__deepcopy__(memo) obj.widgets = copy.deepcopy(self.widgets) return obj @property def needs_multipart_form(self): return any(w.needs_multipart_form for w in self.widgets) class SplitDateTimeWidget(MultiWidget): """ A widget that splits datetime input into two <input type="text"> boxes. """ supports_microseconds = False template_name = 'django/forms/widgets/splitdatetime.html' def __init__(self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None): widgets = ( DateInput( attrs=attrs if date_attrs is None else date_attrs, format=date_format, ), TimeInput( attrs=attrs if time_attrs is None else time_attrs, format=time_format, ), ) super().__init__(widgets) def decompress(self, value): if value: value = to_current_timezone(value) return [value.date(), value.time()] return [None, None] class SplitHiddenDateTimeWidget(SplitDateTimeWidget): """ A widget that splits datetime input into two <input type="hidden"> inputs. """ template_name = 'django/forms/widgets/splithiddendatetime.html' def __init__(self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None): super().__init__(attrs, date_format, time_format, date_attrs, time_attrs) for widget in self.widgets: widget.input_type = 'hidden' class SelectDateWidget(Widget): """ A widget that splits date input into three <select> boxes. This also serves as an example of a Widget that has more than one HTML element and hence implements value_from_datadict. """ none_value = ('', '---') month_field = '%s_month' day_field = '%s_day' year_field = '%s_year' template_name = 'django/forms/widgets/select_date.html' input_type = 'select' select_widget = Select date_re = _lazy_re_compile(r'(\d{4}|0)-(\d\d?)-(\d\d?)$') def __init__(self, attrs=None, years=None, months=None, empty_label=None): self.attrs = attrs or {} # Optional list or tuple of years to use in the "year" select box. 
if years: self.years = years else: this_year = datetime.date.today().year self.years = range(this_year, this_year + 10) # Optional dict of months to use in the "month" select box. if months: self.months = months else: self.months = MONTHS # Optional string, list, or tuple to use as empty_label. if isinstance(empty_label, (list, tuple)): if not len(empty_label) == 3: raise ValueError('empty_label list/tuple must have 3 elements.') self.year_none_value = ('', empty_label[0]) self.month_none_value = ('', empty_label[1]) self.day_none_value = ('', empty_label[2]) else: if empty_label is not None: self.none_value = ('', empty_label) self.year_none_value = self.none_value self.month_none_value = self.none_value self.day_none_value = self.none_value def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) date_context = {} year_choices = [(i, str(i)) for i in self.years] if not self.is_required: year_choices.insert(0, self.year_none_value) year_name = self.year_field % name date_context['year'] = self.select_widget(attrs, choices=year_choices).get_context( name=year_name, value=context['widget']['value']['year'], attrs={**context['widget']['attrs'], 'id': 'id_%s' % year_name}, ) month_choices = list(self.months.items()) if not self.is_required: month_choices.insert(0, self.month_none_value) month_name = self.month_field % name date_context['month'] = self.select_widget(attrs, choices=month_choices).get_context( name=month_name, value=context['widget']['value']['month'], attrs={**context['widget']['attrs'], 'id': 'id_%s' % month_name}, ) day_choices = [(i, i) for i in range(1, 32)] if not self.is_required: day_choices.insert(0, self.day_none_value) day_name = self.day_field % name date_context['day'] = self.select_widget(attrs, choices=day_choices,).get_context( name=day_name, value=context['widget']['value']['day'], attrs={**context['widget']['attrs'], 'id': 'id_%s' % day_name}, ) subwidgets = [] for field in self._parse_date_fmt(): subwidgets.append(date_context[field]['widget']) context['widget']['subwidgets'] = subwidgets return context def format_value(self, value): """ Return a dict containing the year, month, and day of the current value. Use dict instead of a datetime to allow invalid dates such as February 31 to display correctly. """ year, month, day = None, None, None if isinstance(value, (datetime.date, datetime.datetime)): year, month, day = value.year, value.month, value.day elif isinstance(value, str): match = self.date_re.match(value) if match: # Convert any zeros in the date to empty strings to match the # empty option value. 
year, month, day = [int(val) or '' for val in match.groups()] else: input_format = get_format('DATE_INPUT_FORMATS')[0] try: d = datetime.datetime.strptime(value, input_format) except ValueError: pass else: year, month, day = d.year, d.month, d.day return {'year': year, 'month': month, 'day': day} @staticmethod def _parse_date_fmt(): fmt = get_format('DATE_FORMAT') escaped = False for char in fmt: if escaped: escaped = False elif char == '\\': escaped = True elif char in 'Yy': yield 'year' elif char in 'bEFMmNn': yield 'month' elif char in 'dj': yield 'day' def id_for_label(self, id_): for first_select in self._parse_date_fmt(): return '%s_%s' % (id_, first_select) return '%s_month' % id_ def value_from_datadict(self, data, files, name): y = data.get(self.year_field % name) m = data.get(self.month_field % name) d = data.get(self.day_field % name) if y == m == d == '': return None if y is not None and m is not None and d is not None: input_format = get_format('DATE_INPUT_FORMATS')[0] input_format = formats.sanitize_strftime_format(input_format) try: date_value = datetime.date(int(y), int(m), int(d)) except ValueError: # Return pseudo-ISO dates with zeros for any unselected values, # e.g. '2017-0-23'. return '%s-%s-%s' % (y or 0, m or 0, d or 0) return date_value.strftime(input_format) return data.get(name) def value_omitted_from_data(self, data, files, name): return not any( ('{}_{}'.format(name, interval) in data) for interval in ('year', 'month', 'day') )
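# --- Illustrative sketch (not part of the module above) -------------------
# A minimal example of SelectDateWidget in use, assuming a configured
# Django settings module. The form and field names below are hypothetical,
# chosen only for the example.

from django import forms


class EventDateForm(forms.Form):
    starts_on = forms.DateField(
        required=False,
        widget=forms.SelectDateWidget(
            years=range(2020, 2031),
            empty_label=('Year', 'Month', 'Day'),  # one label per select
        ),
    )


def _select_date_demo():
    # value_from_datadict() reassembles the three per-select keys derived
    # from the field name ('starts_on_year', 'starts_on_month',
    # 'starts_on_day') into a string in the first DATE_INPUT_FORMATS
    # format, or a pseudo-ISO string such as '2021-0-23' when a select
    # is left empty.
    data = {
        'starts_on_year': '2021',
        'starts_on_month': '3',
        'starts_on_day': '7',
    }
    widget = forms.SelectDateWidget()
    # '2021-03-07' with the default en-us DATE_INPUT_FORMATS.
    return widget.value_from_datadict(data, {}, 'starts_on')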
import re from django.core.exceptions import ValidationError from django.forms.utils import flatatt, pretty_name from django.forms.widgets import Textarea, TextInput from django.utils.functional import cached_property from django.utils.html import conditional_escape, format_html, html_safe from django.utils.safestring import mark_safe from django.utils.translation import gettext_lazy as _ __all__ = ('BoundField',) @html_safe class BoundField: "A Field plus data" def __init__(self, form, field, name): self.form = form self.field = field self.name = name self.html_name = form.add_prefix(name) self.html_initial_name = form.add_initial_prefix(name) self.html_initial_id = form.add_initial_prefix(self.auto_id) if self.field.label is None: self.label = pretty_name(name) else: self.label = self.field.label self.help_text = field.help_text or '' def __str__(self): """Render this field as an HTML widget.""" if self.field.show_hidden_initial: return self.as_widget() + self.as_hidden(only_initial=True) return self.as_widget() @cached_property def subwidgets(self): """ Most widgets yield a single subwidget, but others like RadioSelect and CheckboxSelectMultiple produce one subwidget for each choice. This property is cached so that only one database query occurs when rendering ModelChoiceFields. """ id_ = self.field.widget.attrs.get('id') or self.auto_id attrs = {'id': id_} if id_ else {} attrs = self.build_widget_attrs(attrs) return [ BoundWidget(self.field.widget, widget, self.form.renderer) for widget in self.field.widget.subwidgets(self.html_name, self.value(), attrs=attrs) ] def __bool__(self): # BoundField evaluates to True even if it doesn't have subwidgets. return True def __iter__(self): return iter(self.subwidgets) def __len__(self): return len(self.subwidgets) def __getitem__(self, idx): # Prevent unnecessary reevaluation when accessing BoundField's attrs # from templates. if not isinstance(idx, (int, slice)): raise TypeError( 'BoundField indices must be integers or slices, not %s.' % type(idx).__name__ ) return self.subwidgets[idx] @property def errors(self): """ Return an ErrorList (empty if there are no errors) for this field. """ return self.form.errors.get(self.name, self.form.error_class()) def as_widget(self, widget=None, attrs=None, only_initial=False): """ Render the field by rendering the passed widget, adding any HTML attributes passed as attrs. If a widget isn't specified, use the field's default widget. """ widget = widget or self.field.widget if self.field.localize: widget.is_localized = True attrs = attrs or {} attrs = self.build_widget_attrs(attrs, widget) if self.auto_id and 'id' not in widget.attrs: attrs.setdefault('id', self.html_initial_id if only_initial else self.auto_id) return widget.render( name=self.html_initial_name if only_initial else self.html_name, value=self.value(), attrs=attrs, renderer=self.form.renderer, ) def as_text(self, attrs=None, **kwargs): """ Return a string of HTML for representing this as an <input type="text">. """ return self.as_widget(TextInput(), attrs, **kwargs) def as_textarea(self, attrs=None, **kwargs): """Return a string of HTML for representing this as a <textarea>.""" return self.as_widget(Textarea(), attrs, **kwargs) def as_hidden(self, attrs=None, **kwargs): """ Return a string of HTML for representing this as an <input type="hidden">. """ return self.as_widget(self.field.hidden_widget(), attrs, **kwargs) @property def data(self): """ Return the data for this BoundField, or None if it wasn't given. 
""" return self.form._widget_data_value(self.field.widget, self.html_name) def value(self): """ Return the value for this BoundField, using the initial value if the form is not bound or the data otherwise. """ data = self.initial if self.form.is_bound: data = self.field.bound_data(self.data, data) return self.field.prepare_value(data) def _has_changed(self): field = self.field if field.show_hidden_initial: hidden_widget = field.hidden_widget() initial_value = self.form._widget_data_value( hidden_widget, self.html_initial_name, ) try: initial_value = field.to_python(initial_value) except ValidationError: # Always assume data has changed if validation fails. return True else: initial_value = self.initial return field.has_changed(initial_value, self.data) def label_tag(self, contents=None, attrs=None, label_suffix=None): """ Wrap the given contents in a <label>, if the field has an ID attribute. contents should be mark_safe'd to avoid HTML escaping. If contents aren't given, use the field's HTML-escaped label. If attrs are given, use them as HTML attributes on the <label> tag. label_suffix overrides the form's label_suffix. """ contents = contents or self.label if label_suffix is None: label_suffix = (self.field.label_suffix if self.field.label_suffix is not None else self.form.label_suffix) # Only add the suffix if the label does not end in punctuation. # Translators: If found as last label character, these punctuation # characters will prevent the default label_suffix to be appended to the label if label_suffix and contents and contents[-1] not in _(':?.!'): contents = format_html('{}{}', contents, label_suffix) widget = self.field.widget id_ = widget.attrs.get('id') or self.auto_id if id_: id_for_label = widget.id_for_label(id_) if id_for_label: attrs = {**(attrs or {}), 'for': id_for_label} if self.field.required and hasattr(self.form, 'required_css_class'): attrs = attrs or {} if 'class' in attrs: attrs['class'] += ' ' + self.form.required_css_class else: attrs['class'] = self.form.required_css_class attrs = flatatt(attrs) if attrs else '' contents = format_html('<label{}>{}</label>', attrs, contents) else: contents = conditional_escape(contents) return mark_safe(contents) def css_classes(self, extra_classes=None): """ Return a string of space-separated CSS classes for this field. """ if hasattr(extra_classes, 'split'): extra_classes = extra_classes.split() extra_classes = set(extra_classes or []) if self.errors and hasattr(self.form, 'error_css_class'): extra_classes.add(self.form.error_css_class) if self.field.required and hasattr(self.form, 'required_css_class'): extra_classes.add(self.form.required_css_class) return ' '.join(extra_classes) @property def is_hidden(self): """Return True if this BoundField's widget is hidden.""" return self.field.widget.is_hidden @property def auto_id(self): """ Calculate and return the ID attribute for this BoundField, if the associated Form has specified auto_id. Return an empty string otherwise. """ auto_id = self.form.auto_id # Boolean or string if auto_id and '%s' in str(auto_id): return auto_id % self.html_name elif auto_id: return self.html_name return '' @property def id_for_label(self): """ Wrapper around the field widget's `id_for_label` method. Useful, for example, for focusing on this field regardless of whether it has a single widget or a MultiWidget. 
""" widget = self.field.widget id_ = widget.attrs.get('id') or self.auto_id return widget.id_for_label(id_) @cached_property def initial(self): return self.form.get_initial_for_field(self.field, self.name) def build_widget_attrs(self, attrs, widget=None): widget = widget or self.field.widget attrs = dict(attrs) # Copy attrs to avoid modifying the argument. if widget.use_required_attribute(self.initial) and self.field.required and self.form.use_required_attribute: attrs['required'] = True if self.field.disabled: attrs['disabled'] = True return attrs @property def widget_type(self): return re.sub(r'widget$|input$', '', self.field.widget.__class__.__name__.lower()) @html_safe class BoundWidget: """ A container class used for iterating over widgets. This is useful for widgets that have choices. For example, the following can be used in a template: {% for radio in myform.beatles %} <label for="{{ radio.id_for_label }}"> {{ radio.choice_label }} <span class="radio">{{ radio.tag }}</span> </label> {% endfor %} """ def __init__(self, parent_widget, data, renderer): self.parent_widget = parent_widget self.data = data self.renderer = renderer def __str__(self): return self.tag(wrap_label=True) def tag(self, wrap_label=False): context = {'widget': {**self.data, 'wrap_label': wrap_label}} return self.parent_widget._render(self.template_name, context, self.renderer) @property def template_name(self): if 'template_name' in self.data: return self.data['template_name'] return self.parent_widget.template_name @property def id_for_label(self): return self.data['attrs'].get('id') @property def choice_label(self): return self.data['label']
""" Field classes. """ import copy import datetime import json import math import operator import os import re import uuid from decimal import Decimal, DecimalException from io import BytesIO from urllib.parse import urlsplit, urlunsplit from django.core import validators from django.core.exceptions import ValidationError from django.forms.boundfield import BoundField from django.forms.utils import from_current_timezone, to_current_timezone from django.forms.widgets import ( FILE_INPUT_CONTRADICTION, CheckboxInput, ClearableFileInput, DateInput, DateTimeInput, EmailInput, FileInput, HiddenInput, MultipleHiddenInput, NullBooleanSelect, NumberInput, Select, SelectMultiple, SplitDateTimeWidget, SplitHiddenDateTimeWidget, Textarea, TextInput, TimeInput, URLInput, ) from django.utils import formats from django.utils.dateparse import parse_datetime, parse_duration from django.utils.duration import duration_string from django.utils.ipv6 import clean_ipv6_address from django.utils.regex_helper import _lazy_re_compile from django.utils.translation import gettext_lazy as _, ngettext_lazy __all__ = ( 'Field', 'CharField', 'IntegerField', 'DateField', 'TimeField', 'DateTimeField', 'DurationField', 'RegexField', 'EmailField', 'FileField', 'ImageField', 'URLField', 'BooleanField', 'NullBooleanField', 'ChoiceField', 'MultipleChoiceField', 'ComboField', 'MultiValueField', 'FloatField', 'DecimalField', 'SplitDateTimeField', 'GenericIPAddressField', 'FilePathField', 'JSONField', 'SlugField', 'TypedChoiceField', 'TypedMultipleChoiceField', 'UUIDField', ) class Field: widget = TextInput # Default widget to use when rendering this type of Field. hidden_widget = HiddenInput # Default widget to use when rendering this as "hidden". default_validators = [] # Default set of validators # Add an 'invalid' entry to default_error_message if you want a specific # field error message not raised by the field validators. default_error_messages = { 'required': _('This field is required.'), } empty_values = list(validators.EMPTY_VALUES) def __init__(self, *, required=True, widget=None, label=None, initial=None, help_text='', error_messages=None, show_hidden_initial=False, validators=(), localize=False, disabled=False, label_suffix=None): # required -- Boolean that specifies whether the field is required. # True by default. # widget -- A Widget class, or instance of a Widget class, that should # be used for this Field when displaying it. Each Field has a # default Widget that it'll use if you don't specify this. In # most cases, the default widget is TextInput. # label -- A verbose name for this field, for use in displaying this # field in a form. By default, Django will use a "pretty" # version of the form field name, if the Field is part of a # Form. # initial -- A value to use in this Field's initial display. This value # is *not* used as a fallback if data isn't given. # help_text -- An optional string to use as "help text" for this Field. # error_messages -- An optional dictionary to override the default # messages that the field will raise. # show_hidden_initial -- Boolean that specifies if it is needed to render a # hidden widget with initial value after widget. # validators -- List of additional validators to use # localize -- Boolean that specifies if the field should be localized. # disabled -- Boolean that specifies whether the field is disabled, that # is its widget is shown in the form but not editable. # label_suffix -- Suffix to be added to the label. Overrides # form's label_suffix. 
self.required, self.label, self.initial = required, label, initial self.show_hidden_initial = show_hidden_initial self.help_text = help_text self.disabled = disabled self.label_suffix = label_suffix widget = widget or self.widget if isinstance(widget, type): widget = widget() else: widget = copy.deepcopy(widget) # Trigger the localization machinery if needed. self.localize = localize if self.localize: widget.is_localized = True # Let the widget know whether it should display as required. widget.is_required = self.required # Hook into self.widget_attrs() for any Field-specific HTML attributes. extra_attrs = self.widget_attrs(widget) if extra_attrs: widget.attrs.update(extra_attrs) self.widget = widget messages = {} for c in reversed(self.__class__.__mro__): messages.update(getattr(c, 'default_error_messages', {})) messages.update(error_messages or {}) self.error_messages = messages self.validators = [*self.default_validators, *validators] super().__init__() def prepare_value(self, value): return value def to_python(self, value): return value def validate(self, value): if value in self.empty_values and self.required: raise ValidationError(self.error_messages['required'], code='required') def run_validators(self, value): if value in self.empty_values: return errors = [] for v in self.validators: try: v(value) except ValidationError as e: if hasattr(e, 'code') and e.code in self.error_messages: e.message = self.error_messages[e.code] errors.extend(e.error_list) if errors: raise ValidationError(errors) def clean(self, value): """ Validate the given value and return its "cleaned" value as an appropriate Python object. Raise ValidationError for any errors. """ value = self.to_python(value) self.validate(value) self.run_validators(value) return value def bound_data(self, data, initial): """ Return the value that should be shown for this field on render of a bound form, given the submitted POST data for the field and the initial data, if any. For most fields, this will simply be data; FileFields need to handle it a bit differently. """ if self.disabled: return initial return data def widget_attrs(self, widget): """ Given a Widget instance (*not* a Widget class), return a dictionary of any HTML attributes that should be added to the Widget, based on this Field. """ return {} def has_changed(self, initial, data): """Return True if data differs from initial.""" # Always return False if the field is disabled since self.bound_data # always uses the initial value in this case. if self.disabled: return False try: data = self.to_python(data) if hasattr(self, '_coerce'): return self._coerce(data) != self._coerce(initial) except ValidationError: return True # For purposes of seeing whether something has changed, None is # the same as an empty string, if the data or initial value we get # is None, replace it with ''. initial_value = initial if initial is not None else '' data_value = data if data is not None else '' return initial_value != data_value def get_bound_field(self, form, field_name): """ Return a BoundField instance that will be used when accessing the form field in a template. 
""" return BoundField(form, self, field_name) def __deepcopy__(self, memo): result = copy.copy(self) memo[id(self)] = result result.widget = copy.deepcopy(self.widget, memo) result.error_messages = self.error_messages.copy() result.validators = self.validators[:] return result class CharField(Field): def __init__(self, *, max_length=None, min_length=None, strip=True, empty_value='', **kwargs): self.max_length = max_length self.min_length = min_length self.strip = strip self.empty_value = empty_value super().__init__(**kwargs) if min_length is not None: self.validators.append(validators.MinLengthValidator(int(min_length))) if max_length is not None: self.validators.append(validators.MaxLengthValidator(int(max_length))) self.validators.append(validators.ProhibitNullCharactersValidator()) def to_python(self, value): """Return a string.""" if value not in self.empty_values: value = str(value) if self.strip: value = value.strip() if value in self.empty_values: return self.empty_value return value def widget_attrs(self, widget): attrs = super().widget_attrs(widget) if self.max_length is not None and not widget.is_hidden: # The HTML attribute is maxlength, not max_length. attrs['maxlength'] = str(self.max_length) if self.min_length is not None and not widget.is_hidden: # The HTML attribute is minlength, not min_length. attrs['minlength'] = str(self.min_length) return attrs class IntegerField(Field): widget = NumberInput default_error_messages = { 'invalid': _('Enter a whole number.'), } re_decimal = _lazy_re_compile(r'\.0*\s*$') def __init__(self, *, max_value=None, min_value=None, **kwargs): self.max_value, self.min_value = max_value, min_value if kwargs.get('localize') and self.widget == NumberInput: # Localized number input is not well supported on most browsers kwargs.setdefault('widget', super().widget) super().__init__(**kwargs) if max_value is not None: self.validators.append(validators.MaxValueValidator(max_value)) if min_value is not None: self.validators.append(validators.MinValueValidator(min_value)) def to_python(self, value): """ Validate that int() can be called on the input. Return the result of int() or None for empty values. """ value = super().to_python(value) if value in self.empty_values: return None if self.localize: value = formats.sanitize_separators(value) # Strip trailing decimal and zeros. try: value = int(self.re_decimal.sub('', str(value))) except (ValueError, TypeError): raise ValidationError(self.error_messages['invalid'], code='invalid') return value def widget_attrs(self, widget): attrs = super().widget_attrs(widget) if isinstance(widget, NumberInput): if self.min_value is not None: attrs['min'] = self.min_value if self.max_value is not None: attrs['max'] = self.max_value return attrs class FloatField(IntegerField): default_error_messages = { 'invalid': _('Enter a number.'), } def to_python(self, value): """ Validate that float() can be called on the input. Return the result of float() or None for empty values. 
""" value = super(IntegerField, self).to_python(value) if value in self.empty_values: return None if self.localize: value = formats.sanitize_separators(value) try: value = float(value) except (ValueError, TypeError): raise ValidationError(self.error_messages['invalid'], code='invalid') return value def validate(self, value): super().validate(value) if value in self.empty_values: return if not math.isfinite(value): raise ValidationError(self.error_messages['invalid'], code='invalid') def widget_attrs(self, widget): attrs = super().widget_attrs(widget) if isinstance(widget, NumberInput) and 'step' not in widget.attrs: attrs.setdefault('step', 'any') return attrs class DecimalField(IntegerField): default_error_messages = { 'invalid': _('Enter a number.'), } def __init__(self, *, max_value=None, min_value=None, max_digits=None, decimal_places=None, **kwargs): self.max_digits, self.decimal_places = max_digits, decimal_places super().__init__(max_value=max_value, min_value=min_value, **kwargs) self.validators.append(validators.DecimalValidator(max_digits, decimal_places)) def to_python(self, value): """ Validate that the input is a decimal number. Return a Decimal instance or None for empty values. Ensure that there are no more than max_digits in the number and no more than decimal_places digits after the decimal point. """ if value in self.empty_values: return None if self.localize: value = formats.sanitize_separators(value) try: value = Decimal(str(value)) except DecimalException: raise ValidationError(self.error_messages['invalid'], code='invalid') return value def validate(self, value): super().validate(value) if value in self.empty_values: return if not value.is_finite(): raise ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def widget_attrs(self, widget): attrs = super().widget_attrs(widget) if isinstance(widget, NumberInput) and 'step' not in widget.attrs: if self.decimal_places is not None: # Use exponential notation for small values since they might # be parsed as 0 otherwise. ref #20765 step = str(Decimal(1).scaleb(-self.decimal_places)).lower() else: step = 'any' attrs.setdefault('step', step) return attrs class BaseTemporalField(Field): def __init__(self, *, input_formats=None, **kwargs): super().__init__(**kwargs) if input_formats is not None: self.input_formats = input_formats def to_python(self, value): value = value.strip() # Try to strptime against each input format. for format in self.input_formats: try: return self.strptime(value, format) except (ValueError, TypeError): continue raise ValidationError(self.error_messages['invalid'], code='invalid') def strptime(self, value, format): raise NotImplementedError('Subclasses must define this method.') class DateField(BaseTemporalField): widget = DateInput input_formats = formats.get_format_lazy('DATE_INPUT_FORMATS') default_error_messages = { 'invalid': _('Enter a valid date.'), } def to_python(self, value): """ Validate that the input can be converted to a date. Return a Python datetime.date object. 
""" if value in self.empty_values: return None if isinstance(value, datetime.datetime): return value.date() if isinstance(value, datetime.date): return value return super().to_python(value) def strptime(self, value, format): return datetime.datetime.strptime(value, format).date() class TimeField(BaseTemporalField): widget = TimeInput input_formats = formats.get_format_lazy('TIME_INPUT_FORMATS') default_error_messages = { 'invalid': _('Enter a valid time.') } def to_python(self, value): """ Validate that the input can be converted to a time. Return a Python datetime.time object. """ if value in self.empty_values: return None if isinstance(value, datetime.time): return value return super().to_python(value) def strptime(self, value, format): return datetime.datetime.strptime(value, format).time() class DateTimeFormatsIterator: def __iter__(self): yield from formats.get_format('DATETIME_INPUT_FORMATS') yield from formats.get_format('DATE_INPUT_FORMATS') class DateTimeField(BaseTemporalField): widget = DateTimeInput input_formats = DateTimeFormatsIterator() default_error_messages = { 'invalid': _('Enter a valid date/time.'), } def prepare_value(self, value): if isinstance(value, datetime.datetime): value = to_current_timezone(value) return value def to_python(self, value): """ Validate that the input can be converted to a datetime. Return a Python datetime.datetime object. """ if value in self.empty_values: return None if isinstance(value, datetime.datetime): return from_current_timezone(value) if isinstance(value, datetime.date): result = datetime.datetime(value.year, value.month, value.day) return from_current_timezone(result) try: result = parse_datetime(value.strip()) except ValueError: raise ValidationError(self.error_messages['invalid'], code='invalid') if not result: result = super().to_python(value) return from_current_timezone(result) def strptime(self, value, format): return datetime.datetime.strptime(value, format) class DurationField(Field): default_error_messages = { 'invalid': _('Enter a valid duration.'), 'overflow': _('The number of days must be between {min_days} and {max_days}.') } def prepare_value(self, value): if isinstance(value, datetime.timedelta): return duration_string(value) return value def to_python(self, value): if value in self.empty_values: return None if isinstance(value, datetime.timedelta): return value try: value = parse_duration(str(value)) except OverflowError: raise ValidationError(self.error_messages['overflow'].format( min_days=datetime.timedelta.min.days, max_days=datetime.timedelta.max.days, ), code='overflow') if value is None: raise ValidationError(self.error_messages['invalid'], code='invalid') return value class RegexField(CharField): def __init__(self, regex, **kwargs): """ regex can be either a string or a compiled regular expression object. 
""" kwargs.setdefault('strip', False) super().__init__(**kwargs) self._set_regex(regex) def _get_regex(self): return self._regex def _set_regex(self, regex): if isinstance(regex, str): regex = re.compile(regex) self._regex = regex if hasattr(self, '_regex_validator') and self._regex_validator in self.validators: self.validators.remove(self._regex_validator) self._regex_validator = validators.RegexValidator(regex=regex) self.validators.append(self._regex_validator) regex = property(_get_regex, _set_regex) class EmailField(CharField): widget = EmailInput default_validators = [validators.validate_email] def __init__(self, **kwargs): super().__init__(strip=True, **kwargs) class FileField(Field): widget = ClearableFileInput default_error_messages = { 'invalid': _("No file was submitted. Check the encoding type on the form."), 'missing': _("No file was submitted."), 'empty': _("The submitted file is empty."), 'max_length': ngettext_lazy( 'Ensure this filename has at most %(max)d character (it has %(length)d).', 'Ensure this filename has at most %(max)d characters (it has %(length)d).', 'max'), 'contradiction': _('Please either submit a file or check the clear checkbox, not both.') } def __init__(self, *, max_length=None, allow_empty_file=False, **kwargs): self.max_length = max_length self.allow_empty_file = allow_empty_file super().__init__(**kwargs) def to_python(self, data): if data in self.empty_values: return None # UploadedFile objects should have name and size attributes. try: file_name = data.name file_size = data.size except AttributeError: raise ValidationError(self.error_messages['invalid'], code='invalid') if self.max_length is not None and len(file_name) > self.max_length: params = {'max': self.max_length, 'length': len(file_name)} raise ValidationError(self.error_messages['max_length'], code='max_length', params=params) if not file_name: raise ValidationError(self.error_messages['invalid'], code='invalid') if not self.allow_empty_file and not file_size: raise ValidationError(self.error_messages['empty'], code='empty') return data def clean(self, data, initial=None): # If the widget got contradictory inputs, we raise a validation error if data is FILE_INPUT_CONTRADICTION: raise ValidationError(self.error_messages['contradiction'], code='contradiction') # False means the field value should be cleared; further validation is # not needed. if data is False: if not self.required: return False # If the field is required, clearing is not possible (the widget # shouldn't return False data in that case anyway). False is not # in self.empty_value; if a False value makes it this far # it should be validated from here on out as None (so it will be # caught by the required check). data = None if not data and initial: return initial return super().clean(data) def bound_data(self, data, initial): if data in (None, FILE_INPUT_CONTRADICTION): return initial return data def has_changed(self, initial, data): return not self.disabled and data is not None class ImageField(FileField): default_validators = [validators.validate_image_file_extension] default_error_messages = { 'invalid_image': _( "Upload a valid image. The file you uploaded was either not an " "image or a corrupted image." ), } def to_python(self, data): """ Check that the file-upload field data contains a valid image (GIF, JPG, PNG, etc. -- whatever Pillow supports). """ f = super().to_python(data) if f is None: return None from PIL import Image # We need to get a file object for Pillow. 
We might have a path or we might # have to read the data into memory. if hasattr(data, 'temporary_file_path'): file = data.temporary_file_path() else: if hasattr(data, 'read'): file = BytesIO(data.read()) else: file = BytesIO(data['content']) try: # load() could spot a truncated JPEG, but it loads the entire # image in memory, which is a DoS vector. See #3848 and #18520. image = Image.open(file) # verify() must be called immediately after the constructor. image.verify() # Annotating so subclasses can reuse it for their own validation f.image = image # Pillow doesn't detect the MIME type of all formats. In those # cases, content_type will be None. f.content_type = Image.MIME.get(image.format) except Exception as exc: # Pillow doesn't recognize it as an image. raise ValidationError( self.error_messages['invalid_image'], code='invalid_image', ) from exc if hasattr(f, 'seek') and callable(f.seek): f.seek(0) return f def widget_attrs(self, widget): attrs = super().widget_attrs(widget) if isinstance(widget, FileInput) and 'accept' not in widget.attrs: attrs.setdefault('accept', 'image/*') return attrs class URLField(CharField): widget = URLInput default_error_messages = { 'invalid': _('Enter a valid URL.'), } default_validators = [validators.URLValidator()] def __init__(self, **kwargs): super().__init__(strip=True, **kwargs) def to_python(self, value): def split_url(url): """ Return a list of url parts via urlparse.urlsplit(), or raise ValidationError for some malformed URLs. """ try: return list(urlsplit(url)) except ValueError: # urlparse.urlsplit can raise a ValueError with some # misformatted URLs. raise ValidationError(self.error_messages['invalid'], code='invalid') value = super().to_python(value) if value: url_fields = split_url(value) if not url_fields[0]: # If no URL scheme given, assume http:// url_fields[0] = 'http' if not url_fields[1]: # Assume that if no domain is provided, that the path segment # contains the domain. url_fields[1] = url_fields[2] url_fields[2] = '' # Rebuild the url_fields list, since the domain segment may now # contain the path too. url_fields = split_url(urlunsplit(url_fields)) value = urlunsplit(url_fields) return value class BooleanField(Field): widget = CheckboxInput def to_python(self, value): """Return a Python boolean object.""" # Explicitly check for the string 'False', which is what a hidden field # will submit for False. Also check for '0', since this is what # RadioSelect will provide. Because bool("True") == bool('1') == True, # we don't need to handle that explicitly. if isinstance(value, str) and value.lower() in ('false', '0'): value = False else: value = bool(value) return super().to_python(value) def validate(self, value): if not value and self.required: raise ValidationError(self.error_messages['required'], code='required') def has_changed(self, initial, data): if self.disabled: return False # Sometimes data or initial may be a string equivalent of a boolean # so we should run it through to_python first to get a boolean value return self.to_python(initial) != self.to_python(data) class NullBooleanField(BooleanField): """ A field whose valid values are None, True, and False. Clean invalid values to None. """ widget = NullBooleanSelect def to_python(self, value): """ Explicitly check for the string 'True' and 'False', which is what a hidden field will submit for True and False, for 'true' and 'false', which are likely to be returned by JavaScript serializations of forms, and for '1' and '0', which is what a RadioField will submit. 
Unlike the Booleanfield, this field must check for True because it doesn't use the bool() function. """ if value in (True, 'True', 'true', '1'): return True elif value in (False, 'False', 'false', '0'): return False else: return None def validate(self, value): pass class CallableChoiceIterator: def __init__(self, choices_func): self.choices_func = choices_func def __iter__(self): yield from self.choices_func() class ChoiceField(Field): widget = Select default_error_messages = { 'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'), } def __init__(self, *, choices=(), **kwargs): super().__init__(**kwargs) self.choices = choices def __deepcopy__(self, memo): result = super().__deepcopy__(memo) result._choices = copy.deepcopy(self._choices, memo) return result def _get_choices(self): return self._choices def _set_choices(self, value): # Setting choices also sets the choices on the widget. # choices can be any iterable, but we call list() on it because # it will be consumed more than once. if callable(value): value = CallableChoiceIterator(value) else: value = list(value) self._choices = self.widget.choices = value choices = property(_get_choices, _set_choices) def to_python(self, value): """Return a string.""" if value in self.empty_values: return '' return str(value) def validate(self, value): """Validate that the input is in self.choices.""" super().validate(value) if value and not self.valid_value(value): raise ValidationError( self.error_messages['invalid_choice'], code='invalid_choice', params={'value': value}, ) def valid_value(self, value): """Check to see if the provided value is a valid choice.""" text_value = str(value) for k, v in self.choices: if isinstance(v, (list, tuple)): # This is an optgroup, so look inside the group for options for k2, v2 in v: if value == k2 or text_value == str(k2): return True else: if value == k or text_value == str(k): return True return False class TypedChoiceField(ChoiceField): def __init__(self, *, coerce=lambda val: val, empty_value='', **kwargs): self.coerce = coerce self.empty_value = empty_value super().__init__(**kwargs) def _coerce(self, value): """ Validate that the value can be coerced to the right type (if not empty). """ if value == self.empty_value or value in self.empty_values: return self.empty_value try: value = self.coerce(value) except (ValueError, TypeError, ValidationError): raise ValidationError( self.error_messages['invalid_choice'], code='invalid_choice', params={'value': value}, ) return value def clean(self, value): value = super().clean(value) return self._coerce(value) class MultipleChoiceField(ChoiceField): hidden_widget = MultipleHiddenInput widget = SelectMultiple default_error_messages = { 'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'), 'invalid_list': _('Enter a list of values.'), } def to_python(self, value): if not value: return [] elif not isinstance(value, (list, tuple)): raise ValidationError(self.error_messages['invalid_list'], code='invalid_list') return [str(val) for val in value] def validate(self, value): """Validate that the input is a list or tuple.""" if self.required and not value: raise ValidationError(self.error_messages['required'], code='required') # Validate that each value in the value list is in self.choices. 
for val in value: if not self.valid_value(val): raise ValidationError( self.error_messages['invalid_choice'], code='invalid_choice', params={'value': val}, ) def has_changed(self, initial, data): if self.disabled: return False if initial is None: initial = [] if data is None: data = [] if len(initial) != len(data): return True initial_set = {str(value) for value in initial} data_set = {str(value) for value in data} return data_set != initial_set class TypedMultipleChoiceField(MultipleChoiceField): def __init__(self, *, coerce=lambda val: val, **kwargs): self.coerce = coerce self.empty_value = kwargs.pop('empty_value', []) super().__init__(**kwargs) def _coerce(self, value): """ Validate that the values are in self.choices and can be coerced to the right type. """ if value == self.empty_value or value in self.empty_values: return self.empty_value new_value = [] for choice in value: try: new_value.append(self.coerce(choice)) except (ValueError, TypeError, ValidationError): raise ValidationError( self.error_messages['invalid_choice'], code='invalid_choice', params={'value': choice}, ) return new_value def clean(self, value): value = super().clean(value) return self._coerce(value) def validate(self, value): if value != self.empty_value: super().validate(value) elif self.required: raise ValidationError(self.error_messages['required'], code='required') class ComboField(Field): """ A Field whose clean() method calls multiple Field clean() methods. """ def __init__(self, fields, **kwargs): super().__init__(**kwargs) # Set 'required' to False on the individual fields, because the # required validation will be handled by ComboField, not by those # individual fields. for f in fields: f.required = False self.fields = fields def clean(self, value): """ Validate the given value against all of self.fields, which is a list of Field instances. """ super().clean(value) for field in self.fields: value = field.clean(value) return value class MultiValueField(Field): """ Aggregate the logic of multiple Fields. Its clean() method takes a "decompressed" list of values, which are then cleaned into a single value according to self.fields. Each value in this list is cleaned by the corresponding field -- the first value is cleaned by the first field, the second value is cleaned by the second field, etc. Once all fields are cleaned, the list of clean values is "compressed" into a single value. Subclasses should not have to implement clean(). Instead, they must implement compress(), which takes a list of valid values and returns a "compressed" version of those values -- a single value. You'll probably want to use this with MultiWidget. """ default_error_messages = { 'invalid': _('Enter a list of values.'), 'incomplete': _('Enter a complete value.'), } def __init__(self, fields, *, require_all_fields=True, **kwargs): self.require_all_fields = require_all_fields super().__init__(**kwargs) for f in fields: f.error_messages.setdefault('incomplete', self.error_messages['incomplete']) if self.disabled: f.disabled = True if self.require_all_fields: # Set 'required' to False on the individual fields, because the # required validation will be handled by MultiValueField, not # by those individual fields. f.required = False self.fields = fields def __deepcopy__(self, memo): result = super().__deepcopy__(memo) result.fields = tuple(x.__deepcopy__(memo) for x in self.fields) return result def validate(self, value): pass def clean(self, value): """ Validate every value in the given list. 
A value is validated against the corresponding Field in self.fields. For example, if this MultiValueField was instantiated with fields=(DateField(), TimeField()), clean() would call DateField.clean(value[0]) and TimeField.clean(value[1]). """ clean_data = [] errors = [] if self.disabled and not isinstance(value, list): value = self.widget.decompress(value) if not value or isinstance(value, (list, tuple)): if not value or not [v for v in value if v not in self.empty_values]: if self.required: raise ValidationError(self.error_messages['required'], code='required') else: return self.compress([]) else: raise ValidationError(self.error_messages['invalid'], code='invalid') for i, field in enumerate(self.fields): try: field_value = value[i] except IndexError: field_value = None if field_value in self.empty_values: if self.require_all_fields: # Raise a 'required' error if the MultiValueField is # required and any field is empty. if self.required: raise ValidationError(self.error_messages['required'], code='required') elif field.required: # Otherwise, add an 'incomplete' error to the list of # collected errors and skip field cleaning, if a required # field is empty. if field.error_messages['incomplete'] not in errors: errors.append(field.error_messages['incomplete']) continue try: clean_data.append(field.clean(field_value)) except ValidationError as e: # Collect all validation errors in a single list, which we'll # raise at the end of clean(), rather than raising a single # exception for the first error we encounter. Skip duplicates. errors.extend(m for m in e.error_list if m not in errors) if errors: raise ValidationError(errors) out = self.compress(clean_data) self.validate(out) self.run_validators(out) return out def compress(self, data_list): """ Return a single value for the given list of values. The values can be assumed to be valid. For example, if this MultiValueField was instantiated with fields=(DateField(), TimeField()), this might return a datetime object created by combining the date and time in data_list. 
""" raise NotImplementedError('Subclasses must implement this method.') def has_changed(self, initial, data): if self.disabled: return False if initial is None: initial = ['' for x in range(0, len(data))] else: if not isinstance(initial, list): initial = self.widget.decompress(initial) for field, initial, data in zip(self.fields, initial, data): try: initial = field.to_python(initial) except ValidationError: return True if field.has_changed(initial, data): return True return False class FilePathField(ChoiceField): def __init__(self, path, *, match=None, recursive=False, allow_files=True, allow_folders=False, **kwargs): self.path, self.match, self.recursive = path, match, recursive self.allow_files, self.allow_folders = allow_files, allow_folders super().__init__(choices=(), **kwargs) if self.required: self.choices = [] else: self.choices = [("", "---------")] if self.match is not None: self.match_re = re.compile(self.match) if recursive: for root, dirs, files in sorted(os.walk(self.path)): if self.allow_files: for f in sorted(files): if self.match is None or self.match_re.search(f): f = os.path.join(root, f) self.choices.append((f, f.replace(path, "", 1))) if self.allow_folders: for f in sorted(dirs): if f == '__pycache__': continue if self.match is None or self.match_re.search(f): f = os.path.join(root, f) self.choices.append((f, f.replace(path, "", 1))) else: choices = [] with os.scandir(self.path) as entries: for f in entries: if f.name == '__pycache__': continue if (( (self.allow_files and f.is_file()) or (self.allow_folders and f.is_dir()) ) and (self.match is None or self.match_re.search(f.name))): choices.append((f.path, f.name)) choices.sort(key=operator.itemgetter(1)) self.choices.extend(choices) self.widget.choices = self.choices class SplitDateTimeField(MultiValueField): widget = SplitDateTimeWidget hidden_widget = SplitHiddenDateTimeWidget default_error_messages = { 'invalid_date': _('Enter a valid date.'), 'invalid_time': _('Enter a valid time.'), } def __init__(self, *, input_date_formats=None, input_time_formats=None, **kwargs): errors = self.default_error_messages.copy() if 'error_messages' in kwargs: errors.update(kwargs['error_messages']) localize = kwargs.get('localize', False) fields = ( DateField(input_formats=input_date_formats, error_messages={'invalid': errors['invalid_date']}, localize=localize), TimeField(input_formats=input_time_formats, error_messages={'invalid': errors['invalid_time']}, localize=localize), ) super().__init__(fields, **kwargs) def compress(self, data_list): if data_list: # Raise a validation error if time or date is empty # (possible if SplitDateTimeField has required=False). 
if data_list[0] in self.empty_values: raise ValidationError(self.error_messages['invalid_date'], code='invalid_date') if data_list[1] in self.empty_values: raise ValidationError(self.error_messages['invalid_time'], code='invalid_time') result = datetime.datetime.combine(*data_list) return from_current_timezone(result) return None class GenericIPAddressField(CharField): def __init__(self, *, protocol='both', unpack_ipv4=False, **kwargs): self.unpack_ipv4 = unpack_ipv4 self.default_validators = validators.ip_address_validators(protocol, unpack_ipv4)[0] super().__init__(**kwargs) def to_python(self, value): if value in self.empty_values: return '' value = value.strip() if value and ':' in value: return clean_ipv6_address(value, self.unpack_ipv4) return value class SlugField(CharField): default_validators = [validators.validate_slug] def __init__(self, *, allow_unicode=False, **kwargs): self.allow_unicode = allow_unicode if self.allow_unicode: self.default_validators = [validators.validate_unicode_slug] super().__init__(**kwargs) class UUIDField(CharField): default_error_messages = { 'invalid': _('Enter a valid UUID.'), } def prepare_value(self, value): if isinstance(value, uuid.UUID): return str(value) return value def to_python(self, value): value = super().to_python(value) if value in self.empty_values: return None if not isinstance(value, uuid.UUID): try: value = uuid.UUID(value) except ValueError: raise ValidationError(self.error_messages['invalid'], code='invalid') return value class InvalidJSONInput(str): pass class JSONString(str): pass class JSONField(CharField): default_error_messages = { 'invalid': _('Enter a valid JSON.'), } widget = Textarea def __init__(self, encoder=None, decoder=None, **kwargs): self.encoder = encoder self.decoder = decoder super().__init__(**kwargs) def to_python(self, value): if self.disabled: return value if value in self.empty_values: return None elif isinstance(value, (list, dict, int, float, JSONString)): return value try: converted = json.loads(value, cls=self.decoder) except json.JSONDecodeError: raise ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) if isinstance(converted, str): return JSONString(converted) else: return converted def bound_data(self, data, initial): if self.disabled: return initial if data is None: return None try: return json.loads(data, cls=self.decoder) except json.JSONDecodeError: return InvalidJSONInput(data) def prepare_value(self, value): if isinstance(value, InvalidJSONInput): return value return json.dumps(value, ensure_ascii=False, cls=self.encoder) def has_changed(self, initial, data): if super().has_changed(initial, data): return True # For purposes of seeing whether something has changed, True isn't the # same as 1 and the order of keys doesn't matter. return ( json.dumps(initial, sort_keys=True, cls=self.encoder) != json.dumps(self.to_python(data), sort_keys=True, cls=self.encoder) )
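# --- Illustrative sketch (not part of the module above) -------------------
# The Field.clean() pipeline (to_python -> validate -> run_validators) in
# action, plus a minimal MultiValueField subclass showing compress().
# Assumes a configured Django settings module; all names are hypothetical.

from django import forms
from django.core.exceptions import ValidationError


def _clean_pipeline_demo():
    field = forms.IntegerField(min_value=1)
    field.clean('42')     # to_python() -> 42, then validators run
    try:
        field.clean('0')  # MinValueValidator raises in run_validators()
    except ValidationError as e:
        return e.messages


class DateRangeField(forms.MultiValueField):
    """Two DateFields compressed into a (start, end) tuple."""

    def __init__(self, **kwargs):
        super().__init__(fields=(forms.DateField(), forms.DateField()), **kwargs)

    def compress(self, data_list):
        # clean() has already validated each half; data_list is
        # [start_date, end_date], or [] when the field was left empty.
        return tuple(data_list) if data_list else None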
""" Form classes """ import copy import datetime from django.core.exceptions import NON_FIELD_ERRORS, ValidationError from django.forms.fields import Field, FileField from django.forms.utils import ErrorDict, ErrorList from django.forms.widgets import Media, MediaDefiningClass from django.utils.datastructures import MultiValueDict from django.utils.functional import cached_property from django.utils.html import conditional_escape, html_safe from django.utils.safestring import mark_safe from django.utils.translation import gettext as _ from .renderers import get_default_renderer __all__ = ('BaseForm', 'Form') class DeclarativeFieldsMetaclass(MediaDefiningClass): """Collect Fields declared on the base classes.""" def __new__(mcs, name, bases, attrs): # Collect fields from current class and remove them from attrs. attrs['declared_fields'] = { key: attrs.pop(key) for key, value in list(attrs.items()) if isinstance(value, Field) } new_class = super().__new__(mcs, name, bases, attrs) # Walk through the MRO. declared_fields = {} for base in reversed(new_class.__mro__): # Collect fields from base class. if hasattr(base, 'declared_fields'): declared_fields.update(base.declared_fields) # Field shadowing. for attr, value in base.__dict__.items(): if value is None and attr in declared_fields: declared_fields.pop(attr) new_class.base_fields = declared_fields new_class.declared_fields = declared_fields return new_class @html_safe class BaseForm: """ The main implementation of all the Form logic. Note that this class is different than Form. See the comments by the Form class for more info. Any improvements to the form API should be made to this class, not to the Form class. """ default_renderer = None field_order = None prefix = None use_required_attribute = True def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None, initial=None, error_class=ErrorList, label_suffix=None, empty_permitted=False, field_order=None, use_required_attribute=None, renderer=None): self.is_bound = data is not None or files is not None self.data = MultiValueDict() if data is None else data self.files = MultiValueDict() if files is None else files self.auto_id = auto_id if prefix is not None: self.prefix = prefix self.initial = initial or {} self.error_class = error_class # Translators: This is the default suffix added to form field labels self.label_suffix = label_suffix if label_suffix is not None else _(':') self.empty_permitted = empty_permitted self._errors = None # Stores the errors after clean() has been called. # The base_fields class attribute is the *class-wide* definition of # fields. Because a particular *instance* of the class might want to # alter self.fields, we create self.fields here by copying base_fields. # Instances should always modify self.fields; they should not modify # self.base_fields. self.fields = copy.deepcopy(self.base_fields) self._bound_fields_cache = {} self.order_fields(self.field_order if field_order is None else field_order) if use_required_attribute is not None: self.use_required_attribute = use_required_attribute if self.empty_permitted and self.use_required_attribute: raise ValueError( 'The empty_permitted and use_required_attribute arguments may ' 'not both be True.' ) # Initialize form renderer. Use a global default if not specified # either as an argument or as self.default_renderer. 
if renderer is None: if self.default_renderer is None: renderer = get_default_renderer() else: renderer = self.default_renderer if isinstance(self.default_renderer, type): renderer = renderer() self.renderer = renderer def order_fields(self, field_order): """ Rearrange the fields according to field_order. field_order is a list of field names specifying the order. Append fields not included in the list in the default order for backward compatibility with subclasses not overriding field_order. If field_order is None, keep all fields in the order defined in the class. Ignore unknown fields in field_order to allow disabling fields in form subclasses without redefining ordering. """ if field_order is None: return fields = {} for key in field_order: try: fields[key] = self.fields.pop(key) except KeyError: # ignore unknown fields pass fields.update(self.fields) # add remaining fields in original order self.fields = fields def __str__(self): return self.as_table() def __repr__(self): if self._errors is None: is_valid = "Unknown" else: is_valid = self.is_bound and not self._errors return '<%(cls)s bound=%(bound)s, valid=%(valid)s, fields=(%(fields)s)>' % { 'cls': self.__class__.__name__, 'bound': self.is_bound, 'valid': is_valid, 'fields': ';'.join(self.fields), } def _bound_items(self): """Yield (name, bf) pairs, where bf is a BoundField object.""" for name in self.fields: yield name, self[name] def __iter__(self): """Yield the form's fields as BoundField objects.""" for name in self.fields: yield self[name] def __getitem__(self, name): """Return a BoundField with the given name.""" try: return self._bound_fields_cache[name] except KeyError: pass try: field = self.fields[name] except KeyError: raise KeyError( "Key '%s' not found in '%s'. Choices are: %s." % ( name, self.__class__.__name__, ', '.join(sorted(self.fields)), ) ) bound_field = field.get_bound_field(self, name) self._bound_fields_cache[name] = bound_field return bound_field @property def errors(self): """Return an ErrorDict for the data provided for the form.""" if self._errors is None: self.full_clean() return self._errors def is_valid(self): """Return True if the form has no errors, or False otherwise.""" return self.is_bound and not self.errors def add_prefix(self, field_name): """ Return the field name with a prefix appended, if this Form has a prefix set. Subclasses may wish to override. """ return '%s-%s' % (self.prefix, field_name) if self.prefix else field_name def add_initial_prefix(self, field_name): """Add an 'initial' prefix for checking dynamic initial values.""" return 'initial-%s' % self.add_prefix(field_name) def _widget_data_value(self, widget, html_name): # value_from_datadict() gets the data from the data dictionaries. # Each widget type knows how to retrieve its own data, because some # widgets split data over several HTML fields. return widget.value_from_datadict(self.data, self.files, html_name) def _html_output(self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row): "Output HTML. Used by as_table(), as_ul(), as_p()." # Errors that should be displayed above all fields. 
top_errors = self.non_field_errors().copy() output, hidden_fields = [], [] for name, bf in self._bound_items(): field = bf.field html_class_attr = '' bf_errors = self.error_class(bf.errors) if bf.is_hidden: if bf_errors: top_errors.extend( [_('(Hidden field %(name)s) %(error)s') % {'name': name, 'error': str(e)} for e in bf_errors]) hidden_fields.append(str(bf)) else: # Create a 'class="..."' attribute if the row should have any # CSS classes applied. css_classes = bf.css_classes() if css_classes: html_class_attr = ' class="%s"' % css_classes if errors_on_separate_row and bf_errors: output.append(error_row % str(bf_errors)) if bf.label: label = conditional_escape(bf.label) label = bf.label_tag(label) or '' else: label = '' if field.help_text: help_text = help_text_html % field.help_text else: help_text = '' output.append(normal_row % { 'errors': bf_errors, 'label': label, 'field': bf, 'help_text': help_text, 'html_class_attr': html_class_attr, 'css_classes': css_classes, 'field_name': bf.html_name, }) if top_errors: output.insert(0, error_row % top_errors) if hidden_fields: # Insert any hidden fields in the last row. str_hidden = ''.join(hidden_fields) if output: last_row = output[-1] # Chop off the trailing row_ender (e.g. '</td></tr>') and # insert the hidden fields. if not last_row.endswith(row_ender): # This can happen in the as_p() case (and possibly others # that users write): if there are only top errors, we may # not be able to conscript the last row for our purposes, # so insert a new, empty row. last_row = (normal_row % { 'errors': '', 'label': '', 'field': '', 'help_text': '', 'html_class_attr': html_class_attr, 'css_classes': '', 'field_name': '', }) output.append(last_row) output[-1] = last_row[:-len(row_ender)] + str_hidden + row_ender else: # If there aren't any rows in the output, just append the # hidden fields. output.append(str_hidden) return mark_safe('\n'.join(output)) def as_table(self): "Return this form rendered as HTML <tr>s -- excluding the <table></table>." return self._html_output( normal_row='<tr%(html_class_attr)s><th>%(label)s</th><td>%(errors)s%(field)s%(help_text)s</td></tr>', error_row='<tr><td colspan="2">%s</td></tr>', row_ender='</td></tr>', help_text_html='<br><span class="helptext">%s</span>', errors_on_separate_row=False, ) def as_ul(self): "Return this form rendered as HTML <li>s -- excluding the <ul></ul>." return self._html_output( normal_row='<li%(html_class_attr)s>%(errors)s%(label)s %(field)s%(help_text)s</li>', error_row='<li>%s</li>', row_ender='</li>', help_text_html=' <span class="helptext">%s</span>', errors_on_separate_row=False, ) def as_p(self): "Return this form rendered as HTML <p>s." return self._html_output( normal_row='<p%(html_class_attr)s>%(label)s %(field)s%(help_text)s</p>', error_row='%s', row_ender='</p>', help_text_html=' <span class="helptext">%s</span>', errors_on_separate_row=True, ) def non_field_errors(self): """ Return an ErrorList of errors that aren't associated with a particular field -- i.e., from Form.clean(). Return an empty ErrorList if there are none. """ return self.errors.get(NON_FIELD_ERRORS, self.error_class(error_class='nonfield')) def add_error(self, field, error): """ Update the content of `self._errors`. The `field` argument is the name of the field to which the errors should be added. If it's None, treat the errors as NON_FIELD_ERRORS. The `error` argument can be a single error, a list of errors, or a dictionary that maps field names to lists of errors. 
An "error" can be either a simple string or an instance of ValidationError with its message attribute set and a "list or dictionary" can be an actual `list` or `dict` or an instance of ValidationError with its `error_list` or `error_dict` attribute set. If `error` is a dictionary, the `field` argument *must* be None and errors will be added to the fields that correspond to the keys of the dictionary. """ if not isinstance(error, ValidationError): # Normalize to ValidationError and let its constructor # do the hard work of making sense of the input. error = ValidationError(error) if hasattr(error, 'error_dict'): if field is not None: raise TypeError( "The argument `field` must be `None` when the `error` " "argument contains errors for multiple fields." ) else: error = error.error_dict else: error = {field or NON_FIELD_ERRORS: error.error_list} for field, error_list in error.items(): if field not in self.errors: if field != NON_FIELD_ERRORS and field not in self.fields: raise ValueError( "'%s' has no field named '%s'." % (self.__class__.__name__, field)) if field == NON_FIELD_ERRORS: self._errors[field] = self.error_class(error_class='nonfield') else: self._errors[field] = self.error_class() self._errors[field].extend(error_list) if field in self.cleaned_data: del self.cleaned_data[field] def has_error(self, field, code=None): return field in self.errors and ( code is None or any(error.code == code for error in self.errors.as_data()[field]) ) def full_clean(self): """ Clean all of self.data and populate self._errors and self.cleaned_data. """ self._errors = ErrorDict() if not self.is_bound: # Stop further processing. return self.cleaned_data = {} # If the form is permitted to be empty, and none of the form data has # changed from the initial data, short circuit any validation. if self.empty_permitted and not self.has_changed(): return self._clean_fields() self._clean_form() self._post_clean() def _clean_fields(self): for name, bf in self._bound_items(): field = bf.field value = bf.initial if field.disabled else bf.data try: if isinstance(field, FileField): value = field.clean(value, bf.initial) else: value = field.clean(value) self.cleaned_data[name] = value if hasattr(self, 'clean_%s' % name): value = getattr(self, 'clean_%s' % name)() self.cleaned_data[name] = value except ValidationError as e: self.add_error(name, e) def _clean_form(self): try: cleaned_data = self.clean() except ValidationError as e: self.add_error(None, e) else: if cleaned_data is not None: self.cleaned_data = cleaned_data def _post_clean(self): """ An internal hook for performing additional cleaning after form cleaning is complete. Used for model validation in model forms. """ pass def clean(self): """ Hook for doing any extra form-wide cleaning after Field.clean() has been called on every field. Any ValidationError raised by this method will not be associated with a particular field; it will have a special-case association with the field named '__all__'. """ return self.cleaned_data def has_changed(self): """Return True if data differs from initial.""" return bool(self.changed_data) @cached_property def changed_data(self): return [name for name, bf in self._bound_items() if bf._has_changed()] @property def media(self): """Return all media required to render the widgets on this form.""" media = Media() for field in self.fields.values(): media = media + field.widget.media return media def is_multipart(self): """ Return True if the form needs to be multipart-encoded, i.e. it has FileInput, or False otherwise. 
""" return any(field.widget.needs_multipart_form for field in self.fields.values()) def hidden_fields(self): """ Return a list of all the BoundField objects that are hidden fields. Useful for manual form layout in templates. """ return [field for field in self if field.is_hidden] def visible_fields(self): """ Return a list of BoundField objects that aren't hidden fields. The opposite of the hidden_fields() method. """ return [field for field in self if not field.is_hidden] def get_initial_for_field(self, field, field_name): """ Return initial data for field on form. Use initial data from the form or the field, in that order. Evaluate callable values. """ value = self.initial.get(field_name, field.initial) if callable(value): value = value() # If this is an auto-generated default date, nix the microseconds # for standardized handling. See #22502. if (isinstance(value, (datetime.datetime, datetime.time)) and not field.widget.supports_microseconds): value = value.replace(microsecond=0) return value class Form(BaseForm, metaclass=DeclarativeFieldsMetaclass): "A collection of Fields, plus their associated data." # This is a separate class from BaseForm in order to abstract the way # self.fields is specified. This class (Form) is the one that does the # fancy metaclass stuff purely for the semantic sugar -- it allows one # to define a form using declarative syntax. # BaseForm itself has no way of designating self.fields.
# ---------------------------------------------------------------------------
# django/core/signing.py
# ---------------------------------------------------------------------------
""" Functions for creating and restoring url-safe signed JSON objects. The format used looks like this: >>> signing.dumps("hello") 'ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk' There are two components here, separated by a ':'. The first component is a URLsafe base64 encoded JSON of the object passed to dumps(). The second component is a base64 encoded hmac/SHA1 hash of "$first_component:$secret" signing.loads(s) checks the signature and returns the deserialized object. If the signature fails, a BadSignature exception is raised. >>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk") 'hello' >>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified") ... BadSignature: Signature failed: ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified You can optionally compress the JSON prior to base64 encoding it to save space, using the compress=True argument. This checks if compression actually helps and only applies compression if the result is a shorter string: >>> signing.dumps(list(range(1, 20)), compress=True) '.eJwFwcERACAIwLCF-rCiILN47r-GyZVJsNgkxaFxoDgxcOHGxMKD_T7vhAml:1QaUaL:BA0thEZrp4FQVXIXuOvYJtLJSrQ' The fact that the string is compressed is signalled by the prefixed '.' at the start of the base64 JSON. There are 65 url-safe characters: the 64 used by url-safe base64 and the ':'. These functions make use of all of them. """ import base64 import datetime import json import time import zlib from django.conf import settings from django.utils.crypto import constant_time_compare, salted_hmac from django.utils.encoding import force_bytes from django.utils.module_loading import import_string from django.utils.regex_helper import _lazy_re_compile _SEP_UNSAFE = _lazy_re_compile(r'^[A-z0-9-_=]*$') BASE62_ALPHABET = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' class BadSignature(Exception): """Signature does not match.""" pass class SignatureExpired(BadSignature): """Signature timestamp is older than required max_age.""" pass def b62_encode(s): if s == 0: return '0' sign = '-' if s < 0 else '' s = abs(s) encoded = '' while s > 0: s, remainder = divmod(s, 62) encoded = BASE62_ALPHABET[remainder] + encoded return sign + encoded def b62_decode(s): if s == '0': return 0 sign = 1 if s[0] == '-': s = s[1:] sign = -1 decoded = 0 for digit in s: decoded = decoded * 62 + BASE62_ALPHABET.index(digit) return sign * decoded def b64_encode(s): return base64.urlsafe_b64encode(s).strip(b'=') def b64_decode(s): pad = b'=' * (-len(s) % 4) return base64.urlsafe_b64decode(s + pad) def base64_hmac(salt, value, key, algorithm='sha1'): return b64_encode(salted_hmac(salt, value, key, algorithm=algorithm).digest()).decode() def get_cookie_signer(salt='django.core.signing.get_cookie_signer'): Signer = import_string(settings.SIGNING_BACKEND) key = force_bytes(settings.SECRET_KEY) # SECRET_KEY may be str or bytes. return Signer(b'django.http.cookies' + key, salt=salt) class JSONSerializer: """ Simple wrapper around json to be used in signing.dumps and signing.loads. """ def dumps(self, obj): return json.dumps(obj, separators=(',', ':')).encode('latin-1') def loads(self, data): return json.loads(data.decode('latin-1')) def dumps(obj, key=None, salt='django.core.signing', serializer=JSONSerializer, compress=False): """ Return URL-safe, hmac signed base64 compressed JSON string. If key is None, use settings.SECRET_KEY instead. The hmac algorithm is the default Signer algorithm. If compress is True (not the default), check if compressing using zlib can save some space. 
Prepend a '.' to signify compression. This is included in the signature, to protect against zip bombs. Salt can be used to namespace the hash, so that a signed string is only valid for a given namespace. Leaving this at the default value or re-using a salt value across different parts of your application without good cause is a security risk. The serializer is expected to return a bytestring. """ return TimestampSigner(key, salt=salt).sign_object(obj, serializer=serializer, compress=compress) def loads(s, key=None, salt='django.core.signing', serializer=JSONSerializer, max_age=None): """ Reverse of dumps(), raise BadSignature if signature fails. The serializer is expected to accept a bytestring. """ return TimestampSigner(key, salt=salt).unsign_object(s, serializer=serializer, max_age=max_age) class Signer: def __init__(self, key=None, sep=':', salt=None, algorithm=None): self.key = key or settings.SECRET_KEY self.sep = sep if _SEP_UNSAFE.match(self.sep): raise ValueError( 'Unsafe Signer separator: %r (cannot be empty or consist of ' 'only A-z0-9-_=)' % sep, ) self.salt = salt or '%s.%s' % (self.__class__.__module__, self.__class__.__name__) self.algorithm = algorithm or 'sha256' def signature(self, value): return base64_hmac(self.salt + 'signer', value, self.key, algorithm=self.algorithm) def sign(self, value): return '%s%s%s' % (value, self.sep, self.signature(value)) def unsign(self, signed_value): if self.sep not in signed_value: raise BadSignature('No "%s" found in value' % self.sep) value, sig = signed_value.rsplit(self.sep, 1) if constant_time_compare(sig, self.signature(value)): return value raise BadSignature('Signature "%s" does not match' % sig) def sign_object(self, obj, serializer=JSONSerializer, compress=False): """ Return URL-safe, hmac signed base64 compressed JSON string. If compress is True (not the default), check if compressing using zlib can save some space. Prepend a '.' to signify compression. This is included in the signature, to protect against zip bombs. The serializer is expected to return a bytestring. """ data = serializer().dumps(obj) # Flag for if it's been compressed or not. is_compressed = False if compress: # Avoid zlib dependency unless compress is being used. compressed = zlib.compress(data) if len(compressed) < (len(data) - 1): data = compressed is_compressed = True base64d = b64_encode(data).decode() if is_compressed: base64d = '.' + base64d return self.sign(base64d) def unsign_object(self, signed_obj, serializer=JSONSerializer, **kwargs): # Signer.unsign() returns str but base64 and zlib compression operate # on bytes. base64d = self.unsign(signed_obj, **kwargs).encode() decompress = base64d[:1] == b'.' if decompress: # It's compressed; uncompress it first. base64d = base64d[1:] data = b64_decode(base64d) if decompress: data = zlib.decompress(data) return serializer().loads(data) class TimestampSigner(Signer): def timestamp(self): return b62_encode(int(time.time())) def sign(self, value): value = '%s%s%s' % (value, self.sep, self.timestamp()) return super().sign(value) def unsign(self, value, max_age=None): """ Retrieve original value and check it wasn't signed more than max_age seconds ago. 
""" result = super().unsign(value) value, timestamp = result.rsplit(self.sep, 1) timestamp = b62_decode(timestamp) if max_age is not None: if isinstance(max_age, datetime.timedelta): max_age = max_age.total_seconds() # Check timestamp is not older than max_age age = time.time() - timestamp if age > max_age: raise SignatureExpired( 'Signature age %s > %s seconds' % (age, max_age)) return value
# ---------------------------------------------------------------------------
# django/core/validators.py
# ---------------------------------------------------------------------------
import ipaddress import re import warnings from pathlib import Path from urllib.parse import urlsplit, urlunsplit from django.core.exceptions import ValidationError from django.utils.deconstruct import deconstructible from django.utils.deprecation import RemovedInDjango41Warning from django.utils.encoding import punycode from django.utils.ipv6 import is_valid_ipv6_address from django.utils.regex_helper import _lazy_re_compile from django.utils.translation import gettext_lazy as _, ngettext_lazy # These values, if given to validate(), will trigger the self.required check. EMPTY_VALUES = (None, '', [], (), {}) @deconstructible class RegexValidator: regex = '' message = _('Enter a valid value.') code = 'invalid' inverse_match = False flags = 0 def __init__(self, regex=None, message=None, code=None, inverse_match=None, flags=None): if regex is not None: self.regex = regex if message is not None: self.message = message if code is not None: self.code = code if inverse_match is not None: self.inverse_match = inverse_match if flags is not None: self.flags = flags if self.flags and not isinstance(self.regex, str): raise TypeError("If the flags are set, regex must be a regular expression string.") self.regex = _lazy_re_compile(self.regex, self.flags) def __call__(self, value): """ Validate that the input contains (or does *not* contain, if inverse_match is True) a match for the regular expression. """ regex_matches = self.regex.search(str(value)) invalid_input = regex_matches if self.inverse_match else not regex_matches if invalid_input: raise ValidationError(self.message, code=self.code, params={'value': value}) def __eq__(self, other): return ( isinstance(other, RegexValidator) and self.regex.pattern == other.regex.pattern and self.regex.flags == other.regex.flags and (self.message == other.message) and (self.code == other.code) and (self.inverse_match == other.inverse_match) ) @deconstructible class URLValidator(RegexValidator): ul = '\u00a1-\uffff' # Unicode letters range (must not be a raw string). # IP patterns ipv4_re = r'(?:0|25[0-5]|2[0-4]\d|1\d?\d?|[1-9]\d?)(?:\.(?:0|25[0-5]|2[0-4]\d|1\d?\d?|[1-9]\d?)){3}' ipv6_re = r'\[[0-9a-f:.]+\]' # (simple regex, validated later) # Host patterns hostname_re = r'[a-z' + ul + r'0-9](?:[a-z' + ul + r'0-9-]{0,61}[a-z' + ul + r'0-9])?' # Max length for domain name labels is 63 characters per RFC 1034 sec. 3.1 domain_re = r'(?:\.(?!-)[a-z' + ul + r'0-9-]{1,63}(?<!-))*' tld_re = ( r'\.' # dot r'(?!-)' # can't start with a dash r'(?:[a-z' + ul + '-]{2,63}' # domain label r'|xn--[a-z0-9]{1,59})' # or punycode label r'(?<!-)' # can't end with a dash r'\.?' # may have a trailing dot ) host_re = '(' + hostname_re + domain_re + tld_re + '|localhost)' regex = _lazy_re_compile( r'^(?:[a-z0-9.+-]*)://' # scheme is validated separately r'(?:[^\s:@/]+(?::[^\s:@/]*)?@)?' # user:pass authentication r'(?:' + ipv4_re + '|' + ipv6_re + '|' + host_re + ')' r'(?::\d{1,5})?' # port r'(?:[/?#][^\s]*)?' # resource path r'\Z', re.IGNORECASE) message = _('Enter a valid URL.') schemes = ['http', 'https', 'ftp', 'ftps'] unsafe_chars = frozenset('\t\r\n') def __init__(self, schemes=None, **kwargs): super().__init__(**kwargs) if schemes is not None: self.schemes = schemes def __call__(self, value): if not isinstance(value, str): raise ValidationError(self.message, code=self.code, params={'value': value}) if self.unsafe_chars.intersection(value): raise ValidationError(self.message, code=self.code, params={'value': value}) # Check if the scheme is valid. 
scheme = value.split('://')[0].lower() if scheme not in self.schemes: raise ValidationError(self.message, code=self.code, params={'value': value}) # Then check full URL try: super().__call__(value) except ValidationError as e: # Trivial case failed. Try for possible IDN domain if value: try: scheme, netloc, path, query, fragment = urlsplit(value) except ValueError: # for example, "Invalid IPv6 URL" raise ValidationError(self.message, code=self.code, params={'value': value}) try: netloc = punycode(netloc) # IDN -> ACE except UnicodeError: # invalid domain part raise e url = urlunsplit((scheme, netloc, path, query, fragment)) super().__call__(url) else: raise else: # Now verify IPv6 in the netloc part host_match = re.search(r'^\[(.+)\](?::\d{1,5})?$', urlsplit(value).netloc) if host_match: potential_ip = host_match[1] try: validate_ipv6_address(potential_ip) except ValidationError: raise ValidationError(self.message, code=self.code, params={'value': value}) # The maximum length of a full host name is 253 characters per RFC 1034 # section 3.1. It's defined to be 255 bytes or less, but this includes # one byte for the length of the name and one byte for the trailing dot # that's used to indicate absolute names in DNS. if len(urlsplit(value).hostname) > 253: raise ValidationError(self.message, code=self.code, params={'value': value}) integer_validator = RegexValidator( _lazy_re_compile(r'^-?\d+\Z'), message=_('Enter a valid integer.'), code='invalid', ) def validate_integer(value): return integer_validator(value) @deconstructible class EmailValidator: message = _('Enter a valid email address.') code = 'invalid' user_regex = _lazy_re_compile( r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\Z" # dot-atom r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"\Z)', # quoted-string re.IGNORECASE) domain_regex = _lazy_re_compile( # max length for domain name labels is 63 characters per RFC 1034 r'((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+)(?:[A-Z0-9-]{2,63}(?<!-))\Z', re.IGNORECASE) literal_regex = _lazy_re_compile( # literal form, ipv4 or ipv6 address (SMTP 4.1.3) r'\[([A-f0-9:.]+)\]\Z', re.IGNORECASE) domain_allowlist = ['localhost'] @property def domain_whitelist(self): warnings.warn( 'The domain_whitelist attribute is deprecated in favor of ' 'domain_allowlist.', RemovedInDjango41Warning, stacklevel=2, ) return self.domain_allowlist @domain_whitelist.setter def domain_whitelist(self, allowlist): warnings.warn( 'The domain_whitelist attribute is deprecated in favor of ' 'domain_allowlist.', RemovedInDjango41Warning, stacklevel=2, ) self.domain_allowlist = allowlist def __init__(self, message=None, code=None, allowlist=None, *, whitelist=None): if whitelist is not None: allowlist = whitelist warnings.warn( 'The whitelist argument is deprecated in favor of allowlist.', RemovedInDjango41Warning, stacklevel=2, ) if message is not None: self.message = message if code is not None: self.code = code if allowlist is not None: self.domain_allowlist = allowlist def __call__(self, value): if not value or '@' not in value: raise ValidationError(self.message, code=self.code, params={'value': value}) user_part, domain_part = value.rsplit('@', 1) if not self.user_regex.match(user_part): raise ValidationError(self.message, code=self.code, params={'value': value}) if (domain_part not in self.domain_allowlist and not self.validate_domain_part(domain_part)): # Try for possible IDN domain-part try: domain_part = punycode(domain_part) except UnicodeError: pass else: if 
self.validate_domain_part(domain_part): return raise ValidationError(self.message, code=self.code, params={'value': value}) def validate_domain_part(self, domain_part): if self.domain_regex.match(domain_part): return True literal_match = self.literal_regex.match(domain_part) if literal_match: ip_address = literal_match[1] try: validate_ipv46_address(ip_address) return True except ValidationError: pass return False def __eq__(self, other): return ( isinstance(other, EmailValidator) and (self.domain_allowlist == other.domain_allowlist) and (self.message == other.message) and (self.code == other.code) ) validate_email = EmailValidator() slug_re = _lazy_re_compile(r'^[-a-zA-Z0-9_]+\Z') validate_slug = RegexValidator( slug_re, # Translators: "letters" means latin letters: a-z and A-Z. _('Enter a valid “slug” consisting of letters, numbers, underscores or hyphens.'), 'invalid' ) slug_unicode_re = _lazy_re_compile(r'^[-\w]+\Z') validate_unicode_slug = RegexValidator( slug_unicode_re, _('Enter a valid “slug” consisting of Unicode letters, numbers, underscores, or hyphens.'), 'invalid' ) def validate_ipv4_address(value): try: ipaddress.IPv4Address(value) except ValueError: raise ValidationError(_('Enter a valid IPv4 address.'), code='invalid', params={'value': value}) else: # Leading zeros are forbidden to avoid ambiguity with the octal # notation. This restriction is included in Python 3.9.5+. # TODO: Remove when dropping support for PY39. if any( octet != '0' and octet[0] == '0' for octet in value.split('.') ): raise ValidationError( _('Enter a valid IPv4 address.'), code='invalid', params={'value': value}, ) def validate_ipv6_address(value): if not is_valid_ipv6_address(value): raise ValidationError(_('Enter a valid IPv6 address.'), code='invalid', params={'value': value}) def validate_ipv46_address(value): try: validate_ipv4_address(value) except ValidationError: try: validate_ipv6_address(value) except ValidationError: raise ValidationError(_('Enter a valid IPv4 or IPv6 address.'), code='invalid', params={'value': value}) ip_address_validator_map = { 'both': ([validate_ipv46_address], _('Enter a valid IPv4 or IPv6 address.')), 'ipv4': ([validate_ipv4_address], _('Enter a valid IPv4 address.')), 'ipv6': ([validate_ipv6_address], _('Enter a valid IPv6 address.')), } def ip_address_validators(protocol, unpack_ipv4): """ Depending on the given parameters, return the appropriate validators for the GenericIPAddressField. """ if protocol != 'both' and unpack_ipv4: raise ValueError( "You can only use `unpack_ipv4` if `protocol` is set to 'both'") try: return ip_address_validator_map[protocol.lower()] except KeyError: raise ValueError("The protocol '%s' is unknown. Supported: %s" % (protocol, list(ip_address_validator_map))) def int_list_validator(sep=',', message=None, code='invalid', allow_negative=False): regexp = _lazy_re_compile(r'^%(neg)s\d+(?:%(sep)s%(neg)s\d+)*\Z' % { 'neg': '(-)?' 
if allow_negative else '', 'sep': re.escape(sep), }) return RegexValidator(regexp, message=message, code=code) validate_comma_separated_integer_list = int_list_validator( message=_('Enter only digits separated by commas.'), ) @deconstructible class BaseValidator: message = _('Ensure this value is %(limit_value)s (it is %(show_value)s).') code = 'limit_value' def __init__(self, limit_value, message=None): self.limit_value = limit_value if message: self.message = message def __call__(self, value): cleaned = self.clean(value) limit_value = self.limit_value() if callable(self.limit_value) else self.limit_value params = {'limit_value': limit_value, 'show_value': cleaned, 'value': value} if self.compare(cleaned, limit_value): raise ValidationError(self.message, code=self.code, params=params) def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return ( self.limit_value == other.limit_value and self.message == other.message and self.code == other.code ) def compare(self, a, b): return a is not b def clean(self, x): return x @deconstructible class MaxValueValidator(BaseValidator): message = _('Ensure this value is less than or equal to %(limit_value)s.') code = 'max_value' def compare(self, a, b): return a > b @deconstructible class MinValueValidator(BaseValidator): message = _('Ensure this value is greater than or equal to %(limit_value)s.') code = 'min_value' def compare(self, a, b): return a < b @deconstructible class MinLengthValidator(BaseValidator): message = ngettext_lazy( 'Ensure this value has at least %(limit_value)d character (it has %(show_value)d).', 'Ensure this value has at least %(limit_value)d characters (it has %(show_value)d).', 'limit_value') code = 'min_length' def compare(self, a, b): return a < b def clean(self, x): return len(x) @deconstructible class MaxLengthValidator(BaseValidator): message = ngettext_lazy( 'Ensure this value has at most %(limit_value)d character (it has %(show_value)d).', 'Ensure this value has at most %(limit_value)d characters (it has %(show_value)d).', 'limit_value') code = 'max_length' def compare(self, a, b): return a > b def clean(self, x): return len(x) @deconstructible class DecimalValidator: """ Validate that the input does not exceed the maximum number of digits expected, otherwise raise ValidationError. """ messages = { 'invalid': _('Enter a number.'), 'max_digits': ngettext_lazy( 'Ensure that there are no more than %(max)s digit in total.', 'Ensure that there are no more than %(max)s digits in total.', 'max' ), 'max_decimal_places': ngettext_lazy( 'Ensure that there are no more than %(max)s decimal place.', 'Ensure that there are no more than %(max)s decimal places.', 'max' ), 'max_whole_digits': ngettext_lazy( 'Ensure that there are no more than %(max)s digit before the decimal point.', 'Ensure that there are no more than %(max)s digits before the decimal point.', 'max' ), } def __init__(self, max_digits, decimal_places): self.max_digits = max_digits self.decimal_places = decimal_places def __call__(self, value): digit_tuple, exponent = value.as_tuple()[1:] if exponent in {'F', 'n', 'N'}: raise ValidationError(self.messages['invalid'], code='invalid', params={'value': value}) if exponent >= 0: # A positive exponent adds that many trailing zeros. 
digits = len(digit_tuple) + exponent decimals = 0 else: # If the absolute value of the negative exponent is larger than the # number of digits, then it's the same as the number of digits, # because it'll consume all of the digits in digit_tuple and then # add abs(exponent) - len(digit_tuple) leading zeros after the # decimal point. if abs(exponent) > len(digit_tuple): digits = decimals = abs(exponent) else: digits = len(digit_tuple) decimals = abs(exponent) whole_digits = digits - decimals if self.max_digits is not None and digits > self.max_digits: raise ValidationError( self.messages['max_digits'], code='max_digits', params={'max': self.max_digits, 'value': value}, ) if self.decimal_places is not None and decimals > self.decimal_places: raise ValidationError( self.messages['max_decimal_places'], code='max_decimal_places', params={'max': self.decimal_places, 'value': value}, ) if (self.max_digits is not None and self.decimal_places is not None and whole_digits > (self.max_digits - self.decimal_places)): raise ValidationError( self.messages['max_whole_digits'], code='max_whole_digits', params={'max': (self.max_digits - self.decimal_places), 'value': value}, ) def __eq__(self, other): return ( isinstance(other, self.__class__) and self.max_digits == other.max_digits and self.decimal_places == other.decimal_places ) @deconstructible class FileExtensionValidator: message = _( 'File extension “%(extension)s” is not allowed. ' 'Allowed extensions are: %(allowed_extensions)s.' ) code = 'invalid_extension' def __init__(self, allowed_extensions=None, message=None, code=None): if allowed_extensions is not None: allowed_extensions = [allowed_extension.lower() for allowed_extension in allowed_extensions] self.allowed_extensions = allowed_extensions if message is not None: self.message = message if code is not None: self.code = code def __call__(self, value): extension = Path(value.name).suffix[1:].lower() if self.allowed_extensions is not None and extension not in self.allowed_extensions: raise ValidationError( self.message, code=self.code, params={ 'extension': extension, 'allowed_extensions': ', '.join(self.allowed_extensions), 'value': value, } ) def __eq__(self, other): return ( isinstance(other, self.__class__) and self.allowed_extensions == other.allowed_extensions and self.message == other.message and self.code == other.code ) def get_available_image_extensions(): try: from PIL import Image except ImportError: return [] else: Image.init() return [ext.lower()[1:] for ext in Image.EXTENSION] def validate_image_file_extension(value): return FileExtensionValidator(allowed_extensions=get_available_image_extensions())(value) @deconstructible class ProhibitNullCharactersValidator: """Validate that the string doesn't contain the null character.""" message = _('Null characters are not allowed.') code = 'null_characters_not_allowed' def __init__(self, message=None, code=None): if message is not None: self.message = message if code is not None: self.code = code def __call__(self, value): if '\x00' in str(value): raise ValidationError(self.message, code=self.code, params={'value': value}) def __eq__(self, other): return ( isinstance(other, self.__class__) and self.message == other.message and self.code == other.code )
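

# ---------------------------------------------------------------------------
# Usage sketch (not part of Django itself): composing the validators defined
# above. Plain-string messages are passed so no translation setup is needed;
# the validators and sample values are hypothetical.
if __name__ == '__main__':
    even_validator = RegexValidator(
        regex=r'[02468]$',
        message='Value must end in an even digit.',
    )
    port_validator = MaxValueValidator(65535, message='Port number too large.')

    checks = [(even_validator, '42'), (even_validator, '7'), (port_validator, 70000)]
    for validator, value in checks:
        try:
            validator(value)
            print(value, 'ok')
        except ValidationError as exc:
            print(value, 'rejected:', exc.messages)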
# ---------------------------------------------------------------------------
# django/http/multipartparser.py
# ---------------------------------------------------------------------------
""" Multi-part parsing for file uploads. Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to file upload handlers for processing. """ import base64 import binascii import cgi import collections import html from urllib.parse import unquote from django.conf import settings from django.core.exceptions import ( RequestDataTooBig, SuspiciousMultipartForm, TooManyFieldsSent, ) from django.core.files.uploadhandler import ( SkipFile, StopFutureHandlers, StopUpload, ) from django.utils.datastructures import MultiValueDict from django.utils.encoding import force_str __all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted') class MultiPartParserError(Exception): pass class InputStreamExhausted(Exception): """ No more reads are allowed from this device. """ pass RAW = "raw" FILE = "file" FIELD = "field" class MultiPartParser: """ A rfc2388 multipart/form-data parser. ``MultiValueDict.parse()`` reads the input stream in ``chunk_size`` chunks and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``. """ def __init__(self, META, input_data, upload_handlers, encoding=None): """ Initialize the MultiPartParser object. :META: The standard ``META`` dictionary in Django request objects. :input_data: The raw post data, as a file-like object. :upload_handlers: A list of UploadHandler instances that perform operations on the uploaded data. :encoding: The encoding with which to treat the incoming data. """ # Content-Type should contain multipart and the boundary information. content_type = META.get('CONTENT_TYPE', '') if not content_type.startswith('multipart/'): raise MultiPartParserError('Invalid Content-Type: %s' % content_type) # Parse the header to get the boundary to split the parts. try: ctypes, opts = parse_header(content_type.encode('ascii')) except UnicodeEncodeError: raise MultiPartParserError('Invalid non-ASCII Content-Type in multipart: %s' % force_str(content_type)) boundary = opts.get('boundary') if not boundary or not cgi.valid_boundary(boundary): raise MultiPartParserError('Invalid boundary in multipart: %s' % force_str(boundary)) # Content-Length should contain the length of the body we are about # to receive. try: content_length = int(META.get('CONTENT_LENGTH', 0)) except (ValueError, TypeError): content_length = 0 if content_length < 0: # This means we shouldn't continue...raise an error. raise MultiPartParserError("Invalid content length: %r" % content_length) if isinstance(boundary, str): boundary = boundary.encode('ascii') self._boundary = boundary self._input_data = input_data # For compatibility with low-level network APIs (with 32-bit integers), # the chunk size should be < 2^31, but still divisible by 4. possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size] self._chunk_size = min([2 ** 31 - 4] + possible_sizes) self._meta = META self._encoding = encoding or settings.DEFAULT_CHARSET self._content_length = content_length self._upload_handlers = upload_handlers def parse(self): """ Parse the POST data and break it into a FILES MultiValueDict and a POST MultiValueDict. Return a tuple containing the POST and FILES dictionary, respectively. """ from django.http import QueryDict encoding = self._encoding handlers = self._upload_handlers # HTTP spec says that Content-Length >= 0 is valid # handling content-length == 0 before continuing if self._content_length == 0: return QueryDict(encoding=self._encoding), MultiValueDict() # See if any of the handlers take care of the parsing. 
# This allows overriding everything if need be. for handler in handlers: result = handler.handle_raw_input( self._input_data, self._meta, self._content_length, self._boundary, encoding, ) # Check to see if it was handled if result is not None: return result[0], result[1] # Create the data structures to be used later. self._post = QueryDict(mutable=True) self._files = MultiValueDict() # Instantiate the parser and stream: stream = LazyStream(ChunkIter(self._input_data, self._chunk_size)) # Whether or not to signal a file-completion at the beginning of the loop. old_field_name = None counters = [0] * len(handlers) # Number of bytes that have been read. num_bytes_read = 0 # To count the number of keys in the request. num_post_keys = 0 # To limit the amount of data read from the request. read_size = None # Whether a file upload is finished. uploaded_file = True try: for item_type, meta_data, field_stream in Parser(stream, self._boundary): if old_field_name: # We run this at the beginning of the next loop # since we cannot be sure a file is complete until # we hit the next boundary/part of the multipart content. self.handle_file_complete(old_field_name, counters) old_field_name = None uploaded_file = True try: disposition = meta_data['content-disposition'][1] field_name = disposition['name'].strip() except (KeyError, IndexError, AttributeError): continue transfer_encoding = meta_data.get('content-transfer-encoding') if transfer_encoding is not None: transfer_encoding = transfer_encoding[0].strip() field_name = force_str(field_name, encoding, errors='replace') if item_type == FIELD: # Avoid storing more than DATA_UPLOAD_MAX_NUMBER_FIELDS. num_post_keys += 1 if (settings.DATA_UPLOAD_MAX_NUMBER_FIELDS is not None and settings.DATA_UPLOAD_MAX_NUMBER_FIELDS < num_post_keys): raise TooManyFieldsSent( 'The number of GET/POST parameters exceeded ' 'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.' ) # Avoid reading more than DATA_UPLOAD_MAX_MEMORY_SIZE. if settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None: read_size = settings.DATA_UPLOAD_MAX_MEMORY_SIZE - num_bytes_read # This is a post field, we can just set it in the post if transfer_encoding == 'base64': raw_data = field_stream.read(size=read_size) num_bytes_read += len(raw_data) try: data = base64.b64decode(raw_data) except binascii.Error: data = raw_data else: data = field_stream.read(size=read_size) num_bytes_read += len(data) # Add two here to make the check consistent with the # x-www-form-urlencoded check that includes '&='. num_bytes_read += len(field_name) + 2 if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and num_bytes_read > settings.DATA_UPLOAD_MAX_MEMORY_SIZE): raise RequestDataTooBig('Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.') self._post.appendlist(field_name, force_str(data, encoding, errors='replace')) elif item_type == FILE: # This is a file, use the handler... 
file_name = disposition.get('filename') if file_name: file_name = force_str(file_name, encoding, errors='replace') file_name = self.sanitize_file_name(file_name) if not file_name: continue content_type, content_type_extra = meta_data.get('content-type', ('', {})) content_type = content_type.strip() charset = content_type_extra.get('charset') try: content_length = int(meta_data.get('content-length')[0]) except (IndexError, TypeError, ValueError): content_length = None counters = [0] * len(handlers) uploaded_file = False try: for handler in handlers: try: handler.new_file( field_name, file_name, content_type, content_length, charset, content_type_extra, ) except StopFutureHandlers: break for chunk in field_stream: if transfer_encoding == 'base64': # We only special-case base64 transfer encoding # We should always decode base64 chunks by multiple of 4, # ignoring whitespace. stripped_chunk = b"".join(chunk.split()) remaining = len(stripped_chunk) % 4 while remaining != 0: over_chunk = field_stream.read(4 - remaining) stripped_chunk += b"".join(over_chunk.split()) remaining = len(stripped_chunk) % 4 try: chunk = base64.b64decode(stripped_chunk) except Exception as exc: # Since this is only a chunk, any error is an unfixable error. raise MultiPartParserError("Could not decode base64 data.") from exc for i, handler in enumerate(handlers): chunk_length = len(chunk) chunk = handler.receive_data_chunk(chunk, counters[i]) counters[i] += chunk_length if chunk is None: # Don't continue if the chunk received by # the handler is None. break except SkipFile: self._close_files() # Just use up the rest of this file... exhaust(field_stream) else: # Handle file upload completions on next iteration. old_field_name = field_name else: # If this is neither a FIELD or a FILE, just exhaust the stream. exhaust(stream) except StopUpload as e: self._close_files() if not e.connection_reset: exhaust(self._input_data) else: if not uploaded_file: for handler in handlers: handler.upload_interrupted() # Make sure that the request data is all fed exhaust(self._input_data) # Signal that the upload has completed. # any() shortcircuits if a handler's upload_complete() returns a value. any(handler.upload_complete() for handler in handlers) self._post._mutable = False return self._post, self._files def handle_file_complete(self, old_field_name, counters): """ Handle all the signaling that takes place when a file is complete. """ for i, handler in enumerate(self._upload_handlers): file_obj = handler.file_complete(counters[i]) if file_obj: # If it returns a file object, then set the files dict. self._files.appendlist(force_str(old_field_name, self._encoding, errors='replace'), file_obj) break def sanitize_file_name(self, file_name): """ Sanitize the filename of an upload. Remove all possible path separators, even though that might remove more than actually required by the target system. Filenames that could potentially cause problems (current/parent dir) are also discarded. It should be noted that this function could still return a "filepath" like "C:some_file.txt" which is handled later on by the storage layer. So while this function does sanitize filenames to some extent, the resulting filename should still be considered as untrusted user input. """ file_name = html.unescape(file_name) file_name = file_name.rsplit('/')[-1] file_name = file_name.rsplit('\\')[-1] if file_name in {'', '.', '..'}: return None return file_name IE_sanitize = sanitize_file_name def _close_files(self): # Free up all file handles. 
        # FIXME: this currently assumes that upload handlers store the file as 'file'
        # We should document that... (Maybe add handler.free_file to complement new_file)
        for handler in self._upload_handlers:
            if hasattr(handler, 'file'):
                handler.file.close()


class LazyStream:
    """
    The LazyStream wrapper allows one to get and "unget" bytes from a stream.

    Given a producer object (an iterator that yields bytestrings), the
    LazyStream object will support iteration, reading, and keeping a
    "look-back" variable in case you need to "unget" some bytes.
    """
    def __init__(self, producer, length=None):
        """
        Every LazyStream must have a producer when instantiated.

        A producer is an iterable that returns a string each time it
        is called.
        """
        self._producer = producer
        self._empty = False
        self._leftover = b''
        self.length = length
        self.position = 0
        self._remaining = length
        self._unget_history = []

    def tell(self):
        return self.position

    def read(self, size=None):
        def parts():
            remaining = self._remaining if size is None else size
            # do the whole thing in one shot if no limit was provided.
            if remaining is None:
                yield b''.join(self)
                return

            # otherwise do some bookkeeping to return exactly enough
            # of the stream and stashing any extra content we get from
            # the producer
            while remaining != 0:
                assert remaining > 0, 'remaining bytes to read should never go negative'

                try:
                    chunk = next(self)
                except StopIteration:
                    return
                else:
                    emitting = chunk[:remaining]
                    self.unget(chunk[remaining:])
                    remaining -= len(emitting)
                    yield emitting

        return b''.join(parts())

    def __next__(self):
        """
        Used when the exact number of bytes to read is unimportant.

        Return whatever chunk is conveniently returned from the iterator.
        Useful to avoid unnecessary bookkeeping if performance is an issue.
        """
        if self._leftover:
            output = self._leftover
            self._leftover = b''
        else:
            output = next(self._producer)
            self._unget_history = []
        self.position += len(output)
        return output

    def close(self):
        """
        Used to invalidate/disable this lazy stream.

        Replace the producer with an empty list. Any leftover bytes that have
        already been read will still be reported upon read() and/or next().
        """
        self._producer = []

    def __iter__(self):
        return self

    def unget(self, bytes):
        """
        Place bytes back onto the front of the lazy stream.

        Future calls to read() will return those bytes first. The
        stream position and thus tell() will be rewound.
        """
        if not bytes:
            return
        self._update_unget_history(len(bytes))
        self.position -= len(bytes)
        self._leftover = bytes + self._leftover

    def _update_unget_history(self, num_bytes):
        """
        Update the unget history as a sanity check to see if we've pushed
        back the same number of bytes in one chunk. If we keep ungetting the
        same number of bytes many times (here, 50), we're most likely in an
        infinite loop of some sort. This is usually caused by a
        maliciously-malformed MIME request.
        """
        self._unget_history = [num_bytes] + self._unget_history[:49]
        number_equal = len([
            current_number for current_number in self._unget_history
            if current_number == num_bytes
        ])

        if number_equal > 40:
            raise SuspiciousMultipartForm(
                "The multipart parser got stuck, which shouldn't happen with"
                " normal uploaded files. Check for malicious upload activity;"
                " if there is none, report this to the Django developers."
            )


class ChunkIter:
    """
    An iterable that will yield chunks of data. Given a file-like object as
    the constructor, yield chunks of read operations from that object.
""" def __init__(self, flo, chunk_size=64 * 1024): self.flo = flo self.chunk_size = chunk_size def __next__(self): try: data = self.flo.read(self.chunk_size) except InputStreamExhausted: raise StopIteration() if data: return data else: raise StopIteration() def __iter__(self): return self class InterBoundaryIter: """ A Producer that will iterate over boundaries. """ def __init__(self, stream, boundary): self._stream = stream self._boundary = boundary def __iter__(self): return self def __next__(self): try: return LazyStream(BoundaryIter(self._stream, self._boundary)) except InputStreamExhausted: raise StopIteration() class BoundaryIter: """ A Producer that is sensitive to boundaries. Will happily yield bytes until a boundary is found. Will yield the bytes before the boundary, throw away the boundary bytes themselves, and push the post-boundary bytes back on the stream. The future calls to next() after locating the boundary will raise a StopIteration exception. """ def __init__(self, stream, boundary): self._stream = stream self._boundary = boundary self._done = False # rollback an additional six bytes because the format is like # this: CRLF<boundary>[--CRLF] self._rollback = len(boundary) + 6 # Try to use mx fast string search if available. Otherwise # use Python find. Wrap the latter for consistency. unused_char = self._stream.read(1) if not unused_char: raise InputStreamExhausted() self._stream.unget(unused_char) def __iter__(self): return self def __next__(self): if self._done: raise StopIteration() stream = self._stream rollback = self._rollback bytes_read = 0 chunks = [] for bytes in stream: bytes_read += len(bytes) chunks.append(bytes) if bytes_read > rollback: break if not bytes: break else: self._done = True if not chunks: raise StopIteration() chunk = b''.join(chunks) boundary = self._find_boundary(chunk) if boundary: end, next = boundary stream.unget(chunk[next:]) self._done = True return chunk[:end] else: # make sure we don't treat a partial boundary (and # its separators) as data if not chunk[:-rollback]: # and len(chunk) >= (len(self._boundary) + 6): # There's nothing left, we should just return and mark as done. self._done = True return chunk else: stream.unget(chunk[-rollback:]) return chunk[:-rollback] def _find_boundary(self, data): """ Find a multipart boundary in data. Should no boundary exist in the data, return None. Otherwise, return a tuple containing the indices of the following: * the end of current encapsulation * the start of the next encapsulation """ index = data.find(self._boundary) if index < 0: return None else: end = index next = index + len(self._boundary) # backup over CRLF last = max(0, end - 1) if data[last:last + 1] == b'\n': end -= 1 last = max(0, end - 1) if data[last:last + 1] == b'\r': end -= 1 return end, next def exhaust(stream_or_iterable): """Exhaust an iterator or stream.""" try: iterator = iter(stream_or_iterable) except TypeError: iterator = ChunkIter(stream_or_iterable, 16384) collections.deque(iterator, maxlen=0) # consume iterator quickly. def parse_boundary_stream(stream, max_header_size): """ Parse one and exactly one stream that encapsulates a boundary. """ # Stream at beginning of header, look for end of header # and parse it if found. The header must fit within one # chunk. chunk = stream.read(max_header_size) # 'find' returns the top of these four bytes, so we'll # need to munch them later to prevent them from polluting # the payload. 
header_end = chunk.find(b'\r\n\r\n') def _parse_header(line): main_value_pair, params = parse_header(line) try: name, value = main_value_pair.split(':', 1) except ValueError: raise ValueError("Invalid header: %r" % line) return name, (value, params) if header_end == -1: # we find no header, so we just mark this fact and pass on # the stream verbatim stream.unget(chunk) return (RAW, {}, stream) header = chunk[:header_end] # here we place any excess chunk back onto the stream, as # well as throwing away the CRLFCRLF bytes from above. stream.unget(chunk[header_end + 4:]) TYPE = RAW outdict = {} # Eliminate blank lines for line in header.split(b'\r\n'): # This terminology ("main value" and "dictionary of # parameters") is from the Python docs. try: name, (value, params) = _parse_header(line) except ValueError: continue if name == 'content-disposition': TYPE = FIELD if params.get('filename'): TYPE = FILE outdict[name] = value, params if TYPE == RAW: stream.unget(chunk) return (TYPE, outdict, stream) class Parser: def __init__(self, stream, boundary): self._stream = stream self._separator = b'--' + boundary def __iter__(self): boundarystream = InterBoundaryIter(self._stream, self._separator) for sub_stream in boundarystream: # Iterate over each part yield parse_boundary_stream(sub_stream, 1024) def parse_header(line): """ Parse the header into a key-value. Input (line): bytes, output: str for key/name, bytes for values which will be decoded later. """ plist = _parse_header_params(b';' + line) key = plist.pop(0).lower().decode('ascii') pdict = {} for p in plist: i = p.find(b'=') if i >= 0: has_encoding = False name = p[:i].strip().lower().decode('ascii') if name.endswith('*'): # Lang/encoding embedded in the value (like "filename*=UTF-8''file.ext") # http://tools.ietf.org/html/rfc2231#section-4 name = name[:-1] if p.count(b"'") == 2: has_encoding = True value = p[i + 1:].strip() if len(value) >= 2 and value[:1] == value[-1:] == b'"': value = value[1:-1] value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"') if has_encoding: encoding, lang, value = value.split(b"'") value = unquote(value.decode(), encoding=encoding.decode()) pdict[name] = value return key, pdict def _parse_header_params(s): plist = [] while s[:1] == b';': s = s[1:] end = s.find(b';') while end > 0 and s.count(b'"', 0, end) % 2: end = s.find(b';', end + 1) if end < 0: end = len(s) f = s[:end] plist.append(f.strip()) s = s[end:] return plist
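

# ---------------------------------------------------------------------------
# Usage sketch (not part of Django itself): the lower-level helpers above can
# be exercised on their own; the sample header and payload are hypothetical.
if __name__ == '__main__':
    import io

    # parse_header() splits a raw header value into its main value and a dict
    # of parameters (parameter values come back as bytes, quotes stripped).
    key, params = parse_header(b'form-data; name="avatar"; filename="cat.png"')
    print(key)                 # form-data
    print(params['filename'])  # b'cat.png'

    # LazyStream supports reading an exact number of bytes and "ungetting"
    # bytes back onto the front of the stream.
    stream = LazyStream(ChunkIter(io.BytesIO(b'hello world'), chunk_size=4))
    print(stream.read(5))  # b'hello'
    stream.unget(b'HELLO')
    print(stream.read())   # b'HELLO world'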
# ---------------------------------------------------------------------------
# django/http/response.py
# ---------------------------------------------------------------------------
import datetime import json import mimetypes import os import re import sys import time from collections.abc import Mapping from email.header import Header from http.client import responses from urllib.parse import quote, urlparse from django.conf import settings from django.core import signals, signing from django.core.exceptions import DisallowedRedirect from django.core.serializers.json import DjangoJSONEncoder from django.http.cookie import SimpleCookie from django.utils import timezone from django.utils.datastructures import ( CaseInsensitiveMapping, _destruct_iterable_mapping_values, ) from django.utils.encoding import iri_to_uri from django.utils.http import http_date from django.utils.regex_helper import _lazy_re_compile _charset_from_content_type_re = _lazy_re_compile(r';\s*charset=(?P<charset>[^\s;]+)', re.I) class ResponseHeaders(CaseInsensitiveMapping): def __init__(self, data): """ Populate the initial data using __setitem__ to ensure values are correctly encoded. """ if not isinstance(data, Mapping): data = {k: v for k, v in _destruct_iterable_mapping_values(data)} self._store = {} for header, value in data.items(): self[header] = value def _convert_to_charset(self, value, charset, mime_encode=False): """ Convert headers key/value to ascii/latin-1 native strings. `charset` must be 'ascii' or 'latin-1'. If `mime_encode` is True and `value` can't be represented in the given charset, apply MIME-encoding. """ if not isinstance(value, (bytes, str)): value = str(value) if ( (isinstance(value, bytes) and (b'\n' in value or b'\r' in value)) or (isinstance(value, str) and ('\n' in value or '\r' in value)) ): raise BadHeaderError("Header values can't contain newlines (got %r)" % value) try: if isinstance(value, str): # Ensure string is valid in given charset value.encode(charset) else: # Convert bytestring using given charset value = value.decode(charset) except UnicodeError as e: if mime_encode: value = Header(value, 'utf-8', maxlinelen=sys.maxsize).encode() else: e.reason += ', HTTP response headers must be in %s format' % charset raise return value def __delitem__(self, key): self.pop(key) def __setitem__(self, key, value): key = self._convert_to_charset(key, 'ascii') value = self._convert_to_charset(value, 'latin-1', mime_encode=True) self._store[key.lower()] = (key, value) def pop(self, key, default=None): return self._store.pop(key.lower(), default) def setdefault(self, key, value): if key not in self: self[key] = value class BadHeaderError(ValueError): pass class HttpResponseBase: """ An HTTP response base class with dictionary-accessed headers. This class doesn't handle content. It should not be used directly. Use the HttpResponse and StreamingHttpResponse subclasses instead. """ status_code = 200 def __init__(self, content_type=None, status=None, reason=None, charset=None, headers=None): self.headers = ResponseHeaders(headers or {}) self._charset = charset if content_type and 'Content-Type' in self.headers: raise ValueError( "'headers' must not contain 'Content-Type' when the " "'content_type' parameter is provided." ) if 'Content-Type' not in self.headers: if content_type is None: content_type = 'text/html; charset=%s' % self.charset self.headers['Content-Type'] = content_type self._resource_closers = [] # This parameter is set by the handler. It's necessary to preserve the # historical behavior of request_finished. 
self._handler_class = None self.cookies = SimpleCookie() self.closed = False if status is not None: try: self.status_code = int(status) except (ValueError, TypeError): raise TypeError('HTTP status code must be an integer.') if not 100 <= self.status_code <= 599: raise ValueError('HTTP status code must be an integer from 100 to 599.') self._reason_phrase = reason @property def reason_phrase(self): if self._reason_phrase is not None: return self._reason_phrase # Leave self._reason_phrase unset in order to use the default # reason phrase for status code. return responses.get(self.status_code, 'Unknown Status Code') @reason_phrase.setter def reason_phrase(self, value): self._reason_phrase = value @property def charset(self): if self._charset is not None: return self._charset content_type = self.get('Content-Type', '') matched = _charset_from_content_type_re.search(content_type) if matched: # Extract the charset and strip its double quotes return matched['charset'].replace('"', '') return settings.DEFAULT_CHARSET @charset.setter def charset(self, value): self._charset = value def serialize_headers(self): """HTTP headers as a bytestring.""" return b'\r\n'.join([ key.encode('ascii') + b': ' + value.encode('latin-1') for key, value in self.headers.items() ]) __bytes__ = serialize_headers @property def _content_type_for_repr(self): return ', "%s"' % self.headers['Content-Type'] if 'Content-Type' in self.headers else '' def __setitem__(self, header, value): self.headers[header] = value def __delitem__(self, header): del self.headers[header] def __getitem__(self, header): return self.headers[header] def has_header(self, header): """Case-insensitive check for a header.""" return header in self.headers __contains__ = has_header def items(self): return self.headers.items() def get(self, header, alternate=None): return self.headers.get(header, alternate) def set_cookie(self, key, value='', max_age=None, expires=None, path='/', domain=None, secure=False, httponly=False, samesite=None): """ Set a cookie. ``expires`` can be: - a string in the correct format, - a naive ``datetime.datetime`` object in UTC, - an aware ``datetime.datetime`` object in any time zone. If it is a ``datetime.datetime`` object then calculate ``max_age``. """ self.cookies[key] = value if expires is not None: if isinstance(expires, datetime.datetime): if timezone.is_naive(expires): expires = timezone.make_aware(expires, timezone.utc) delta = expires - datetime.datetime.now(tz=timezone.utc) # Add one second so the date matches exactly (a fraction of # time gets lost between converting to a timedelta and # then the date string). delta = delta + datetime.timedelta(seconds=1) # Just set max_age - the max_age logic will set expires. expires = None max_age = max(0, delta.days * 86400 + delta.seconds) else: self.cookies[key]['expires'] = expires else: self.cookies[key]['expires'] = '' if max_age is not None: self.cookies[key]['max-age'] = int(max_age) # IE requires expires, so set it if hasn't been already. 
if not expires: self.cookies[key]['expires'] = http_date(time.time() + max_age) if path is not None: self.cookies[key]['path'] = path if domain is not None: self.cookies[key]['domain'] = domain if secure: self.cookies[key]['secure'] = True if httponly: self.cookies[key]['httponly'] = True if samesite: if samesite.lower() not in ('lax', 'none', 'strict'): raise ValueError('samesite must be "lax", "none", or "strict".') self.cookies[key]['samesite'] = samesite def setdefault(self, key, value): """Set a header unless it has already been set.""" self.headers.setdefault(key, value) def set_signed_cookie(self, key, value, salt='', **kwargs): value = signing.get_cookie_signer(salt=key + salt).sign(value) return self.set_cookie(key, value, **kwargs) def delete_cookie(self, key, path='/', domain=None, samesite=None): # Browsers can ignore the Set-Cookie header if the cookie doesn't use # the secure flag and: # - the cookie name starts with "__Host-" or "__Secure-", or # - the samesite is "none". secure = ( key.startswith(('__Secure-', '__Host-')) or (samesite and samesite.lower() == 'none') ) self.set_cookie( key, max_age=0, path=path, domain=domain, secure=secure, expires='Thu, 01 Jan 1970 00:00:00 GMT', samesite=samesite, ) # Common methods used by subclasses def make_bytes(self, value): """Turn a value into a bytestring encoded in the output charset.""" # Per PEP 3333, this response body must be bytes. To avoid returning # an instance of a subclass, this function returns `bytes(value)`. # This doesn't make a copy when `value` already contains bytes. # Handle string types -- we can't rely on force_bytes here because: # - Python attempts str conversion first # - when self._charset != 'utf-8' it re-encodes the content if isinstance(value, (bytes, memoryview)): return bytes(value) if isinstance(value, str): return bytes(value.encode(self.charset)) # Handle non-string types. return str(value).encode(self.charset) # These methods partially implement the file-like object interface. # See https://docs.python.org/library/io.html#io.IOBase # The WSGI server must call this method upon completion of the request. # See http://blog.dscpl.com.au/2012/10/obligations-for-calling-close-on.html def close(self): for closer in self._resource_closers: try: closer() except Exception: pass # Free resources that were still referenced. self._resource_closers.clear() self.closed = True signals.request_finished.send(sender=self._handler_class) def write(self, content): raise OSError('This %s instance is not writable' % self.__class__.__name__) def flush(self): pass def tell(self): raise OSError('This %s instance cannot tell its position' % self.__class__.__name__) # These methods partially implement a stream-like object interface. # See https://docs.python.org/library/io.html#io.IOBase def readable(self): return False def seekable(self): return False def writable(self): return False def writelines(self, lines): raise OSError('This %s instance is not writable' % self.__class__.__name__) class HttpResponse(HttpResponseBase): """ An HTTP response class with a string as content. This content can be read, appended to, or replaced. """ streaming = False def __init__(self, content=b'', *args, **kwargs): super().__init__(*args, **kwargs) # Content is a bytestring. See the `content` property methods. 
self.content = content def __repr__(self): return '<%(cls)s status_code=%(status_code)d%(content_type)s>' % { 'cls': self.__class__.__name__, 'status_code': self.status_code, 'content_type': self._content_type_for_repr, } def serialize(self): """Full HTTP message, including headers, as a bytestring.""" return self.serialize_headers() + b'\r\n\r\n' + self.content __bytes__ = serialize @property def content(self): return b''.join(self._container) @content.setter def content(self, value): # Consume iterators upon assignment to allow repeated iteration. if ( hasattr(value, '__iter__') and not isinstance(value, (bytes, memoryview, str)) ): content = b''.join(self.make_bytes(chunk) for chunk in value) if hasattr(value, 'close'): try: value.close() except Exception: pass else: content = self.make_bytes(value) # Create a list of properly encoded bytestrings to support write(). self._container = [content] def __iter__(self): return iter(self._container) def write(self, content): self._container.append(self.make_bytes(content)) def tell(self): return len(self.content) def getvalue(self): return self.content def writable(self): return True def writelines(self, lines): for line in lines: self.write(line) class StreamingHttpResponse(HttpResponseBase): """ A streaming HTTP response class with an iterator as content. This should only be iterated once, when the response is streamed to the client. However, it can be appended to or replaced with a new iterator that wraps the original content (or yields entirely new content). """ streaming = True def __init__(self, streaming_content=(), *args, **kwargs): super().__init__(*args, **kwargs) # `streaming_content` should be an iterable of bytestrings. # See the `streaming_content` property methods. self.streaming_content = streaming_content def __repr__(self): return '<%(cls)s status_code=%(status_code)d%(content_type)s>' % { 'cls': self.__class__.__qualname__, 'status_code': self.status_code, 'content_type': self._content_type_for_repr, } @property def content(self): raise AttributeError( "This %s instance has no `content` attribute. Use " "`streaming_content` instead." % self.__class__.__name__ ) @property def streaming_content(self): return map(self.make_bytes, self._iterator) @streaming_content.setter def streaming_content(self, value): self._set_streaming_content(value) def _set_streaming_content(self, value): # Ensure we can never iterate on "value" more than once. self._iterator = iter(value) if hasattr(value, 'close'): self._resource_closers.append(value.close) def __iter__(self): return self.streaming_content def getvalue(self): return b''.join(self.streaming_content) class FileResponse(StreamingHttpResponse): """ A streaming HTTP response class optimized for files. """ block_size = 4096 def __init__(self, *args, as_attachment=False, filename='', **kwargs): self.as_attachment = as_attachment self.filename = filename super().__init__(*args, **kwargs) def _set_streaming_content(self, value): if not hasattr(value, 'read'): self.file_to_stream = None return super()._set_streaming_content(value) self.file_to_stream = filelike = value if hasattr(filelike, 'close'): self._resource_closers.append(filelike.close) value = iter(lambda: filelike.read(self.block_size), b'') self.set_headers(filelike) super()._set_streaming_content(value) def set_headers(self, filelike): """ Set some common response headers (Content-Length, Content-Type, and Content-Disposition) based on the `filelike` response content. 
""" encoding_map = { 'bzip2': 'application/x-bzip', 'gzip': 'application/gzip', 'xz': 'application/x-xz', } filename = getattr(filelike, 'name', None) filename = filename if (isinstance(filename, str) and filename) else self.filename if os.path.isabs(filename): self.headers['Content-Length'] = os.path.getsize(filelike.name) elif hasattr(filelike, 'getbuffer'): self.headers['Content-Length'] = filelike.getbuffer().nbytes if self.headers.get('Content-Type', '').startswith('text/html'): if filename: content_type, encoding = mimetypes.guess_type(filename) # Encoding isn't set to prevent browsers from automatically # uncompressing files. content_type = encoding_map.get(encoding, content_type) self.headers['Content-Type'] = content_type or 'application/octet-stream' else: self.headers['Content-Type'] = 'application/octet-stream' filename = self.filename or os.path.basename(filename) if filename: disposition = 'attachment' if self.as_attachment else 'inline' try: filename.encode('ascii') file_expr = 'filename="{}"'.format(filename) except UnicodeEncodeError: file_expr = "filename*=utf-8''{}".format(quote(filename)) self.headers['Content-Disposition'] = '{}; {}'.format(disposition, file_expr) elif self.as_attachment: self.headers['Content-Disposition'] = 'attachment' class HttpResponseRedirectBase(HttpResponse): allowed_schemes = ['http', 'https', 'ftp'] def __init__(self, redirect_to, *args, **kwargs): super().__init__(*args, **kwargs) self['Location'] = iri_to_uri(redirect_to) parsed = urlparse(str(redirect_to)) if parsed.scheme and parsed.scheme not in self.allowed_schemes: raise DisallowedRedirect("Unsafe redirect to URL with protocol '%s'" % parsed.scheme) url = property(lambda self: self['Location']) def __repr__(self): return '<%(cls)s status_code=%(status_code)d%(content_type)s, url="%(url)s">' % { 'cls': self.__class__.__name__, 'status_code': self.status_code, 'content_type': self._content_type_for_repr, 'url': self.url, } class HttpResponseRedirect(HttpResponseRedirectBase): status_code = 302 class HttpResponsePermanentRedirect(HttpResponseRedirectBase): status_code = 301 class HttpResponseNotModified(HttpResponse): status_code = 304 def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) del self['content-type'] @HttpResponse.content.setter def content(self, value): if value: raise AttributeError("You cannot set content to a 304 (Not Modified) response") self._container = [] class HttpResponseBadRequest(HttpResponse): status_code = 400 class HttpResponseNotFound(HttpResponse): status_code = 404 class HttpResponseForbidden(HttpResponse): status_code = 403 class HttpResponseNotAllowed(HttpResponse): status_code = 405 def __init__(self, permitted_methods, *args, **kwargs): super().__init__(*args, **kwargs) self['Allow'] = ', '.join(permitted_methods) def __repr__(self): return '<%(cls)s [%(methods)s] status_code=%(status_code)d%(content_type)s>' % { 'cls': self.__class__.__name__, 'status_code': self.status_code, 'content_type': self._content_type_for_repr, 'methods': self['Allow'], } class HttpResponseGone(HttpResponse): status_code = 410 class HttpResponseServerError(HttpResponse): status_code = 500 class Http404(Exception): pass class JsonResponse(HttpResponse): """ An HTTP response class that consumes data to be serialized to JSON. :param data: Data to be dumped into json. By default only ``dict`` objects are allowed to be passed due to a security flaw before ECMAScript 5. See the ``safe`` parameter for more information. :param encoder: Should be a json encoder class. 
Defaults to ``django.core.serializers.json.DjangoJSONEncoder``. :param safe: Controls if only ``dict`` objects may be serialized. Defaults to ``True``. :param json_dumps_params: A dictionary of kwargs passed to json.dumps(). """ def __init__(self, data, encoder=DjangoJSONEncoder, safe=True, json_dumps_params=None, **kwargs): if safe and not isinstance(data, dict): raise TypeError( 'In order to allow non-dict objects to be serialized set the ' 'safe parameter to False.' ) if json_dumps_params is None: json_dumps_params = {} kwargs.setdefault('content_type', 'application/json') data = json.dumps(data, cls=encoder, **json_dumps_params) super().__init__(content=data, **kwargs)
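

# Illustrative usage sketch, not part of Django itself: shows how JsonResponse
# serializes a dict and sets the Content-Type header. The `payload` variable is
# hypothetical; settings.configure() supplies the minimal settings (such as
# DEFAULT_CHARSET) that response construction reads.
if __name__ == '__main__':
    from django.conf import settings
    settings.configure()

    payload = {'status': 'ok', 'items': [1, 2, 3]}
    response = JsonResponse(payload)
    print(response['Content-Type'])  # application/json
    print(response.content)          # b'{"status": "ok", "items": [1, 2, 3]}'

    # Non-dict data must be opted in explicitly with safe=False.
    response = JsonResponse([1, 2, 3], safe=False)
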
import cgi import codecs import copy from io import BytesIO from itertools import chain from urllib.parse import parse_qsl, quote, urlencode, urljoin, urlsplit from django.conf import settings from django.core import signing from django.core.exceptions import ( DisallowedHost, ImproperlyConfigured, RequestDataTooBig, TooManyFieldsSent, ) from django.core.files import uploadhandler from django.http.multipartparser import MultiPartParser, MultiPartParserError from django.utils.datastructures import ( CaseInsensitiveMapping, ImmutableList, MultiValueDict, ) from django.utils.encoding import escape_uri_path, iri_to_uri from django.utils.functional import cached_property from django.utils.http import is_same_domain from django.utils.regex_helper import _lazy_re_compile from .multipartparser import parse_header RAISE_ERROR = object() host_validation_re = _lazy_re_compile(r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9\.:]+\])(:\d+)?$") class UnreadablePostError(OSError): pass class RawPostDataException(Exception): """ You cannot access raw_post_data from a request that has multipart/* POST data if it has been accessed via POST, FILES, etc.. """ pass class HttpRequest: """A basic HTTP request.""" # The encoding used in GET/POST dicts. None means use default setting. _encoding = None _upload_handlers = [] def __init__(self): # WARNING: The `WSGIRequest` subclass doesn't call `super`. # Any variable assignment made here should also happen in # `WSGIRequest.__init__()`. self.GET = QueryDict(mutable=True) self.POST = QueryDict(mutable=True) self.COOKIES = {} self.META = {} self.FILES = MultiValueDict() self.path = '' self.path_info = '' self.method = None self.resolver_match = None self.content_type = None self.content_params = None def __repr__(self): if self.method is None or not self.get_full_path(): return '<%s>' % self.__class__.__name__ return '<%s: %s %r>' % (self.__class__.__name__, self.method, self.get_full_path()) @cached_property def headers(self): return HttpHeaders(self.META) @cached_property def accepted_types(self): """Return a list of MediaType instances.""" return parse_accept_header(self.headers.get('Accept', '*/*')) def accepts(self, media_type): return any( accepted_type.match(media_type) for accepted_type in self.accepted_types ) def _set_content_type_params(self, meta): """Set content_type, content_params, and encoding.""" self.content_type, self.content_params = cgi.parse_header(meta.get('CONTENT_TYPE', '')) if 'charset' in self.content_params: try: codecs.lookup(self.content_params['charset']) except LookupError: pass else: self.encoding = self.content_params['charset'] def _get_raw_host(self): """ Return the HTTP host using the environment or request headers. Skip allowed hosts protection, so may return an insecure host. """ # We try three options, in order of decreasing preference. if settings.USE_X_FORWARDED_HOST and ( 'HTTP_X_FORWARDED_HOST' in self.META): host = self.META['HTTP_X_FORWARDED_HOST'] elif 'HTTP_HOST' in self.META: host = self.META['HTTP_HOST'] else: # Reconstruct the host using the algorithm from PEP 333. host = self.META['SERVER_NAME'] server_port = self.get_port() if server_port != ('443' if self.is_secure() else '80'): host = '%s:%s' % (host, server_port) return host def get_host(self): """Return the HTTP host using the environment or request headers.""" host = self._get_raw_host() # Allow variants of localhost if ALLOWED_HOSTS is empty and DEBUG=True. 
allowed_hosts = settings.ALLOWED_HOSTS if settings.DEBUG and not allowed_hosts: allowed_hosts = ['.localhost', '127.0.0.1', '[::1]'] domain, port = split_domain_port(host) if domain and validate_host(domain, allowed_hosts): return host else: msg = "Invalid HTTP_HOST header: %r." % host if domain: msg += " You may need to add %r to ALLOWED_HOSTS." % domain else: msg += " The domain name provided is not valid according to RFC 1034/1035." raise DisallowedHost(msg) def get_port(self): """Return the port number for the request as a string.""" if settings.USE_X_FORWARDED_PORT and 'HTTP_X_FORWARDED_PORT' in self.META: port = self.META['HTTP_X_FORWARDED_PORT'] else: port = self.META['SERVER_PORT'] return str(port) def get_full_path(self, force_append_slash=False): return self._get_full_path(self.path, force_append_slash) def get_full_path_info(self, force_append_slash=False): return self._get_full_path(self.path_info, force_append_slash) def _get_full_path(self, path, force_append_slash): # RFC 3986 requires query string arguments to be in the ASCII range. # Rather than crash if this doesn't happen, we encode defensively. return '%s%s%s' % ( escape_uri_path(path), '/' if force_append_slash and not path.endswith('/') else '', ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) if self.META.get('QUERY_STRING', '') else '' ) def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None): """ Attempt to return a signed cookie. If the signature fails or the cookie has expired, raise an exception, unless the `default` argument is provided, in which case return that value. """ try: cookie_value = self.COOKIES[key] except KeyError: if default is not RAISE_ERROR: return default else: raise try: value = signing.get_cookie_signer(salt=key + salt).unsign( cookie_value, max_age=max_age) except signing.BadSignature: if default is not RAISE_ERROR: return default else: raise return value def build_absolute_uri(self, location=None): """ Build an absolute URI from the location and the variables available in this request. If no ``location`` is specified, build the absolute URI using request.get_full_path(). If the location is absolute, convert it to an RFC 3987 compliant URI and return it. If location is relative or is scheme-relative (i.e., ``//example.com/``), urljoin() it to a base URL constructed from the request variables. """ if location is None: # Make it an absolute url (but schemeless and domainless) for the # edge case that the path starts with '//'. location = '//%s' % self.get_full_path() else: # Coerce lazy locations. location = str(location) bits = urlsplit(location) if not (bits.scheme and bits.netloc): # Handle the simple, most common case. If the location is absolute # and a scheme or host (netloc) isn't provided, skip an expensive # urljoin() as long as no path segments are '.' or '..'. if (bits.path.startswith('/') and not bits.scheme and not bits.netloc and '/./' not in bits.path and '/../' not in bits.path): # If location starts with '//' but has no netloc, reuse the # schema and netloc from the current request. Strip the double # slashes and continue as if it wasn't specified. if location.startswith('//'): location = location[2:] location = self._current_scheme_host + location else: # Join the constructed URL with the provided location, which # allows the provided location to apply query strings to the # base path. 
location = urljoin(self._current_scheme_host + self.path, location) return iri_to_uri(location) @cached_property def _current_scheme_host(self): return '{}://{}'.format(self.scheme, self.get_host()) def _get_scheme(self): """ Hook for subclasses like WSGIRequest to implement. Return 'http' by default. """ return 'http' @property def scheme(self): if settings.SECURE_PROXY_SSL_HEADER: try: header, secure_value = settings.SECURE_PROXY_SSL_HEADER except ValueError: raise ImproperlyConfigured( 'The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.' ) header_value = self.META.get(header) if header_value is not None: return 'https' if header_value == secure_value else 'http' return self._get_scheme() def is_secure(self): return self.scheme == 'https' @property def encoding(self): return self._encoding @encoding.setter def encoding(self, val): """ Set the encoding used for GET/POST accesses. If the GET or POST dictionary has already been created, remove and recreate it on the next access (so that it is decoded correctly). """ self._encoding = val if hasattr(self, 'GET'): del self.GET if hasattr(self, '_post'): del self._post def _initialize_handlers(self): self._upload_handlers = [uploadhandler.load_handler(handler, self) for handler in settings.FILE_UPLOAD_HANDLERS] @property def upload_handlers(self): if not self._upload_handlers: # If there are no upload handlers defined, initialize them from settings. self._initialize_handlers() return self._upload_handlers @upload_handlers.setter def upload_handlers(self, upload_handlers): if hasattr(self, '_files'): raise AttributeError("You cannot set the upload handlers after the upload has been processed.") self._upload_handlers = upload_handlers def parse_file_upload(self, META, post_data): """Return a tuple of (POST QueryDict, FILES MultiValueDict).""" self.upload_handlers = ImmutableList( self.upload_handlers, warning="You cannot alter upload handlers after the upload has been processed." ) parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding) return parser.parse() @property def body(self): if not hasattr(self, '_body'): if self._read_started: raise RawPostDataException("You cannot access body after reading from request's data stream") # Limit the maximum request data size that will be handled in-memory. if (settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None and int(self.META.get('CONTENT_LENGTH') or 0) > settings.DATA_UPLOAD_MAX_MEMORY_SIZE): raise RequestDataTooBig('Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE.') try: self._body = self.read() except OSError as e: raise UnreadablePostError(*e.args) from e self._stream = BytesIO(self._body) return self._body def _mark_post_parse_error(self): self._post = QueryDict() self._files = MultiValueDict() def _load_post_and_files(self): """Populate self._post and self._files if the content-type is a form type""" if self.method != 'POST': self._post, self._files = QueryDict(encoding=self._encoding), MultiValueDict() return if self._read_started and not hasattr(self, '_body'): self._mark_post_parse_error() return if self.content_type == 'multipart/form-data': if hasattr(self, '_body'): # Use already read data data = BytesIO(self._body) else: data = self try: self._post, self._files = self.parse_file_upload(self.META, data) except MultiPartParserError: # An error occurred while parsing POST data. Since when # formatting the error the request handler might access # self.POST, set self._post and self._file to prevent # attempts to parse POST data again. 
self._mark_post_parse_error() raise elif self.content_type == 'application/x-www-form-urlencoded': self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict() else: self._post, self._files = QueryDict(encoding=self._encoding), MultiValueDict() def close(self): if hasattr(self, '_files'): for f in chain.from_iterable(list_[1] for list_ in self._files.lists()): f.close() # File-like and iterator interface. # # Expects self._stream to be set to an appropriate source of bytes by # a corresponding request subclass (e.g. WSGIRequest). # Also when request data has already been read by request.POST or # request.body, self._stream points to a BytesIO instance # containing that data. def read(self, *args, **kwargs): self._read_started = True try: return self._stream.read(*args, **kwargs) except OSError as e: raise UnreadablePostError(*e.args) from e def readline(self, *args, **kwargs): self._read_started = True try: return self._stream.readline(*args, **kwargs) except OSError as e: raise UnreadablePostError(*e.args) from e def __iter__(self): return iter(self.readline, b'') def readlines(self): return list(self) class HttpHeaders(CaseInsensitiveMapping): HTTP_PREFIX = 'HTTP_' # PEP 333 gives two headers which aren't prepended with HTTP_. UNPREFIXED_HEADERS = {'CONTENT_TYPE', 'CONTENT_LENGTH'} def __init__(self, environ): headers = {} for header, value in environ.items(): name = self.parse_header_name(header) if name: headers[name] = value super().__init__(headers) def __getitem__(self, key): """Allow header lookup using underscores in place of hyphens.""" return super().__getitem__(key.replace('_', '-')) @classmethod def parse_header_name(cls, header): if header.startswith(cls.HTTP_PREFIX): header = header[len(cls.HTTP_PREFIX):] elif header not in cls.UNPREFIXED_HEADERS: return None return header.replace('_', '-').title() class QueryDict(MultiValueDict): """ A specialized MultiValueDict which represents a query string. A QueryDict can be used to represent GET or POST data. It subclasses MultiValueDict since keys in such data can be repeated, for instance in the data from a form with a <select multiple> field. By default QueryDicts are immutable, though the copy() method will always return a mutable copy. Both keys and values set on this class are converted from the given encoding (DEFAULT_CHARSET by default) to str. """ # These are both reset in __init__, but is specified here at the class # level so that unpickling will have valid values _mutable = True _encoding = None def __init__(self, query_string=None, mutable=False, encoding=None): super().__init__() self.encoding = encoding or settings.DEFAULT_CHARSET query_string = query_string or '' parse_qsl_kwargs = { 'keep_blank_values': True, 'encoding': self.encoding, 'max_num_fields': settings.DATA_UPLOAD_MAX_NUMBER_FIELDS, } if isinstance(query_string, bytes): # query_string normally contains URL-encoded data, a subset of ASCII. try: query_string = query_string.decode(self.encoding) except UnicodeDecodeError: # ... but some user agents are misbehaving :-( query_string = query_string.decode('iso-8859-1') try: for key, value in parse_qsl(query_string, **parse_qsl_kwargs): self.appendlist(key, value) except ValueError as e: # ValueError can also be raised if the strict_parsing argument to # parse_qsl() is True. As that is not used by Django, assume that # the exception was raised by exceeding the value of max_num_fields # instead of fragile checks of exception message strings. 
raise TooManyFieldsSent( 'The number of GET/POST parameters exceeded ' 'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.' ) from e self._mutable = mutable @classmethod def fromkeys(cls, iterable, value='', mutable=False, encoding=None): """ Return a new QueryDict with keys (may be repeated) from an iterable and values from value. """ q = cls('', mutable=True, encoding=encoding) for key in iterable: q.appendlist(key, value) if not mutable: q._mutable = False return q @property def encoding(self): if self._encoding is None: self._encoding = settings.DEFAULT_CHARSET return self._encoding @encoding.setter def encoding(self, value): self._encoding = value def _assert_mutable(self): if not self._mutable: raise AttributeError("This QueryDict instance is immutable") def __setitem__(self, key, value): self._assert_mutable() key = bytes_to_text(key, self.encoding) value = bytes_to_text(value, self.encoding) super().__setitem__(key, value) def __delitem__(self, key): self._assert_mutable() super().__delitem__(key) def __copy__(self): result = self.__class__('', mutable=True, encoding=self.encoding) for key, value in self.lists(): result.setlist(key, value) return result def __deepcopy__(self, memo): result = self.__class__('', mutable=True, encoding=self.encoding) memo[id(self)] = result for key, value in self.lists(): result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo)) return result def setlist(self, key, list_): self._assert_mutable() key = bytes_to_text(key, self.encoding) list_ = [bytes_to_text(elt, self.encoding) for elt in list_] super().setlist(key, list_) def setlistdefault(self, key, default_list=None): self._assert_mutable() return super().setlistdefault(key, default_list) def appendlist(self, key, value): self._assert_mutable() key = bytes_to_text(key, self.encoding) value = bytes_to_text(value, self.encoding) super().appendlist(key, value) def pop(self, key, *args): self._assert_mutable() return super().pop(key, *args) def popitem(self): self._assert_mutable() return super().popitem() def clear(self): self._assert_mutable() super().clear() def setdefault(self, key, default=None): self._assert_mutable() key = bytes_to_text(key, self.encoding) default = bytes_to_text(default, self.encoding) return super().setdefault(key, default) def copy(self): """Return a mutable copy of this object.""" return self.__deepcopy__({}) def urlencode(self, safe=None): """ Return an encoded string of all query string arguments. 
`safe` specifies characters which don't require quoting, for example:: >>> q = QueryDict(mutable=True) >>> q['next'] = '/a&b/' >>> q.urlencode() 'next=%2Fa%26b%2F' >>> q.urlencode(safe='/') 'next=/a%26b/' """ output = [] if safe: safe = safe.encode(self.encoding) def encode(k, v): return '%s=%s' % ((quote(k, safe), quote(v, safe))) else: def encode(k, v): return urlencode({k: v}) for k, list_ in self.lists(): output.extend( encode(k.encode(self.encoding), str(v).encode(self.encoding)) for v in list_ ) return '&'.join(output) class MediaType: def __init__(self, media_type_raw_line): full_type, self.params = parse_header( media_type_raw_line.encode('ascii') if media_type_raw_line else b'' ) self.main_type, _, self.sub_type = full_type.partition('/') def __str__(self): params_str = ''.join( '; %s=%s' % (k, v.decode('ascii')) for k, v in self.params.items() ) return '%s%s%s' % ( self.main_type, ('/%s' % self.sub_type) if self.sub_type else '', params_str, ) def __repr__(self): return '<%s: %s>' % (self.__class__.__qualname__, self) @property def is_all_types(self): return self.main_type == '*' and self.sub_type == '*' def match(self, other): if self.is_all_types: return True other = MediaType(other) if self.main_type == other.main_type and self.sub_type in {'*', other.sub_type}: return True return False # It's neither necessary nor appropriate to use # django.utils.encoding.force_str() for parsing URLs and form inputs. Thus, # this slightly more restricted function, used by QueryDict. def bytes_to_text(s, encoding): """ Convert bytes objects to strings, using the given encoding. Illegally encoded input characters are replaced with Unicode "unknown" codepoint (\ufffd). Return any non-bytes objects without change. """ if isinstance(s, bytes): return str(s, encoding, 'replace') else: return s def split_domain_port(host): """ Return a (domain, port) tuple from a given host. Returned domain is lowercased. If the host is invalid, the domain will be empty. """ host = host.lower() if not host_validation_re.match(host): return '', '' if host[-1] == ']': # It's an IPv6 address without a port. return host, '' bits = host.rsplit(':', 1) domain, port = bits if len(bits) == 2 else (bits[0], '') # Remove a trailing dot (if present) from the domain. domain = domain[:-1] if domain.endswith('.') else domain return domain, port def validate_host(host, allowed_hosts): """ Validate the given host for this site. Check that the host looks valid and matches a host or host pattern in the given list of ``allowed_hosts``. Any pattern beginning with a period matches a domain and all its subdomains (e.g. ``.example.com`` matches ``example.com`` and any subdomain), ``*`` matches anything, and anything else must match exactly. Note: This function assumes that the given host is lowercased and has already had the port, if any, stripped off. Return ``True`` for a valid host, ``False`` otherwise. """ return any(pattern == '*' or is_same_domain(host, pattern) for pattern in allowed_hosts) def parse_accept_header(header): return [MediaType(token) for token in header.split(',') if token.strip()]
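

# Illustrative usage sketch, not part of Django itself: shows QueryDict
# parsing, multi-value access, and re-encoding. settings.configure() supplies
# DEFAULT_CHARSET and DATA_UPLOAD_MAX_NUMBER_FIELDS, which __init__ reads; the
# query string below is hypothetical.
if __name__ == '__main__':
    from django.conf import settings
    settings.configure()

    q = QueryDict('a=1&a=2&b=hello')
    print(q['a'])          # '2' -- __getitem__ returns the last value
    print(q.getlist('a'))  # ['1', '2']
    print(q.urlencode())   # 'a=1&a=2&b=hello'

    # QueryDicts are immutable by default; copy() always returns a mutable one.
    mutable = q.copy()
    mutable['c'] = '3'
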
from urllib.parse import quote, urljoin from django import template from django.apps import apps from django.utils.encoding import iri_to_uri from django.utils.html import conditional_escape register = template.Library() class PrefixNode(template.Node): def __repr__(self): return "<PrefixNode for %r>" % self.name def __init__(self, varname=None, name=None): if name is None: raise template.TemplateSyntaxError( "Prefix nodes must be given a name to return.") self.varname = varname self.name = name @classmethod def handle_token(cls, parser, token, name): """ Class method to parse prefix node and return a Node. """ # token.split_contents() isn't useful here because tags using this method don't accept variable as arguments tokens = token.contents.split() if len(tokens) > 1 and tokens[1] != 'as': raise template.TemplateSyntaxError( "First argument in '%s' must be 'as'" % tokens[0]) if len(tokens) > 1: varname = tokens[2] else: varname = None return cls(varname, name) @classmethod def handle_simple(cls, name): try: from django.conf import settings except ImportError: prefix = '' else: prefix = iri_to_uri(getattr(settings, name, '')) return prefix def render(self, context): prefix = self.handle_simple(self.name) if self.varname is None: return prefix context[self.varname] = prefix return '' @register.tag def get_static_prefix(parser, token): """ Populate a template variable with the static prefix, ``settings.STATIC_URL``. Usage:: {% get_static_prefix [as varname] %} Examples:: {% get_static_prefix %} {% get_static_prefix as static_prefix %} """ return PrefixNode.handle_token(parser, token, "STATIC_URL") @register.tag def get_media_prefix(parser, token): """ Populate a template variable with the media prefix, ``settings.MEDIA_URL``. Usage:: {% get_media_prefix [as varname] %} Examples:: {% get_media_prefix %} {% get_media_prefix as media_prefix %} """ return PrefixNode.handle_token(parser, token, "MEDIA_URL") class StaticNode(template.Node): child_nodelists = () def __init__(self, varname=None, path=None): if path is None: raise template.TemplateSyntaxError( "Static template nodes must be given a path to return.") self.path = path self.varname = varname def __repr__(self): return ( f'{self.__class__.__name__}(varname={self.varname!r}, path={self.path!r})' ) def url(self, context): path = self.path.resolve(context) return self.handle_simple(path) def render(self, context): url = self.url(context) if context.autoescape: url = conditional_escape(url) if self.varname is None: return url context[self.varname] = url return '' @classmethod def handle_simple(cls, path): if apps.is_installed('django.contrib.staticfiles'): from django.contrib.staticfiles.storage import staticfiles_storage return staticfiles_storage.url(path) else: return urljoin(PrefixNode.handle_simple("STATIC_URL"), quote(path)) @classmethod def handle_token(cls, parser, token): """ Class method to parse prefix node and return a Node. """ bits = token.split_contents() if len(bits) < 2: raise template.TemplateSyntaxError( "'%s' takes at least one argument (path to file)" % bits[0]) path = parser.compile_filter(bits[1]) if len(bits) >= 2 and bits[-2] == 'as': varname = bits[3] else: varname = None return cls(varname, path) @register.tag('static') def do_static(parser, token): """ Join the given path with the STATIC_URL setting. 
Usage:: {% static path [as varname] %} Examples:: {% static "myapp/css/base.css" %} {% static variable_with_path %} {% static "myapp/css/base.css" as admin_base_css %} {% static variable_with_path as varname %} """ return StaticNode.handle_token(parser, token) def static(path): """ Given a relative path to a static asset, return the absolute path to the asset. """ return StaticNode.handle_simple(path)
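

# Illustrative usage sketch, not part of Django itself: resolves a relative
# asset path against a hypothetical STATIC_URL. django.setup() is needed so
# the app registry consulted by apps.is_installed() is populated; without
# django.contrib.staticfiles installed, the STATIC_URL prefix is used directly.
if __name__ == '__main__':
    import django
    from django.conf import settings
    settings.configure(STATIC_URL='/static/', INSTALLED_APPS=[])
    django.setup()

    print(static('myapp/css/base.css'))  # /static/myapp/css/base.css
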
from decimal import Decimal from django.conf import settings from django.template import Library, Node, TemplateSyntaxError, Variable from django.template.base import TokenType, render_value_in_context from django.template.defaulttags import token_kwargs from django.utils import translation from django.utils.safestring import SafeData, mark_safe register = Library() class GetAvailableLanguagesNode(Node): def __init__(self, variable): self.variable = variable def render(self, context): context[self.variable] = [(k, translation.gettext(v)) for k, v in settings.LANGUAGES] return '' class GetLanguageInfoNode(Node): def __init__(self, lang_code, variable): self.lang_code = lang_code self.variable = variable def render(self, context): lang_code = self.lang_code.resolve(context) context[self.variable] = translation.get_language_info(lang_code) return '' class GetLanguageInfoListNode(Node): def __init__(self, languages, variable): self.languages = languages self.variable = variable def get_language_info(self, language): # ``language`` is either a language code string or a sequence # with the language code as its first item if len(language[0]) > 1: return translation.get_language_info(language[0]) else: return translation.get_language_info(str(language)) def render(self, context): langs = self.languages.resolve(context) context[self.variable] = [self.get_language_info(lang) for lang in langs] return '' class GetCurrentLanguageNode(Node): def __init__(self, variable): self.variable = variable def render(self, context): context[self.variable] = translation.get_language() return '' class GetCurrentLanguageBidiNode(Node): def __init__(self, variable): self.variable = variable def render(self, context): context[self.variable] = translation.get_language_bidi() return '' class TranslateNode(Node): child_nodelists = () def __init__(self, filter_expression, noop, asvar=None, message_context=None): self.noop = noop self.asvar = asvar self.message_context = message_context self.filter_expression = filter_expression if isinstance(self.filter_expression.var, str): self.filter_expression.var = Variable("'%s'" % self.filter_expression.var) def render(self, context): self.filter_expression.var.translate = not self.noop if self.message_context: self.filter_expression.var.message_context = ( self.message_context.resolve(context)) output = self.filter_expression.resolve(context) value = render_value_in_context(output, context) # Restore percent signs. Percent signs in template text are doubled # so they are not interpreted as string format flags. 
is_safe = isinstance(value, SafeData) value = value.replace('%%', '%') value = mark_safe(value) if is_safe else value if self.asvar: context[self.asvar] = value return '' else: return value class BlockTranslateNode(Node): def __init__(self, extra_context, singular, plural=None, countervar=None, counter=None, message_context=None, trimmed=False, asvar=None, tag_name='blocktranslate'): self.extra_context = extra_context self.singular = singular self.plural = plural self.countervar = countervar self.counter = counter self.message_context = message_context self.trimmed = trimmed self.asvar = asvar self.tag_name = tag_name def __repr__(self): return ( f'<{self.__class__.__qualname__}: ' f'extra_context={self.extra_context!r} ' f'singular={self.singular!r} plural={self.plural!r}>' ) def render_token_list(self, tokens): result = [] vars = [] for token in tokens: if token.token_type == TokenType.TEXT: result.append(token.contents.replace('%', '%%')) elif token.token_type == TokenType.VAR: result.append('%%(%s)s' % token.contents) vars.append(token.contents) msg = ''.join(result) if self.trimmed: msg = translation.trim_whitespace(msg) return msg, vars def render(self, context, nested=False): if self.message_context: message_context = self.message_context.resolve(context) else: message_context = None # Update() works like a push(), so corresponding context.pop() is at # the end of function context.update({var: val.resolve(context) for var, val in self.extra_context.items()}) singular, vars = self.render_token_list(self.singular) if self.plural and self.countervar and self.counter: count = self.counter.resolve(context) if not isinstance(count, (Decimal, float, int)): raise TemplateSyntaxError( "%r argument to %r tag must be a number." % (self.countervar, self.tag_name) ) context[self.countervar] = count plural, plural_vars = self.render_token_list(self.plural) if message_context: result = translation.npgettext(message_context, singular, plural, count) else: result = translation.ngettext(singular, plural, count) vars.extend(plural_vars) else: if message_context: result = translation.pgettext(message_context, singular) else: result = translation.gettext(singular) default_value = context.template.engine.string_if_invalid def render_value(key): if key in context: val = context[key] else: val = default_value % key if '%s' in default_value else default_value return render_value_in_context(val, context) data = {v: render_value(v) for v in vars} context.pop() try: result = result % data except (KeyError, ValueError): if nested: # Either string is malformed, or it's a bug raise TemplateSyntaxError( '%r is unable to format string returned by gettext: %r ' 'using %r' % (self.tag_name, result, data) ) with translation.override(None): result = self.render(context, nested=True) if self.asvar: context[self.asvar] = result return '' else: return result class LanguageNode(Node): def __init__(self, nodelist, language): self.nodelist = nodelist self.language = language def render(self, context): with translation.override(self.language.resolve(context)): output = self.nodelist.render(context) return output @register.tag("get_available_languages") def do_get_available_languages(parser, token): """ Store a list of available languages in the context. Usage:: {% get_available_languages as languages %} {% for language in languages %} ... {% endfor %} This puts settings.LANGUAGES into the named variable. 
""" # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments args = token.contents.split() if len(args) != 3 or args[1] != 'as': raise TemplateSyntaxError("'get_available_languages' requires 'as variable' (got %r)" % args) return GetAvailableLanguagesNode(args[2]) @register.tag("get_language_info") def do_get_language_info(parser, token): """ Store the language information dictionary for the given language code in a context variable. Usage:: {% get_language_info for LANGUAGE_CODE as l %} {{ l.code }} {{ l.name }} {{ l.name_translated }} {{ l.name_local }} {{ l.bidi|yesno:"bi-directional,uni-directional" }} """ args = token.split_contents() if len(args) != 5 or args[1] != 'for' or args[3] != 'as': raise TemplateSyntaxError("'%s' requires 'for string as variable' (got %r)" % (args[0], args[1:])) return GetLanguageInfoNode(parser.compile_filter(args[2]), args[4]) @register.tag("get_language_info_list") def do_get_language_info_list(parser, token): """ Store a list of language information dictionaries for the given language codes in a context variable. The language codes can be specified either as a list of strings or a settings.LANGUAGES style list (or any sequence of sequences whose first items are language codes). Usage:: {% get_language_info_list for LANGUAGES as langs %} {% for l in langs %} {{ l.code }} {{ l.name }} {{ l.name_translated }} {{ l.name_local }} {{ l.bidi|yesno:"bi-directional,uni-directional" }} {% endfor %} """ args = token.split_contents() if len(args) != 5 or args[1] != 'for' or args[3] != 'as': raise TemplateSyntaxError("'%s' requires 'for sequence as variable' (got %r)" % (args[0], args[1:])) return GetLanguageInfoListNode(parser.compile_filter(args[2]), args[4]) @register.filter def language_name(lang_code): return translation.get_language_info(lang_code)['name'] @register.filter def language_name_translated(lang_code): english_name = translation.get_language_info(lang_code)['name'] return translation.gettext(english_name) @register.filter def language_name_local(lang_code): return translation.get_language_info(lang_code)['name_local'] @register.filter def language_bidi(lang_code): return translation.get_language_info(lang_code)['bidi'] @register.tag("get_current_language") def do_get_current_language(parser, token): """ Store the current language in the context. Usage:: {% get_current_language as language %} This fetches the currently active language and puts its value into the ``language`` context variable. """ # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments args = token.contents.split() if len(args) != 3 or args[1] != 'as': raise TemplateSyntaxError("'get_current_language' requires 'as variable' (got %r)" % args) return GetCurrentLanguageNode(args[2]) @register.tag("get_current_language_bidi") def do_get_current_language_bidi(parser, token): """ Store the current language layout in the context. Usage:: {% get_current_language_bidi as bidi %} This fetches the currently active language's layout and puts its value into the ``bidi`` context variable. True indicates right-to-left layout, otherwise left-to-right. 
""" # token.split_contents() isn't useful here because this tag doesn't accept variable as arguments args = token.contents.split() if len(args) != 3 or args[1] != 'as': raise TemplateSyntaxError("'get_current_language_bidi' requires 'as variable' (got %r)" % args) return GetCurrentLanguageBidiNode(args[2]) @register.tag("translate") @register.tag("trans") def do_translate(parser, token): """ Mark a string for translation and translate the string for the current language. Usage:: {% translate "this is a test" %} This marks the string for translation so it will be pulled out by makemessages into the .po files and runs the string through the translation engine. There is a second form:: {% translate "this is a test" noop %} This marks the string for translation, but returns the string unchanged. Use it when you need to store values into forms that should be translated later on. You can use variables instead of constant strings to translate stuff you marked somewhere else:: {% translate variable %} This tries to translate the contents of the variable ``variable``. Make sure that the string in there is something that is in the .po file. It is possible to store the translated string into a variable:: {% translate "this is a test" as var %} {{ var }} Contextual translations are also supported:: {% translate "this is a test" context "greeting" %} This is equivalent to calling pgettext instead of (u)gettext. """ bits = token.split_contents() if len(bits) < 2: raise TemplateSyntaxError("'%s' takes at least one argument" % bits[0]) message_string = parser.compile_filter(bits[1]) remaining = bits[2:] noop = False asvar = None message_context = None seen = set() invalid_context = {'as', 'noop'} while remaining: option = remaining.pop(0) if option in seen: raise TemplateSyntaxError( "The '%s' option was specified more than once." % option, ) elif option == 'noop': noop = True elif option == 'context': try: value = remaining.pop(0) except IndexError: raise TemplateSyntaxError( "No argument provided to the '%s' tag for the context option." % bits[0] ) if value in invalid_context: raise TemplateSyntaxError( "Invalid argument '%s' provided to the '%s' tag for the context option" % (value, bits[0]), ) message_context = parser.compile_filter(value) elif option == 'as': try: value = remaining.pop(0) except IndexError: raise TemplateSyntaxError( "No argument provided to the '%s' tag for the as option." % bits[0] ) asvar = value else: raise TemplateSyntaxError( "Unknown argument for '%s' tag: '%s'. The only options " "available are 'noop', 'context' \"xxx\", and 'as VAR'." % ( bits[0], option, ) ) seen.add(option) return TranslateNode(message_string, noop, asvar, message_context) @register.tag("blocktranslate") @register.tag("blocktrans") def do_block_translate(parser, token): """ Translate a block of text with parameters. Usage:: {% blocktranslate with bar=foo|filter boo=baz|filter %} This is {{ bar }} and {{ boo }}. {% endblocktranslate %} Additionally, this supports pluralization:: {% blocktranslate count count=var|length %} There is {{ count }} object. {% plural %} There are {{ count }} objects. {% endblocktranslate %} This is much like ngettext, only in template syntax. The "var as value" legacy format is still supported:: {% blocktranslate with foo|filter as bar and baz|filter as boo %} {% blocktranslate count var|length as count %} The translated string can be stored in a variable using `asvar`:: {% blocktranslate with bar=foo|filter boo=baz|filter asvar var %} This is {{ bar }} and {{ boo }}. 
{% endblocktranslate %} {{ var }} Contextual translations are supported:: {% blocktranslate with bar=foo|filter context "greeting" %} This is {{ bar }}. {% endblocktranslate %} This is equivalent to calling pgettext/npgettext instead of (u)gettext/(u)ngettext. """ bits = token.split_contents() options = {} remaining_bits = bits[1:] asvar = None while remaining_bits: option = remaining_bits.pop(0) if option in options: raise TemplateSyntaxError('The %r option was specified more ' 'than once.' % option) if option == 'with': value = token_kwargs(remaining_bits, parser, support_legacy=True) if not value: raise TemplateSyntaxError('"with" in %r tag needs at least ' 'one keyword argument.' % bits[0]) elif option == 'count': value = token_kwargs(remaining_bits, parser, support_legacy=True) if len(value) != 1: raise TemplateSyntaxError('"count" in %r tag expected exactly ' 'one keyword argument.' % bits[0]) elif option == "context": try: value = remaining_bits.pop(0) value = parser.compile_filter(value) except Exception: raise TemplateSyntaxError( '"context" in %r tag expected exactly one argument.' % bits[0] ) elif option == "trimmed": value = True elif option == "asvar": try: value = remaining_bits.pop(0) except IndexError: raise TemplateSyntaxError( "No argument provided to the '%s' tag for the asvar option." % bits[0] ) asvar = value else: raise TemplateSyntaxError('Unknown argument for %r tag: %r.' % (bits[0], option)) options[option] = value if 'count' in options: countervar, counter = next(iter(options['count'].items())) else: countervar, counter = None, None if 'context' in options: message_context = options['context'] else: message_context = None extra_context = options.get('with', {}) trimmed = options.get("trimmed", False) singular = [] plural = [] while parser.tokens: token = parser.next_token() if token.token_type in (TokenType.VAR, TokenType.TEXT): singular.append(token) else: break if countervar and counter: if token.contents.strip() != 'plural': raise TemplateSyntaxError("%r doesn't allow other block tags inside it" % bits[0]) while parser.tokens: token = parser.next_token() if token.token_type in (TokenType.VAR, TokenType.TEXT): plural.append(token) else: break end_tag_name = 'end%s' % bits[0] if token.contents.strip() != end_tag_name: raise TemplateSyntaxError("%r doesn't allow other block tags (seen %r) inside it" % (bits[0], token.contents)) return BlockTranslateNode(extra_context, singular, plural, countervar, counter, message_context, trimmed=trimmed, asvar=asvar, tag_name=bits[0]) @register.tag def language(parser, token): """ Enable the given language just for this block. Usage:: {% language "de" %} This is {{ bar }} and {{ boo }}. {% endlanguage %} """ bits = token.split_contents() if len(bits) != 2: raise TemplateSyntaxError("'%s' takes one argument (language)" % bits[0]) language = parser.compile_filter(bits[1]) nodelist = parser.parse(('endlanguage',)) parser.delete_first_token() return LanguageNode(nodelist, language)
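

# Illustrative usage sketch, not part of Django itself: renders the
# {% translate %} tag through a standalone template Engine. The template
# string and settings are hypothetical; with no translation catalogs loaded,
# the marked string passes through unchanged.
if __name__ == '__main__':
    import django
    from django.conf import settings
    settings.configure(USE_I18N=True, INSTALLED_APPS=[])
    django.setup()

    from django.template import Context, Engine
    engine = Engine(libraries={'i18n': 'django.templatetags.i18n'})
    template = engine.from_string(
        '{% load i18n %}{% translate "Hello" as greeting %}{{ greeting }}, world!'
    )
    print(template.render(Context()))  # Hello, world!
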
""" Decorators for views based on HTTP headers. """ from functools import wraps from django.http import HttpResponseNotAllowed from django.middleware.http import ConditionalGetMiddleware from django.utils import timezone from django.utils.cache import get_conditional_response from django.utils.decorators import decorator_from_middleware from django.utils.http import http_date, quote_etag from django.utils.log import log_response conditional_page = decorator_from_middleware(ConditionalGetMiddleware) def require_http_methods(request_method_list): """ Decorator to make a view only accept particular request methods. Usage:: @require_http_methods(["GET", "POST"]) def my_view(request): # I can assume now that only GET or POST requests make it this far # ... Note that request methods should be in uppercase. """ def decorator(func): @wraps(func) def inner(request, *args, **kwargs): if request.method not in request_method_list: response = HttpResponseNotAllowed(request_method_list) log_response( 'Method Not Allowed (%s): %s', request.method, request.path, response=response, request=request, ) return response return func(request, *args, **kwargs) return inner return decorator require_GET = require_http_methods(["GET"]) require_GET.__doc__ = "Decorator to require that a view only accepts the GET method." require_POST = require_http_methods(["POST"]) require_POST.__doc__ = "Decorator to require that a view only accepts the POST method." require_safe = require_http_methods(["GET", "HEAD"]) require_safe.__doc__ = "Decorator to require that a view only accepts safe methods: GET and HEAD." def condition(etag_func=None, last_modified_func=None): """ Decorator to support conditional retrieval (or change) for a view function. The parameters are callables to compute the ETag and last modified time for the requested resource, respectively. The callables are passed the same parameters as the view itself. The ETag function should return a string (or None if the resource doesn't exist), while the last_modified function should return a datetime object (or None if the resource doesn't exist). The ETag function should return a complete ETag, including quotes (e.g. '"etag"'), since that's the only way to distinguish between weak and strong ETags. If an unquoted ETag is returned (e.g. 'etag'), it will be converted to a strong ETag by adding quotes. This decorator will either pass control to the wrapped view function or return an HTTP 304 response (unmodified) or 412 response (precondition failed), depending upon the request method. In either case, the decorator will add the generated ETag and Last-Modified headers to the response if the headers aren't already set and if the request's method is safe. """ def decorator(func): @wraps(func) def inner(request, *args, **kwargs): # Compute values (if any) for the requested resource. def get_last_modified(): if last_modified_func: dt = last_modified_func(request, *args, **kwargs) if dt: if not timezone.is_aware(dt): dt = timezone.make_aware(dt, timezone.utc) return int(dt.timestamp()) # The value from etag_func() could be quoted or unquoted. res_etag = etag_func(request, *args, **kwargs) if etag_func else None res_etag = quote_etag(res_etag) if res_etag is not None else None res_last_modified = get_last_modified() response = get_conditional_response( request, etag=res_etag, last_modified=res_last_modified, ) if response is None: response = func(request, *args, **kwargs) # Set relevant headers on the response if they don't already exist # and if the request method is safe. 
if request.method in ('GET', 'HEAD'): if res_last_modified and not response.has_header('Last-Modified'): response.headers['Last-Modified'] = http_date(res_last_modified) if res_etag: response.headers.setdefault('ETag', res_etag) return response return inner return decorator # Shortcut decorators for common cases based on ETag or Last-Modified only def etag(etag_func): return condition(etag_func=etag_func) def last_modified(last_modified_func): return condition(last_modified_func=last_modified_func)
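

# Illustrative usage sketch, not part of Django itself: applies require_POST
# to a hypothetical view and shows the 405 response produced for a GET. With
# no logging configured, the "Method Not Allowed" warning may print to stderr.
if __name__ == '__main__':
    from django.conf import settings
    settings.configure()

    from django.http import HttpRequest, HttpResponse

    @require_POST
    def create_item(request):
        return HttpResponse('created', status=201)

    request = HttpRequest()
    request.method = 'GET'
    response = create_item(request)
    print(response.status_code)  # 405
    print(response['Allow'])     # POST
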
import functools from django.http import HttpRequest def sensitive_variables(*variables): """ Indicate which variables used in the decorated function are sensitive so that those variables can later be treated in a special way, for example by hiding them when logging unhandled exceptions. Accept two forms: * with specified variable names: @sensitive_variables('user', 'password', 'credit_card') def my_function(user): password = user.pass_word credit_card = user.credit_card_number ... * without any specified variable names, in which case consider all variables are sensitive: @sensitive_variables() def my_function() ... """ if len(variables) == 1 and callable(variables[0]): raise TypeError( 'sensitive_variables() must be called to use it as a decorator, ' 'e.g., use @sensitive_variables(), not @sensitive_variables.' ) def decorator(func): @functools.wraps(func) def sensitive_variables_wrapper(*func_args, **func_kwargs): if variables: sensitive_variables_wrapper.sensitive_variables = variables else: sensitive_variables_wrapper.sensitive_variables = '__ALL__' return func(*func_args, **func_kwargs) return sensitive_variables_wrapper return decorator def sensitive_post_parameters(*parameters): """ Indicate which POST parameters used in the decorated view are sensitive, so that those parameters can later be treated in a special way, for example by hiding them when logging unhandled exceptions. Accept two forms: * with specified parameters: @sensitive_post_parameters('password', 'credit_card') def my_view(request): pw = request.POST['password'] cc = request.POST['credit_card'] ... * without any specified parameters, in which case consider all variables are sensitive: @sensitive_post_parameters() def my_view(request) ... """ if len(parameters) == 1 and callable(parameters[0]): raise TypeError( 'sensitive_post_parameters() must be called to use it as a ' 'decorator, e.g., use @sensitive_post_parameters(), not ' '@sensitive_post_parameters.' ) def decorator(view): @functools.wraps(view) def sensitive_post_parameters_wrapper(request, *args, **kwargs): if not isinstance(request, HttpRequest): raise TypeError( "sensitive_post_parameters didn't receive an HttpRequest " "object. If you are decorating a classmethod, make sure " "to use @method_decorator." ) if parameters: request.sensitive_post_parameters = parameters else: request.sensitive_post_parameters = '__ALL__' return view(request, *args, **kwargs) return sensitive_post_parameters_wrapper return decorator
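

# Illustrative usage sketch, not part of Django itself: the decorator only
# annotates the wrapper; Django's debug error reporter later reads the
# attribute to redact values in tracebacks. The function below is hypothetical.
if __name__ == '__main__':
    @sensitive_variables('password')
    def process_login(username, password):
        return username

    process_login('alice', 's3cr3t')
    # The marker is recorded on the wrapper the first time it runs.
    print(process_login.sensitive_variables)  # ('password',)
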
from django.core.exceptions import ImproperlyConfigured from django.forms import Form, models as model_forms from django.http import HttpResponseRedirect from django.views.generic.base import ContextMixin, TemplateResponseMixin, View from django.views.generic.detail import ( BaseDetailView, SingleObjectMixin, SingleObjectTemplateResponseMixin, ) class FormMixin(ContextMixin): """Provide a way to show and handle a form in a request.""" initial = {} form_class = None success_url = None prefix = None def get_initial(self): """Return the initial data to use for forms on this view.""" return self.initial.copy() def get_prefix(self): """Return the prefix to use for forms.""" return self.prefix def get_form_class(self): """Return the form class to use.""" return self.form_class def get_form(self, form_class=None): """Return an instance of the form to be used in this view.""" if form_class is None: form_class = self.get_form_class() return form_class(**self.get_form_kwargs()) def get_form_kwargs(self): """Return the keyword arguments for instantiating the form.""" kwargs = { 'initial': self.get_initial(), 'prefix': self.get_prefix(), } if self.request.method in ('POST', 'PUT'): kwargs.update({ 'data': self.request.POST, 'files': self.request.FILES, }) return kwargs def get_success_url(self): """Return the URL to redirect to after processing a valid form.""" if not self.success_url: raise ImproperlyConfigured("No URL to redirect to. Provide a success_url.") return str(self.success_url) # success_url may be lazy def form_valid(self, form): """If the form is valid, redirect to the supplied URL.""" return HttpResponseRedirect(self.get_success_url()) def form_invalid(self, form): """If the form is invalid, render the invalid form.""" return self.render_to_response(self.get_context_data(form=form)) def get_context_data(self, **kwargs): """Insert the form into the context dict.""" if 'form' not in kwargs: kwargs['form'] = self.get_form() return super().get_context_data(**kwargs) class ModelFormMixin(FormMixin, SingleObjectMixin): """Provide a way to show and handle a ModelForm in a request.""" fields = None def get_form_class(self): """Return the form class to use in this view.""" if self.fields is not None and self.form_class: raise ImproperlyConfigured( "Specifying both 'fields' and 'form_class' is not permitted." ) if self.form_class: return self.form_class else: if self.model is not None: # If a model has been explicitly provided, use it model = self.model elif getattr(self, 'object', None) is not None: # If this view is operating on a single object, use # the class of that object model = self.object.__class__ else: # Try to get a queryset and extract the model class # from that model = self.get_queryset().model if self.fields is None: raise ImproperlyConfigured( "Using ModelFormMixin (base class of %s) without " "the 'fields' attribute is prohibited." % self.__class__.__name__ ) return model_forms.modelform_factory(model, fields=self.fields) def get_form_kwargs(self): """Return the keyword arguments for instantiating the form.""" kwargs = super().get_form_kwargs() if hasattr(self, 'object'): kwargs.update({'instance': self.object}) return kwargs def get_success_url(self): """Return the URL to redirect to after processing a valid form.""" if self.success_url: url = self.success_url.format(**self.object.__dict__) else: try: url = self.object.get_absolute_url() except AttributeError: raise ImproperlyConfigured( "No URL to redirect to. 
Either provide a url or define"
                    " a get_absolute_url method on the Model.")
        return url

    def form_valid(self, form):
        """If the form is valid, save the associated model."""
        self.object = form.save()
        return super().form_valid(form)


class ProcessFormView(View):
    """Render a form on GET and process it on POST."""
    def get(self, request, *args, **kwargs):
        """Handle GET requests: instantiate a blank version of the form."""
        return self.render_to_response(self.get_context_data())

    def post(self, request, *args, **kwargs):
        """
        Handle POST requests: instantiate a form instance with the passed
        POST variables and then check if it's valid.
        """
        form = self.get_form()
        if form.is_valid():
            return self.form_valid(form)
        else:
            return self.form_invalid(form)

    # PUT is a valid HTTP verb for creating (with a known URL) or editing an
    # object. Note that browsers only support POST for now.
    def put(self, *args, **kwargs):
        return self.post(*args, **kwargs)


class BaseFormView(FormMixin, ProcessFormView):
    """A base view for displaying a form."""


class FormView(TemplateResponseMixin, BaseFormView):
    """A view for displaying a form and rendering a template response."""


class BaseCreateView(ModelFormMixin, ProcessFormView):
    """
    Base view for creating a new object instance.

    Using this base class requires subclassing to provide a response mixin.
    """
    def get(self, request, *args, **kwargs):
        self.object = None
        return super().get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        self.object = None
        return super().post(request, *args, **kwargs)


class CreateView(SingleObjectTemplateResponseMixin, BaseCreateView):
    """
    View for creating a new object, with a response rendered by a template.
    """
    template_name_suffix = '_form'


class BaseUpdateView(ModelFormMixin, ProcessFormView):
    """
    Base view for updating an existing object.

    Using this base class requires subclassing to provide a response mixin.
    """
    def get(self, request, *args, **kwargs):
        self.object = self.get_object()
        return super().get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        self.object = self.get_object()
        return super().post(request, *args, **kwargs)


class UpdateView(SingleObjectTemplateResponseMixin, BaseUpdateView):
    """View for updating an object, with a response rendered by a template."""
    template_name_suffix = '_form'


class DeletionMixin:
    """Provide the ability to delete objects."""
    success_url = None

    def delete(self, request, *args, **kwargs):
        """
        Call the delete() method on the fetched object and then redirect to
        the success URL.
        """
        self.object = self.get_object()
        success_url = self.get_success_url()
        self.object.delete()
        return HttpResponseRedirect(success_url)

    # Add support for browsers which only accept GET and POST for now.
    def post(self, request, *args, **kwargs):
        return self.delete(request, *args, **kwargs)

    def get_success_url(self):
        if self.success_url:
            return self.success_url.format(**self.object.__dict__)
        else:
            raise ImproperlyConfigured(
                "No URL to redirect to. Provide a success_url.")


class BaseDeleteView(DeletionMixin, FormMixin, BaseDetailView):
    """
    Base view for deleting an object.

    Using this base class requires subclassing to provide a response mixin.
    """
    form_class = Form

    def post(self, request, *args, **kwargs):
        # Set self.object before the usual form processing flow.
        # Inlined because having DeletionMixin as the first base, for
        # get_success_url(), makes leveraging super() with ProcessFormView
        # overly complex.
self.object = self.get_object() form = self.get_form() if form.is_valid(): return self.form_valid(form) else: return self.form_invalid(form) def form_valid(self, form): success_url = self.get_success_url() self.object.delete() return HttpResponseRedirect(success_url) class DeleteView(SingleObjectTemplateResponseMixin, BaseDeleteView): """ View for deleting an object retrieved with self.get_object(), with a response rendered by a template. """ template_name_suffix = '_confirm_delete'
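# Usage sketch (hypothetical Author model, app, and URL name): CreateView and
# DeleteView above are thin compositions of the mixins in this module, so a
# working view usually only needs a model, the form fields, and a success URL
# (or a get_absolute_url() on the model).

from django.urls import reverse_lazy
from django.views.generic.edit import CreateView, DeleteView

from myapp.models import Author  # hypothetical app and model


class AuthorCreateView(CreateView):
    model = Author
    fields = ['name']  # ModelFormMixin generates the ModelForm from these
    success_url = reverse_lazy('author-list')  # hypothetical URL pattern name


class AuthorDeleteView(DeleteView):
    model = Author
    success_url = reverse_lazy('author-list')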
9c49fd23b913b6334036fb8c4ec2edcbd17fd4fab9e6daa3e59f8882cd4fc032
"""Translation helper functions.""" import functools import gettext as gettext_module import os import re import sys import warnings from asgiref.local import Local from django.apps import apps from django.conf import settings from django.conf.locale import LANG_INFO from django.core.exceptions import AppRegistryNotReady from django.core.signals import setting_changed from django.dispatch import receiver from django.utils.regex_helper import _lazy_re_compile from django.utils.safestring import SafeData, mark_safe from . import to_language, to_locale # Translations are cached in a dictionary for every language. # The active translations are stored by threadid to make them thread local. _translations = {} _active = Local() # The default translation is based on the settings file. _default = None # magic gettext number to separate context from message CONTEXT_SEPARATOR = "\x04" # Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9 # and RFC 3066, section 2.1 accept_language_re = _lazy_re_compile(r''' ([A-Za-z]{1,8}(?:-[A-Za-z0-9]{1,8})*|\*) # "en", "en-au", "x-y-z", "es-419", "*" (?:\s*;\s*q=(0(?:\.\d{,3})?|1(?:\.0{,3})?))? # Optional "q=1.00", "q=0.8" (?:\s*,\s*|$) # Multiple accepts per header. ''', re.VERBOSE) language_code_re = _lazy_re_compile( r'^[a-z]{1,8}(?:-[a-z0-9]{1,8})*(?:@[a-z0-9]{1,20})?$', re.IGNORECASE ) language_code_prefix_re = _lazy_re_compile(r'^/(\w+([@-]\w+)?)(/|$)') @receiver(setting_changed) def reset_cache(**kwargs): """ Reset global state when LANGUAGES setting has been changed, as some languages should no longer be accepted. """ if kwargs['setting'] in ('LANGUAGES', 'LANGUAGE_CODE'): check_for_language.cache_clear() get_languages.cache_clear() get_supported_language_variant.cache_clear() class TranslationCatalog: """ Simulate a dict for DjangoTranslation._catalog so as multiple catalogs with different plural equations are kept separate. """ def __init__(self, trans=None): self._catalogs = [trans._catalog.copy()] if trans else [{}] self._plurals = [trans.plural] if trans else [lambda n: int(n != 1)] def __getitem__(self, key): for cat in self._catalogs: try: return cat[key] except KeyError: pass raise KeyError(key) def __setitem__(self, key, value): self._catalogs[0][key] = value def __contains__(self, key): return any(key in cat for cat in self._catalogs) def items(self): for cat in self._catalogs: yield from cat.items() def keys(self): for cat in self._catalogs: yield from cat.keys() def update(self, trans): # Merge if plural function is the same, else prepend. for cat, plural in zip(self._catalogs, self._plurals): if trans.plural.__code__ == plural.__code__: cat.update(trans._catalog) break else: self._catalogs.insert(0, trans._catalog.copy()) self._plurals.insert(0, trans.plural) def get(self, key, default=None): missing = object() for cat in self._catalogs: result = cat.get(key, missing) if result is not missing: return result return default def plural(self, msgid, num): for cat, plural in zip(self._catalogs, self._plurals): tmsg = cat.get((msgid, plural(num))) if tmsg is not None: return tmsg raise KeyError class DjangoTranslation(gettext_module.GNUTranslations): """ Set up the GNUTranslations context with regard to output charset. This translation object will be constructed out of multiple GNUTranslations objects by merging their catalogs. It will construct an object for the requested language and add a fallback to the default language, if it's different from the requested language. 
""" domain = 'django' def __init__(self, language, domain=None, localedirs=None): """Create a GNUTranslations() using many locale directories""" gettext_module.GNUTranslations.__init__(self) if domain is not None: self.domain = domain self.__language = language self.__to_language = to_language(language) self.__locale = to_locale(language) self._catalog = None # If a language doesn't have a catalog, use the Germanic default for # pluralization: anything except one is pluralized. self.plural = lambda n: int(n != 1) if self.domain == 'django': if localedirs is not None: # A module-level cache is used for caching 'django' translations warnings.warn("localedirs is ignored when domain is 'django'.", RuntimeWarning) localedirs = None self._init_translation_catalog() if localedirs: for localedir in localedirs: translation = self._new_gnu_trans(localedir) self.merge(translation) else: self._add_installed_apps_translations() self._add_local_translations() if self.__language == settings.LANGUAGE_CODE and self.domain == 'django' and self._catalog is None: # default lang should have at least one translation file available. raise OSError('No translation files found for default language %s.' % settings.LANGUAGE_CODE) self._add_fallback(localedirs) if self._catalog is None: # No catalogs found for this language, set an empty catalog. self._catalog = TranslationCatalog() def __repr__(self): return "<DjangoTranslation lang:%s>" % self.__language def _new_gnu_trans(self, localedir, use_null_fallback=True): """ Return a mergeable gettext.GNUTranslations instance. A convenience wrapper. By default gettext uses 'fallback=False'. Using param `use_null_fallback` to avoid confusion with any other references to 'fallback'. """ return gettext_module.translation( domain=self.domain, localedir=localedir, languages=[self.__locale], fallback=use_null_fallback, ) def _init_translation_catalog(self): """Create a base catalog using global django translations.""" settingsfile = sys.modules[settings.__module__].__file__ localedir = os.path.join(os.path.dirname(settingsfile), 'locale') translation = self._new_gnu_trans(localedir) self.merge(translation) def _add_installed_apps_translations(self): """Merge translations from each installed app.""" try: app_configs = reversed(list(apps.get_app_configs())) except AppRegistryNotReady: raise AppRegistryNotReady( "The translation infrastructure cannot be initialized before the " "apps registry is ready. 
Check that you don't make non-lazy " "gettext calls at import time.") for app_config in app_configs: localedir = os.path.join(app_config.path, 'locale') if os.path.exists(localedir): translation = self._new_gnu_trans(localedir) self.merge(translation) def _add_local_translations(self): """Merge translations defined in LOCALE_PATHS.""" for localedir in reversed(settings.LOCALE_PATHS): translation = self._new_gnu_trans(localedir) self.merge(translation) def _add_fallback(self, localedirs=None): """Set the GNUTranslations() fallback with the default language.""" # Don't set a fallback for the default language or any English variant # (as it's empty, so it'll ALWAYS fall back to the default language) if self.__language == settings.LANGUAGE_CODE or self.__language.startswith('en'): return if self.domain == 'django': # Get from cache default_translation = translation(settings.LANGUAGE_CODE) else: default_translation = DjangoTranslation( settings.LANGUAGE_CODE, domain=self.domain, localedirs=localedirs ) self.add_fallback(default_translation) def merge(self, other): """Merge another translation into this catalog.""" if not getattr(other, '_catalog', None): return # NullTranslations() has no _catalog if self._catalog is None: # Take plural and _info from first catalog found (generally Django's). self.plural = other.plural self._info = other._info.copy() self._catalog = TranslationCatalog(other) else: self._catalog.update(other) if other._fallback: self.add_fallback(other._fallback) def language(self): """Return the translation language.""" return self.__language def to_language(self): """Return the translation language name.""" return self.__to_language def ngettext(self, msgid1, msgid2, n): try: tmsg = self._catalog.plural(msgid1, n) except KeyError: if self._fallback: return self._fallback.ngettext(msgid1, msgid2, n) if n == 1: tmsg = msgid1 else: tmsg = msgid2 return tmsg def translation(language): """ Return a translation object in the default 'django' domain. """ global _translations if language not in _translations: _translations[language] = DjangoTranslation(language) return _translations[language] def activate(language): """ Fetch the translation object for a given language and install it as the current translation object for the current thread. """ if not language: return _active.value = translation(language) def deactivate(): """ Uninstall the active translation object so that further _() calls resolve to the default translation object. """ if hasattr(_active, "value"): del _active.value def deactivate_all(): """ Make the active translation object a NullTranslations() instance. This is useful when we want delayed translations to appear as the original string for some reason. """ _active.value = gettext_module.NullTranslations() _active.value.to_language = lambda *args: None def get_language(): """Return the currently selected language.""" t = getattr(_active, "value", None) if t is not None: try: return t.to_language() except AttributeError: pass # If we don't have a real translation object, assume it's the default language. return settings.LANGUAGE_CODE def get_language_bidi(): """ Return selected language's BiDi layout. * False = left-to-right layout * True = right-to-left layout """ lang = get_language() if lang is None: return False else: base_lang = get_language().split('-')[0] return base_lang in settings.LANGUAGES_BIDI def catalog(): """ Return the current active catalog for further processing. 
    This can be used if you need to modify the catalog or want to access the
    whole message catalog instead of just translating one string.
    """
    global _default

    t = getattr(_active, "value", None)
    if t is not None:
        return t
    if _default is None:
        _default = translation(settings.LANGUAGE_CODE)
    return _default


def gettext(message):
    """
    Translate the 'message' string. It uses the current thread to find the
    translation object to use. If no current translation is activated, the
    message will be run through the default translation object.
    """
    global _default

    eol_message = message.replace('\r\n', '\n').replace('\r', '\n')

    if eol_message:
        _default = _default or translation(settings.LANGUAGE_CODE)
        translation_object = getattr(_active, "value", _default)

        result = translation_object.gettext(eol_message)
    else:
        # Return an empty value of the corresponding type if an empty message
        # is given, instead of metadata, which is the default gettext behavior.
        result = type(message)('')

    if isinstance(message, SafeData):
        return mark_safe(result)

    return result


def pgettext(context, message):
    msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
    result = gettext(msg_with_ctxt)
    if CONTEXT_SEPARATOR in result:
        # Translation not found
        result = message
    elif isinstance(message, SafeData):
        result = mark_safe(result)
    return result


def gettext_noop(message):
    """
    Mark strings for translation but don't translate them now. This can be
    used to store strings in global variables that should stay in the base
    language (because they might be used externally) and will be translated
    later.
    """
    return message


def do_ntranslate(singular, plural, number, translation_function):
    global _default

    t = getattr(_active, "value", None)
    if t is not None:
        return getattr(t, translation_function)(singular, plural, number)
    if _default is None:
        _default = translation(settings.LANGUAGE_CODE)
    return getattr(_default, translation_function)(singular, plural, number)


def ngettext(singular, plural, number):
    """
    Return a string of the translation of either the singular or plural,
    based on the number.
    """
    return do_ntranslate(singular, plural, number, 'ngettext')


def npgettext(context, singular, plural, number):
    msgs_with_ctxt = ("%s%s%s" % (context, CONTEXT_SEPARATOR, singular),
                      "%s%s%s" % (context, CONTEXT_SEPARATOR, plural),
                      number)
    result = ngettext(*msgs_with_ctxt)
    if CONTEXT_SEPARATOR in result:
        # Translation not found
        result = ngettext(singular, plural, number)
    return result


def all_locale_paths():
    """
    Return a list of paths to user-provided language files.
    """
    globalpath = os.path.join(
        os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
    app_paths = []
    for app_config in apps.get_app_configs():
        locale_path = os.path.join(app_config.path, 'locale')
        if os.path.exists(locale_path):
            app_paths.append(locale_path)
    return [globalpath, *settings.LOCALE_PATHS, *app_paths]


@functools.lru_cache(maxsize=1000)
def check_for_language(lang_code):
    """
    Check whether there is a global language file for the given language
    code. This is used to decide whether a user-provided language is
    available.

    lru_cache should have a maxsize to prevent memory exhaustion attacks,
    as the provided language codes are taken from the HTTP request. See also
    <https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
""" # First, a quick check to make sure lang_code is well-formed (#21458) if lang_code is None or not language_code_re.search(lang_code): return False return any( gettext_module.find('django', path, [to_locale(lang_code)]) is not None for path in all_locale_paths() ) @functools.lru_cache() def get_languages(): """ Cache of settings.LANGUAGES in a dictionary for easy lookups by key. """ return dict(settings.LANGUAGES) @functools.lru_cache(maxsize=1000) def get_supported_language_variant(lang_code, strict=False): """ Return the language code that's listed in supported languages, possibly selecting a more generic variant. Raise LookupError if nothing is found. If `strict` is False (the default), look for a country-specific variant when neither the language code nor its generic variant is found. lru_cache should have a maxsize to prevent from memory exhaustion attacks, as the provided language codes are taken from the HTTP request. See also <https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>. """ if lang_code: # If 'zh-hant-tw' is not supported, try special fallback or subsequent # language codes i.e. 'zh-hant' and 'zh'. possible_lang_codes = [lang_code] try: possible_lang_codes.extend(LANG_INFO[lang_code]['fallback']) except KeyError: pass i = None while (i := lang_code.rfind('-', 0, i)) > -1: possible_lang_codes.append(lang_code[:i]) generic_lang_code = possible_lang_codes[-1] supported_lang_codes = get_languages() for code in possible_lang_codes: if code in supported_lang_codes and check_for_language(code): return code if not strict: # if fr-fr is not supported, try fr-ca. for supported_code in supported_lang_codes: if supported_code.startswith(generic_lang_code + '-'): return supported_code raise LookupError(lang_code) def get_language_from_path(path, strict=False): """ Return the language code if there's a valid language code found in `path`. If `strict` is False (the default), look for a country-specific variant when neither the language code nor its generic variant is found. """ regex_match = language_code_prefix_re.match(path) if not regex_match: return None lang_code = regex_match[1] try: return get_supported_language_variant(lang_code, strict=strict) except LookupError: return None def get_language_from_request(request, check_path=False): """ Analyze the request to find what language the user wants the system to show. Only languages listed in settings.LANGUAGES are taken into account. If the user requests a sublanguage where we have a main language, we send out the main language. If check_path is True, the URL path prefix will be checked for a language code, otherwise this is skipped for backwards compatibility. 
""" if check_path: lang_code = get_language_from_path(request.path_info) if lang_code is not None: return lang_code lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME) if lang_code is not None and lang_code in get_languages() and check_for_language(lang_code): return lang_code try: return get_supported_language_variant(lang_code) except LookupError: pass accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '') for accept_lang, unused in parse_accept_lang_header(accept): if accept_lang == '*': break if not language_code_re.search(accept_lang): continue try: return get_supported_language_variant(accept_lang) except LookupError: continue try: return get_supported_language_variant(settings.LANGUAGE_CODE) except LookupError: return settings.LANGUAGE_CODE @functools.lru_cache(maxsize=1000) def parse_accept_lang_header(lang_string): """ Parse the lang_string, which is the body of an HTTP Accept-Language header, and return a tuple of (lang, q-value), ordered by 'q' values. Return an empty tuple if there are any format errors in lang_string. """ result = [] pieces = accept_language_re.split(lang_string.lower()) if pieces[-1]: return () for i in range(0, len(pieces) - 1, 3): first, lang, priority = pieces[i:i + 3] if first: return () if priority: priority = float(priority) else: priority = 1.0 result.append((lang, priority)) result.sort(key=lambda k: k[1], reverse=True) return tuple(result)
35301041b0705d76bc1b19991c39cc080abbe10a05a55c7100e27546b9e37723
from django.apps.registry import apps as global_apps from django.db import migrations, router from .exceptions import InvalidMigrationPlan from .loader import MigrationLoader from .recorder import MigrationRecorder from .state import ProjectState class MigrationExecutor: """ End-to-end migration execution - load migrations and run them up or down to a specified set of targets. """ def __init__(self, connection, progress_callback=None): self.connection = connection self.loader = MigrationLoader(self.connection) self.recorder = MigrationRecorder(self.connection) self.progress_callback = progress_callback def migration_plan(self, targets, clean_start=False): """ Given a set of targets, return a list of (Migration instance, backwards?). """ plan = [] if clean_start: applied = {} else: applied = dict(self.loader.applied_migrations) for target in targets: # If the target is (app_label, None), that means unmigrate everything if target[1] is None: for root in self.loader.graph.root_nodes(): if root[0] == target[0]: for migration in self.loader.graph.backwards_plan(root): if migration in applied: plan.append((self.loader.graph.nodes[migration], True)) applied.pop(migration) # If the migration is already applied, do backwards mode, # otherwise do forwards mode. elif target in applied: # Don't migrate backwards all the way to the target node (that # may roll back dependencies in other apps that don't need to # be rolled back); instead roll back through target's immediate # child(ren) in the same app, and no further. next_in_app = sorted( n for n in self.loader.graph.node_map[target].children if n[0] == target[0] ) for node in next_in_app: for migration in self.loader.graph.backwards_plan(node): if migration in applied: plan.append((self.loader.graph.nodes[migration], True)) applied.pop(migration) else: for migration in self.loader.graph.forwards_plan(target): if migration not in applied: plan.append((self.loader.graph.nodes[migration], False)) applied[migration] = self.loader.graph.nodes[migration] return plan def _create_project_state(self, with_applied_migrations=False): """ Create a project state including all the applications without migrations and applied migrations if with_applied_migrations=True. """ state = ProjectState(real_apps=list(self.loader.unmigrated_apps)) if with_applied_migrations: # Create the forwards plan Django would follow on an empty database full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True) applied_migrations = { self.loader.graph.nodes[key] for key in self.loader.applied_migrations if key in self.loader.graph.nodes } for migration, _ in full_plan: if migration in applied_migrations: migration.mutate_state(state, preserve=False) return state def migrate(self, targets, plan=None, state=None, fake=False, fake_initial=False): """ Migrate the database up to the given targets. Django first needs to create all project states before a migration is (un)applied and in a second step run all the database operations. """ # The django_migrations table must be present to record applied # migrations. self.recorder.ensure_schema() if plan is None: plan = self.migration_plan(targets) # Create the forwards plan Django would follow on an empty database full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True) all_forwards = all(not backwards for mig, backwards in plan) all_backwards = all(backwards for mig, backwards in plan) if not plan: if state is None: # The resulting state should include applied migrations. 
state = self._create_project_state(with_applied_migrations=True) elif all_forwards == all_backwards: # This should only happen if there's a mixed plan raise InvalidMigrationPlan( "Migration plans with both forwards and backwards migrations " "are not supported. Please split your migration process into " "separate plans of only forwards OR backwards migrations.", plan ) elif all_forwards: if state is None: # The resulting state should still include applied migrations. state = self._create_project_state(with_applied_migrations=True) state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial) else: # No need to check for `elif all_backwards` here, as that condition # would always evaluate to true. state = self._migrate_all_backwards(plan, full_plan, fake=fake) self.check_replacements() return state def _migrate_all_forwards(self, state, plan, full_plan, fake, fake_initial): """ Take a list of 2-tuples of the form (migration instance, False) and apply them in the order they occur in the full_plan. """ migrations_to_run = {m[0] for m in plan} for migration, _ in full_plan: if not migrations_to_run: # We remove every migration that we applied from these sets so # that we can bail out once the last migration has been applied # and don't always run until the very end of the migration # process. break if migration in migrations_to_run: if 'apps' not in state.__dict__: if self.progress_callback: self.progress_callback("render_start") state.apps # Render all -- performance critical if self.progress_callback: self.progress_callback("render_success") state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial) migrations_to_run.remove(migration) return state def _migrate_all_backwards(self, plan, full_plan, fake): """ Take a list of 2-tuples of the form (migration instance, True) and unapply them in reverse order they occur in the full_plan. Since unapplying a migration requires the project state prior to that migration, Django will compute the migration states before each of them in a first run over the plan and then unapply them in a second run over the plan. """ migrations_to_run = {m[0] for m in plan} # Holds all migration states prior to the migrations being unapplied states = {} state = self._create_project_state() applied_migrations = { self.loader.graph.nodes[key] for key in self.loader.applied_migrations if key in self.loader.graph.nodes } if self.progress_callback: self.progress_callback("render_start") for migration, _ in full_plan: if not migrations_to_run: # We remove every migration that we applied from this set so # that we can bail out once the last migration has been applied # and don't always run until the very end of the migration # process. break if migration in migrations_to_run: if 'apps' not in state.__dict__: state.apps # Render all -- performance critical # The state before this migration states[migration] = state # The old state keeps as-is, we continue with the new state state = migration.mutate_state(state, preserve=True) migrations_to_run.remove(migration) elif migration in applied_migrations: # Only mutate the state if the migration is actually applied # to make sure the resulting state doesn't include changes # from unrelated migrations. 
migration.mutate_state(state, preserve=False) if self.progress_callback: self.progress_callback("render_success") for migration, _ in plan: self.unapply_migration(states[migration], migration, fake=fake) applied_migrations.remove(migration) # Generate the post migration state by starting from the state before # the last migration is unapplied and mutating it to include all the # remaining applied migrations. last_unapplied_migration = plan[-1][0] state = states[last_unapplied_migration] for index, (migration, _) in enumerate(full_plan): if migration == last_unapplied_migration: for migration, _ in full_plan[index:]: if migration in applied_migrations: migration.mutate_state(state, preserve=False) break return state def apply_migration(self, state, migration, fake=False, fake_initial=False): """Run a migration forwards.""" migration_recorded = False if self.progress_callback: self.progress_callback("apply_start", migration, fake) if not fake: if fake_initial: # Test to see if this is an already-applied initial migration applied, state = self.detect_soft_applied(state, migration) if applied: fake = True if not fake: # Alright, do it normally with self.connection.schema_editor(atomic=migration.atomic) as schema_editor: state = migration.apply(state, schema_editor) if not schema_editor.deferred_sql: self.record_migration(migration) migration_recorded = True if not migration_recorded: self.record_migration(migration) # Report progress if self.progress_callback: self.progress_callback("apply_success", migration, fake) return state def record_migration(self, migration): # For replacement migrations, record individual statuses if migration.replaces: for app_label, name in migration.replaces: self.recorder.record_applied(app_label, name) else: self.recorder.record_applied(migration.app_label, migration.name) def unapply_migration(self, state, migration, fake=False): """Run a migration backwards.""" if self.progress_callback: self.progress_callback("unapply_start", migration, fake) if not fake: with self.connection.schema_editor(atomic=migration.atomic) as schema_editor: state = migration.unapply(state, schema_editor) # For replacement migrations, also record individual statuses. if migration.replaces: for app_label, name in migration.replaces: self.recorder.record_unapplied(app_label, name) self.recorder.record_unapplied(migration.app_label, migration.name) # Report progress if self.progress_callback: self.progress_callback("unapply_success", migration, fake) return state def check_replacements(self): """ Mark replacement migrations applied if their replaced set all are. Do this unconditionally on every migrate, rather than just when migrations are applied or unapplied, to correctly handle the case when a new squash migration is pushed to a deployment that already had all its replaced migrations applied. In this case no new migration will be applied, but the applied state of the squashed migration must be maintained. """ applied = self.recorder.applied_migrations() for key, migration in self.loader.replacements.items(): all_applied = all(m in applied for m in migration.replaces) if all_applied and key not in applied: self.recorder.record_applied(*key) def detect_soft_applied(self, project_state, migration): """ Test whether a migration has been implicitly applied - that the tables or columns it would create exist. This is intended only for use on initial migrations (as it only looks for CreateModel and AddField). 
""" def should_skip_detecting_model(migration, model): """ No need to detect tables for proxy models, unmanaged models, or models that can't be migrated on the current database. """ return ( model._meta.proxy or not model._meta.managed or not router.allow_migrate( self.connection.alias, migration.app_label, model_name=model._meta.model_name, ) ) if migration.initial is None: # Bail if the migration isn't the first one in its app if any(app == migration.app_label for app, name in migration.dependencies): return False, project_state elif migration.initial is False: # Bail if it's NOT an initial migration return False, project_state if project_state is None: after_state = self.loader.project_state((migration.app_label, migration.name), at_end=True) else: after_state = migration.mutate_state(project_state) apps = after_state.apps found_create_model_migration = False found_add_field_migration = False fold_identifier_case = self.connection.features.ignores_table_name_case with self.connection.cursor() as cursor: existing_table_names = set(self.connection.introspection.table_names(cursor)) if fold_identifier_case: existing_table_names = {name.casefold() for name in existing_table_names} # Make sure all create model and add field operations are done for operation in migration.operations: if isinstance(operation, migrations.CreateModel): model = apps.get_model(migration.app_label, operation.name) if model._meta.swapped: # We have to fetch the model to test with from the # main app cache, as it's not a direct dependency. model = global_apps.get_model(model._meta.swapped) if should_skip_detecting_model(migration, model): continue db_table = model._meta.db_table if fold_identifier_case: db_table = db_table.casefold() if db_table not in existing_table_names: return False, project_state found_create_model_migration = True elif isinstance(operation, migrations.AddField): model = apps.get_model(migration.app_label, operation.model_name) if model._meta.swapped: # We have to fetch the model to test with from the # main app cache, as it's not a direct dependency. model = global_apps.get_model(model._meta.swapped) if should_skip_detecting_model(migration, model): continue table = model._meta.db_table field = model._meta.get_field(operation.name) # Handle implicit many-to-many tables created by AddField. if field.many_to_many: through_db_table = field.remote_field.through._meta.db_table if fold_identifier_case: through_db_table = through_db_table.casefold() if through_db_table not in existing_table_names: return False, project_state else: found_add_field_migration = True continue with self.connection.cursor() as cursor: columns = self.connection.introspection.get_table_description(cursor, table) for column in columns: field_column = field.column column_name = column.name if fold_identifier_case: column_name = column_name.casefold() field_column = field_column.casefold() if column_name == field_column: found_add_field_migration = True break else: return False, project_state # If we get this far and we found at least one CreateModel or AddField migration, # the migration is considered implicitly applied. return (found_create_model_migration or found_add_field_migration), after_state
64123ed6fa5c8535388a2b7324c0abe83077636b755d67d842363170b02b333d
import copy
from collections import defaultdict
from contextlib import contextmanager
from functools import partial

from django.apps import AppConfig
from django.apps.registry import Apps, apps as global_apps
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist
from django.db import models
from django.db.migrations.utils import field_is_referenced, get_references
from django.db.models import NOT_PROVIDED
from django.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT
from django.db.models.options import DEFAULT_NAMES, normalize_together
from django.db.models.utils import make_model_tuple
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from django.utils.version import get_docs_version

from .exceptions import InvalidBasesError
from .utils import resolve_relation


def _get_app_label_and_model_name(model, app_label=''):
    if isinstance(model, str):
        split = model.split('.', 1)
        return tuple(split) if len(split) == 2 else (app_label, split[0])
    else:
        return model._meta.app_label, model._meta.model_name


def _get_related_models(m):
    """Return all models that have a direct relationship to the given model."""
    related_models = [
        subclass for subclass in m.__subclasses__()
        if issubclass(subclass, models.Model)
    ]
    related_fields_models = set()
    for f in m._meta.get_fields(include_parents=True, include_hidden=True):
        if f.is_relation and f.related_model is not None and not isinstance(f.related_model, str):
            related_fields_models.add(f.model)
            related_models.append(f.related_model)
    # Reverse accessors of foreign keys to proxy models are attached to their
    # concrete proxied model.
    opts = m._meta
    if opts.proxy and m in related_fields_models:
        related_models.append(opts.concrete_model)
    return related_models


def get_related_models_tuples(model):
    """
    Return a set of (app_label, model_name) tuples for all related models of
    the given model.
    """
    return {
        (rel_mod._meta.app_label, rel_mod._meta.model_name)
        for rel_mod in _get_related_models(model)
    }


def get_related_models_recursive(model):
    """
    Return all models that have a direct or indirect relationship
    to the given model.

    Relationships are either defined by explicit relational fields, like
    ForeignKey, ManyToManyField or OneToOneField, or by inheriting from another
    model (a superclass is related to its subclasses, but not vice versa). Note,
    however, that a model inheriting from a concrete model is also related to
    its superclass through the implicit *_ptr OneToOneField on the subclass.
    """
    seen = set()
    queue = _get_related_models(model)
    for rel_mod in queue:
        rel_app_label, rel_model_name = rel_mod._meta.app_label, rel_mod._meta.model_name
        if (rel_app_label, rel_model_name) in seen:
            continue
        seen.add((rel_app_label, rel_model_name))
        queue.extend(_get_related_models(rel_mod))
    return seen - {(model._meta.app_label, model._meta.model_name)}


class ProjectState:
    """
    Represent the entire project's overall state. This is the item that is
    passed around - do it here rather than at the app level so that cross-app
    FKs/etc. resolve properly.
""" def __init__(self, models=None, real_apps=None): self.models = models or {} # Apps to include from main registry, usually unmigrated ones self.real_apps = real_apps or [] self.is_delayed = False # {remote_model_key: {model_key: [(field_name, field)]}} self.relations = None def add_model(self, model_state): app_label, model_name = model_state.app_label, model_state.name_lower self.models[(app_label, model_name)] = model_state if 'apps' in self.__dict__: # hasattr would cache the property self.reload_model(app_label, model_name) def remove_model(self, app_label, model_name): del self.models[app_label, model_name] if 'apps' in self.__dict__: # hasattr would cache the property self.apps.unregister_model(app_label, model_name) # Need to do this explicitly since unregister_model() doesn't clear # the cache automatically (#24513) self.apps.clear_cache() def rename_model(self, app_label, old_name, new_name): # Add a new model. old_name_lower = old_name.lower() new_name_lower = new_name.lower() renamed_model = self.models[app_label, old_name_lower].clone() renamed_model.name = new_name self.models[app_label, new_name_lower] = renamed_model # Repoint all fields pointing to the old model to the new one. old_model_tuple = (app_label, old_name_lower) new_remote_model = f'{app_label}.{new_name}' to_reload = set() for model_state, name, field, reference in get_references(self, old_model_tuple): changed_field = None if reference.to: changed_field = field.clone() changed_field.remote_field.model = new_remote_model if reference.through: if changed_field is None: changed_field = field.clone() changed_field.remote_field.through = new_remote_model if changed_field: model_state.fields[name] = changed_field to_reload.add((model_state.app_label, model_state.name_lower)) # Reload models related to old model before removing the old model. self.reload_models(to_reload, delay=True) # Remove the old model. 
self.remove_model(app_label, old_name_lower) self.reload_model(app_label, new_name_lower, delay=True) def alter_model_options(self, app_label, model_name, options, option_keys=None): model_state = self.models[app_label, model_name] model_state.options = {**model_state.options, **options} if option_keys: for key in option_keys: if key not in options: model_state.options.pop(key, False) self.reload_model(app_label, model_name, delay=True) def alter_model_managers(self, app_label, model_name, managers): model_state = self.models[app_label, model_name] model_state.managers = list(managers) self.reload_model(app_label, model_name, delay=True) def _append_option(self, app_label, model_name, option_name, obj): model_state = self.models[app_label, model_name] model_state.options[option_name] = [*model_state.options[option_name], obj] self.reload_model(app_label, model_name, delay=True) def _remove_option(self, app_label, model_name, option_name, obj_name): model_state = self.models[app_label, model_name] objs = model_state.options[option_name] model_state.options[option_name] = [obj for obj in objs if obj.name != obj_name] self.reload_model(app_label, model_name, delay=True) def add_index(self, app_label, model_name, index): self._append_option(app_label, model_name, 'indexes', index) def remove_index(self, app_label, model_name, index_name): self._remove_option(app_label, model_name, 'indexes', index_name) def add_constraint(self, app_label, model_name, constraint): self._append_option(app_label, model_name, 'constraints', constraint) def remove_constraint(self, app_label, model_name, constraint_name): self._remove_option(app_label, model_name, 'constraints', constraint_name) def add_field(self, app_label, model_name, name, field, preserve_default): # If preserve default is off, don't use the default for future state. if not preserve_default: field = field.clone() field.default = NOT_PROVIDED else: field = field self.models[app_label, model_name].fields[name] = field # Delay rendering of relationships if it's not a relational field. delay = not field.is_relation self.reload_model(app_label, model_name, delay=delay) def remove_field(self, app_label, model_name, name): model_state = self.models[app_label, model_name] old_field = model_state.fields.pop(name) # Delay rendering of relationships if it's not a relational field. delay = not old_field.is_relation self.reload_model(app_label, model_name, delay=delay) def alter_field(self, app_label, model_name, name, field, preserve_default): if not preserve_default: field = field.clone() field.default = NOT_PROVIDED else: field = field model_state = self.models[app_label, model_name] model_state.fields[name] = field # TODO: investigate if old relational fields must be reloaded or if # it's sufficient if the new field is (#27737). # Delay rendering of relationships if it's not a relational field and # not referenced by a foreign key. delay = ( not field.is_relation and not field_is_referenced(self, (app_label, model_name), (name, field)) ) self.reload_model(app_label, model_name, delay=delay) def rename_field(self, app_label, model_name, old_name, new_name): model_state = self.models[app_label, model_name] # Rename the field. fields = model_state.fields try: found = fields.pop(old_name) except KeyError: raise FieldDoesNotExist( f"{app_label}.{model_name} has no field named '{old_name}'" ) fields[new_name] = found for field in fields.values(): # Fix from_fields to refer to the new field. 
from_fields = getattr(field, 'from_fields', None) if from_fields: field.from_fields = tuple([ new_name if from_field_name == old_name else from_field_name for from_field_name in from_fields ]) # Fix index/unique_together to refer to the new field. options = model_state.options for option in ('index_together', 'unique_together'): if option in options: options[option] = [ [new_name if n == old_name else n for n in together] for together in options[option] ] # Fix to_fields to refer to the new field. delay = True references = get_references(self, (app_label, model_name), (old_name, found)) for *_, field, reference in references: delay = False if reference.to: remote_field, to_fields = reference.to if getattr(remote_field, 'field_name', None) == old_name: remote_field.field_name = new_name if to_fields: field.to_fields = tuple([ new_name if to_field_name == old_name else to_field_name for to_field_name in to_fields ]) self.reload_model(app_label, model_name, delay=delay) def _find_reload_model(self, app_label, model_name, delay=False): if delay: self.is_delayed = True related_models = set() try: old_model = self.apps.get_model(app_label, model_name) except LookupError: pass else: # Get all relations to and from the old model before reloading, # as _meta.apps may change if delay: related_models = get_related_models_tuples(old_model) else: related_models = get_related_models_recursive(old_model) # Get all outgoing references from the model to be rendered model_state = self.models[(app_label, model_name)] # Directly related models are the models pointed to by ForeignKeys, # OneToOneFields, and ManyToManyFields. direct_related_models = set() for field in model_state.fields.values(): if field.is_relation: if field.remote_field.model == RECURSIVE_RELATIONSHIP_CONSTANT: continue rel_app_label, rel_model_name = _get_app_label_and_model_name(field.related_model, app_label) direct_related_models.add((rel_app_label, rel_model_name.lower())) # For all direct related models recursively get all related models. related_models.update(direct_related_models) for rel_app_label, rel_model_name in direct_related_models: try: rel_model = self.apps.get_model(rel_app_label, rel_model_name) except LookupError: pass else: if delay: related_models.update(get_related_models_tuples(rel_model)) else: related_models.update(get_related_models_recursive(rel_model)) # Include the model itself related_models.add((app_label, model_name)) return related_models def reload_model(self, app_label, model_name, delay=False): if 'apps' in self.__dict__: # hasattr would cache the property related_models = self._find_reload_model(app_label, model_name, delay) self._reload(related_models) def reload_models(self, models, delay=True): if 'apps' in self.__dict__: # hasattr would cache the property related_models = set() for app_label, model_name in models: related_models.update(self._find_reload_model(app_label, model_name, delay)) self._reload(related_models) def _reload(self, related_models): # Unregister all related models with self.apps.bulk_update(): for rel_app_label, rel_model_name in related_models: self.apps.unregister_model(rel_app_label, rel_model_name) states_to_be_rendered = [] # Gather all models states of those models that will be rerendered. # This includes: # 1. All related models of unmigrated apps for model_state in self.apps.real_models: if (model_state.app_label, model_state.name_lower) in related_models: states_to_be_rendered.append(model_state) # 2. 
All related models of migrated apps for rel_app_label, rel_model_name in related_models: try: model_state = self.models[rel_app_label, rel_model_name] except KeyError: pass else: states_to_be_rendered.append(model_state) # Render all models self.apps.render_multiple(states_to_be_rendered) def resolve_fields_and_relations(self): # Resolve fields. for model_state in self.models.values(): for field_name, field in model_state.fields.items(): field.name = field_name # Resolve relations. # {remote_model_key: {model_key: [(field_name, field)]}} self.relations = defaultdict(partial(defaultdict, list)) concretes, proxies = self._get_concrete_models_mapping_and_proxy_models() real_apps = set(self.real_apps) for model_key in concretes: model_state = self.models[model_key] for field_name, field in model_state.fields.items(): remote_field = field.remote_field if not remote_field: continue remote_model_key = resolve_relation(remote_field.model, *model_key) if remote_model_key[0] not in real_apps and remote_model_key in concretes: remote_model_key = concretes[remote_model_key] self.relations[remote_model_key][model_key].append((field_name, field)) through = getattr(remote_field, 'through', None) if not through: continue through_model_key = resolve_relation(through, *model_key) if through_model_key[0] not in real_apps and through_model_key in concretes: through_model_key = concretes[through_model_key] self.relations[through_model_key][model_key].append((field_name, field)) for model_key in proxies: self.relations[model_key] = self.relations[concretes[model_key]] def get_concrete_model_key(self, model): concrete_models_mapping, _ = self._get_concrete_models_mapping_and_proxy_models() model_key = make_model_tuple(model) return concrete_models_mapping[model_key] def _get_concrete_models_mapping_and_proxy_models(self): concrete_models_mapping = {} proxy_models = {} # Split models to proxy and concrete models. for model_key, model_state in self.models.items(): if model_state.options.get('proxy'): proxy_models[model_key] = model_state # Find a concrete model for the proxy. concrete_models_mapping[model_key] = self._find_concrete_model_from_proxy( proxy_models, model_state, ) else: concrete_models_mapping[model_key] = model_key return concrete_models_mapping, proxy_models def _find_concrete_model_from_proxy(self, proxy_models, model_state): for base in model_state.bases: base_key = make_model_tuple(base) base_state = proxy_models.get(base_key) if not base_state: # Concrete model found, stop looking at bases. 
return base_key return self._find_concrete_model_from_proxy(proxy_models, base_state) def clone(self): """Return an exact copy of this ProjectState.""" new_state = ProjectState( models={k: v.clone() for k, v in self.models.items()}, real_apps=self.real_apps, ) if 'apps' in self.__dict__: new_state.apps = self.apps.clone() new_state.is_delayed = self.is_delayed return new_state def clear_delayed_apps_cache(self): if self.is_delayed and 'apps' in self.__dict__: del self.__dict__['apps'] @cached_property def apps(self): return StateApps(self.real_apps, self.models) @classmethod def from_apps(cls, apps): """Take an Apps and return a ProjectState matching it.""" app_models = {} for model in apps.get_models(include_swapped=True): model_state = ModelState.from_model(model) app_models[(model_state.app_label, model_state.name_lower)] = model_state return cls(app_models) def __eq__(self, other): return self.models == other.models and set(self.real_apps) == set(other.real_apps) class AppConfigStub(AppConfig): """Stub of an AppConfig. Only provides a label and a dict of models.""" def __init__(self, label): self.apps = None self.models = {} # App-label and app-name are not the same thing, so technically passing # in the label here is wrong. In practice, migrations don't care about # the app name, but we need something unique, and the label works fine. self.label = label self.name = label def import_models(self): self.models = self.apps.all_models[self.label] class StateApps(Apps): """ Subclass of the global Apps registry class to better handle dynamic model additions and removals. """ def __init__(self, real_apps, models, ignore_swappable=False): # Any apps in self.real_apps should have all their models included # in the render. We don't use the original model instances as there # are some variables that refer to the Apps object. # FKs/M2Ms from real apps are also not included as they just # mess things up with partial states (due to lack of dependencies) self.real_models = [] for app_label in real_apps: app = global_apps.get_app_config(app_label) for model in app.get_models(): self.real_models.append(ModelState.from_model(model, exclude_rels=True)) # Populate the app registry with a stub for each application. app_labels = {model_state.app_label for model_state in models.values()} app_configs = [AppConfigStub(label) for label in sorted([*real_apps, *app_labels])] super().__init__(app_configs) # These locks get in the way of copying as implemented in clone(), # which is called whenever Django duplicates a StateApps before # updating it. self._lock = None self.ready_event = None self.render_multiple([*models.values(), *self.real_models]) # There shouldn't be any operations pending at this point. from django.core.checks.model_checks import _check_lazy_references ignore = {make_model_tuple(settings.AUTH_USER_MODEL)} if ignore_swappable else set() errors = _check_lazy_references(self, ignore=ignore) if errors: raise ValueError("\n".join(error.msg for error in errors)) @contextmanager def bulk_update(self): # Avoid clearing each model's cache for each change. Instead, clear # all caches when we're finished updating the model instances. ready = self.ready self.ready = False try: yield finally: self.ready = ready self.clear_cache() def render_multiple(self, model_states): # We keep trying to render the models in a loop, ignoring invalid # base errors, until the size of the unrendered models doesn't # decrease by at least one, meaning there's a base dependency loop/ # missing base. 
if not model_states: return # Prevent that all model caches are expired for each render. with self.bulk_update(): unrendered_models = model_states while unrendered_models: new_unrendered_models = [] for model in unrendered_models: try: model.render(self) except InvalidBasesError: new_unrendered_models.append(model) if len(new_unrendered_models) == len(unrendered_models): raise InvalidBasesError( "Cannot resolve bases for %r\nThis can happen if you are inheriting models from an " "app with migrations (e.g. contrib.auth)\n in an app with no migrations; see " "https://docs.djangoproject.com/en/%s/topics/migrations/#dependencies " "for more" % (new_unrendered_models, get_docs_version()) ) unrendered_models = new_unrendered_models def clone(self): """Return a clone of this registry.""" clone = StateApps([], {}) clone.all_models = copy.deepcopy(self.all_models) clone.app_configs = copy.deepcopy(self.app_configs) # Set the pointer to the correct app registry. for app_config in clone.app_configs.values(): app_config.apps = clone # No need to actually clone them, they'll never change clone.real_models = self.real_models return clone def register_model(self, app_label, model): self.all_models[app_label][model._meta.model_name] = model if app_label not in self.app_configs: self.app_configs[app_label] = AppConfigStub(app_label) self.app_configs[app_label].apps = self self.app_configs[app_label].models[model._meta.model_name] = model self.do_pending_operations(model) self.clear_cache() def unregister_model(self, app_label, model_name): try: del self.all_models[app_label][model_name] del self.app_configs[app_label].models[model_name] except KeyError: pass class ModelState: """ Represent a Django Model. Don't use the actual Model class as it's not designed to have its options changed - instead, mutate this one and then render it into a Model as required. Note that while you are allowed to mutate .fields, you are not allowed to mutate the Field instances inside there themselves - you must instead assign new ones, as these are not detached during a clone. """ def __init__(self, app_label, name, fields, options=None, bases=None, managers=None): self.app_label = app_label self.name = name self.fields = dict(fields) self.options = options or {} self.options.setdefault('indexes', []) self.options.setdefault('constraints', []) self.bases = bases or (models.Model,) self.managers = managers or [] for name, field in self.fields.items(): # Sanity-check that fields are NOT already bound to a model. if hasattr(field, 'model'): raise ValueError( 'ModelState.fields cannot be bound to a model - "%s" is.' % name ) # Sanity-check that relation fields are NOT referring to a model class. if field.is_relation and hasattr(field.related_model, '_meta'): raise ValueError( 'ModelState.fields cannot refer to a model class - "%s.to" does. ' 'Use a string reference instead.' % name ) if field.many_to_many and hasattr(field.remote_field.through, '_meta'): raise ValueError( 'ModelState.fields cannot refer to a model class - "%s.through" does. ' 'Use a string reference instead.' % name ) # Sanity-check that indexes have their name set. for index in self.options['indexes']: if not index.name: raise ValueError( "Indexes passed to ModelState require a name attribute. " "%r doesn't have one." 
% index ) @cached_property def name_lower(self): return self.name.lower() def get_field(self, field_name): field_name = ( self.options['order_with_respect_to'] if field_name == '_order' else field_name ) return self.fields[field_name] @classmethod def from_model(cls, model, exclude_rels=False): """Given a model, return a ModelState representing it.""" # Deconstruct the fields fields = [] for field in model._meta.local_fields: if getattr(field, "remote_field", None) and exclude_rels: continue if isinstance(field, models.OrderWrt): continue name = field.name try: fields.append((name, field.clone())) except TypeError as e: raise TypeError("Couldn't reconstruct field %s on %s: %s" % ( name, model._meta.label, e, )) if not exclude_rels: for field in model._meta.local_many_to_many: name = field.name try: fields.append((name, field.clone())) except TypeError as e: raise TypeError("Couldn't reconstruct m2m field %s on %s: %s" % ( name, model._meta.object_name, e, )) # Extract the options options = {} for name in DEFAULT_NAMES: # Ignore some special options if name in ["apps", "app_label"]: continue elif name in model._meta.original_attrs: if name == "unique_together": ut = model._meta.original_attrs["unique_together"] options[name] = set(normalize_together(ut)) elif name == "index_together": it = model._meta.original_attrs["index_together"] options[name] = set(normalize_together(it)) elif name == "indexes": indexes = [idx.clone() for idx in model._meta.indexes] for index in indexes: if not index.name: index.set_name_with_model(model) options['indexes'] = indexes elif name == 'constraints': options['constraints'] = [con.clone() for con in model._meta.constraints] else: options[name] = model._meta.original_attrs[name] # If we're ignoring relationships, remove all field-listing model # options (that option basically just means "make a stub model") if exclude_rels: for key in ["unique_together", "index_together", "order_with_respect_to"]: if key in options: del options[key] # Private fields are ignored, so remove options that refer to them. elif options.get('order_with_respect_to') in {field.name for field in model._meta.private_fields}: del options['order_with_respect_to'] def flatten_bases(model): bases = [] for base in model.__bases__: if hasattr(base, "_meta") and base._meta.abstract: bases.extend(flatten_bases(base)) else: bases.append(base) return bases # We can't rely on __mro__ directly because we only want to flatten # abstract models and not the whole tree. However by recursing on # __bases__ we may end up with duplicates and ordering issues, we # therefore discard any duplicates and reorder the bases according # to their index in the MRO. flattened_bases = sorted(set(flatten_bases(model)), key=lambda x: model.__mro__.index(x)) # Make our record bases = tuple( ( base._meta.label_lower if hasattr(base, "_meta") else base ) for base in flattened_bases ) # Ensure at least one base inherits from models.Model if not any((isinstance(base, str) or issubclass(base, models.Model)) for base in bases): bases = (models.Model,) managers = [] manager_names = set() default_manager_shim = None for manager in model._meta.managers: if manager.name in manager_names: # Skip overridden managers. continue elif manager.use_in_migrations: # Copy managers usable in migrations. new_manager = copy.copy(manager) new_manager._set_creation_counter() elif manager is model._base_manager or manager is model._default_manager: # Shim custom managers used as default and base managers. 
new_manager = models.Manager() new_manager.model = manager.model new_manager.name = manager.name if manager is model._default_manager: default_manager_shim = new_manager else: continue manager_names.add(manager.name) managers.append((manager.name, new_manager)) # Ignore a shimmed default manager called objects if it's the only one. if managers == [('objects', default_manager_shim)]: managers = [] # Construct the new ModelState return cls( model._meta.app_label, model._meta.object_name, fields, options, bases, managers, ) def construct_managers(self): """Deep-clone the managers using deconstruction.""" # Sort all managers by their creation counter sorted_managers = sorted(self.managers, key=lambda v: v[1].creation_counter) for mgr_name, manager in sorted_managers: as_manager, manager_path, qs_path, args, kwargs = manager.deconstruct() if as_manager: qs_class = import_string(qs_path) yield mgr_name, qs_class.as_manager() else: manager_class = import_string(manager_path) yield mgr_name, manager_class(*args, **kwargs) def clone(self): """Return an exact copy of this ModelState.""" return self.__class__( app_label=self.app_label, name=self.name, fields=dict(self.fields), # Since options are shallow-copied here, operations such as # AddIndex must replace their option (e.g 'indexes') rather # than mutating it. options=dict(self.options), bases=self.bases, managers=list(self.managers), ) def render(self, apps): """Create a Model object from our current state into the given apps.""" # First, make a Meta object meta_contents = {'app_label': self.app_label, 'apps': apps, **self.options} meta = type("Meta", (), meta_contents) # Then, work out our bases try: bases = tuple( (apps.get_model(base) if isinstance(base, str) else base) for base in self.bases ) except LookupError: raise InvalidBasesError("Cannot resolve one or more bases from %r" % (self.bases,)) # Clone fields for the body, add other bits. body = {name: field.clone() for name, field in self.fields.items()} body['Meta'] = meta body['__module__'] = "__fake__" # Restore managers body.update(self.construct_managers()) # Then, make a Model object (apps.register_model is called in __new__) return type(self.name, bases, body) def get_index_by_name(self, name): for index in self.options['indexes']: if index.name == name: return index raise ValueError("No index named %s on model %s" % (name, self.name)) def get_constraint_by_name(self, name): for constraint in self.options['constraints']: if constraint.name == name: return constraint raise ValueError('No constraint named %s on model %s' % (name, self.name)) def __repr__(self): return "<%s: '%s.%s'>" % (self.__class__.__name__, self.app_label, self.name) def __eq__(self, other): return ( (self.app_label == other.app_label) and (self.name == other.name) and (len(self.fields) == len(other.fields)) and all( k1 == k2 and f1.deconstruct()[1:] == f2.deconstruct()[1:] for (k1, f1), (k2, f2) in zip( sorted(self.fields.items()), sorted(other.fields.items()), ) ) and (self.options == other.options) and (self.bases == other.bases) and (self.managers == other.managers) )
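# --- Illustrative usage sketch (added for exposition; not part of Django's
# source). It shows the round trip this module exists for: build a ModelState
# from unbound field instances, add it to a ProjectState, and let the StateApps
# registry render it into a real "historical" model class. The 'demo' app
# label and field names are invented for illustration, and the
# settings.configure() guard is an assumption for running the file standalone.
if __name__ == '__main__':
    import django
    from django.conf import settings

    if not settings.configured:
        settings.configure(INSTALLED_APPS=[])
        django.setup()

    article = ModelState(
        app_label='demo',
        name='Article',
        # Fields must be fresh, unbound instances; the sanity checks in
        # ModelState.__init__() reject anything already attached to a model.
        fields=[
            ('id', models.AutoField(primary_key=True)),
            ('title', models.CharField(max_length=100)),
        ],
    )
    project_state = ProjectState()
    project_state.add_model(article)
    # Rendering goes through StateApps; model lookups use the lowercased name.
    HistoricalArticle = project_state.apps.get_model('demo', 'article')
    print(HistoricalArticle._meta.get_field('title').max_length)  # -> 100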
import pkgutil import sys from importlib import import_module, reload from django.apps import apps from django.conf import settings from django.db.migrations.graph import MigrationGraph from django.db.migrations.recorder import MigrationRecorder from .exceptions import ( AmbiguityError, BadMigrationError, InconsistentMigrationHistory, NodeNotFoundError, ) MIGRATIONS_MODULE_NAME = 'migrations' class MigrationLoader: """ Load migration files from disk and their status from the database. Migration files are expected to live in the "migrations" directory of an app. Their names are entirely unimportant from a code perspective, but will probably follow the 1234_name.py convention. On initialization, this class will scan those directories, and open and read the Python files, looking for a class called Migration, which should inherit from django.db.migrations.Migration. See django.db.migrations.migration for what that looks like. Some migrations will be marked as "replacing" another set of migrations. These are loaded into a separate set of migrations away from the main ones. If all the migrations they replace are either unapplied or missing from disk, then they are injected into the main set, replacing the named migrations. Any dependency pointers to the replaced migrations are re-pointed to the new migration. This does mean that this class MUST also talk to the database as well as to disk, but this is probably fine. We're already not just operating in memory. """ def __init__( self, connection, load=True, ignore_no_migrations=False, replace_migrations=True, ): self.connection = connection self.disk_migrations = None self.applied_migrations = None self.ignore_no_migrations = ignore_no_migrations self.replace_migrations = replace_migrations if load: self.build_graph() @classmethod def migrations_module(cls, app_label): """ Return the path to the migrations module for the specified app_label and a boolean indicating if the module is specified in settings.MIGRATION_MODULE. """ if app_label in settings.MIGRATION_MODULES: return settings.MIGRATION_MODULES[app_label], True else: app_package_name = apps.get_app_config(app_label).name return '%s.%s' % (app_package_name, MIGRATIONS_MODULE_NAME), False def load_disk(self): """Load the migrations from all INSTALLED_APPS from disk.""" self.disk_migrations = {} self.unmigrated_apps = set() self.migrated_apps = set() for app_config in apps.get_app_configs(): # Get the migrations module directory module_name, explicit = self.migrations_module(app_config.label) if module_name is None: self.unmigrated_apps.add(app_config.label) continue was_loaded = module_name in sys.modules try: module = import_module(module_name) except ModuleNotFoundError as e: if ( (explicit and self.ignore_no_migrations) or (not explicit and MIGRATIONS_MODULE_NAME in e.name.split('.')) ): self.unmigrated_apps.add(app_config.label) continue raise else: # Module is not a package (e.g. migrations.py). if not hasattr(module, '__path__'): self.unmigrated_apps.add(app_config.label) continue # Empty directories are namespaces. Namespace packages have no # __file__ and don't use a list for __path__. 
See # https://docs.python.org/3/reference/import.html#namespace-packages if ( getattr(module, '__file__', None) is None and not isinstance(module.__path__, list) ): self.unmigrated_apps.add(app_config.label) continue # Force a reload if it's already loaded (tests need this) if was_loaded: reload(module) self.migrated_apps.add(app_config.label) migration_names = { name for _, name, is_pkg in pkgutil.iter_modules(module.__path__) if not is_pkg and name[0] not in '_~' } # Load migrations for migration_name in migration_names: migration_path = '%s.%s' % (module_name, migration_name) try: migration_module = import_module(migration_path) except ImportError as e: if 'bad magic number' in str(e): raise ImportError( "Couldn't import %r as it appears to be a stale " ".pyc file." % migration_path ) from e else: raise if not hasattr(migration_module, "Migration"): raise BadMigrationError( "Migration %s in app %s has no Migration class" % (migration_name, app_config.label) ) self.disk_migrations[app_config.label, migration_name] = migration_module.Migration( migration_name, app_config.label, ) def get_migration(self, app_label, name_prefix): """Return the named migration or raise NodeNotFoundError.""" return self.graph.nodes[app_label, name_prefix] def get_migration_by_prefix(self, app_label, name_prefix): """ Return the migration(s) which match the given app label and name_prefix. """ # Do the search results = [] for migration_app_label, migration_name in self.disk_migrations: if migration_app_label == app_label and migration_name.startswith(name_prefix): results.append((migration_app_label, migration_name)) if len(results) > 1: raise AmbiguityError( "There is more than one migration for '%s' with the prefix '%s'" % (app_label, name_prefix) ) elif not results: raise KeyError( f"There is no migration for '{app_label}' with the prefix " f"'{name_prefix}'" ) else: return self.disk_migrations[results[0]] def check_key(self, key, current_app): if (key[1] != "__first__" and key[1] != "__latest__") or key in self.graph: return key # Special-case __first__, which means "the first migration" for # migrated apps, and is ignored for unmigrated apps. It allows # makemigrations to declare dependencies on apps before they even have # migrations. if key[0] == current_app: # Ignore __first__ references to the same app (#22325) return if key[0] in self.unmigrated_apps: # This app isn't migrated, but something depends on it. # The models will get auto-added into the state, though # so we're fine. return if key[0] in self.migrated_apps: try: if key[1] == "__first__": return self.graph.root_nodes(key[0])[0] else: # "__latest__" return self.graph.leaf_nodes(key[0])[0] except IndexError: if self.ignore_no_migrations: return None else: raise ValueError("Dependency on app with no migrations: %s" % key[0]) raise ValueError("Dependency on unknown app: %s" % key[0]) def add_internal_dependencies(self, key, migration): """ Internal dependencies need to be added first to ensure `__first__` dependencies find the correct root node. """ for parent in migration.dependencies: # Ignore __first__ references to the same app. 
if parent[0] == key[0] and parent[1] != '__first__': self.graph.add_dependency(migration, key, parent, skip_validation=True) def add_external_dependencies(self, key, migration): for parent in migration.dependencies: # Skip internal dependencies if key[0] == parent[0]: continue parent = self.check_key(parent, key[0]) if parent is not None: self.graph.add_dependency(migration, key, parent, skip_validation=True) for child in migration.run_before: child = self.check_key(child, key[0]) if child is not None: self.graph.add_dependency(migration, child, key, skip_validation=True) def build_graph(self): """ Build a migration dependency graph using both the disk and database. You'll need to rebuild the graph if you apply migrations. This isn't usually a problem as generally migration stuff runs in a one-shot process. """ # Load disk data self.load_disk() # Load database data if self.connection is None: self.applied_migrations = {} else: recorder = MigrationRecorder(self.connection) self.applied_migrations = recorder.applied_migrations() # To start, populate the migration graph with nodes for ALL migrations # and their dependencies. Also make note of replacing migrations at this step. self.graph = MigrationGraph() self.replacements = {} for key, migration in self.disk_migrations.items(): self.graph.add_node(key, migration) # Replacing migrations. if migration.replaces: self.replacements[key] = migration for key, migration in self.disk_migrations.items(): # Internal (same app) dependencies. self.add_internal_dependencies(key, migration) # Add external dependencies now that the internal ones have been resolved. for key, migration in self.disk_migrations.items(): self.add_external_dependencies(key, migration) # Carry out replacements where possible and if enabled. if self.replace_migrations: for key, migration in self.replacements.items(): # Get applied status of each of this migration's replacement # targets. applied_statuses = [(target in self.applied_migrations) for target in migration.replaces] # The replacing migration is only marked as applied if all of # its replacement targets are. if all(applied_statuses): self.applied_migrations[key] = migration else: self.applied_migrations.pop(key, None) # A replacing migration can be used if either all or none of # its replacement targets have been applied. if all(applied_statuses) or (not any(applied_statuses)): self.graph.remove_replaced_nodes(key, migration.replaces) else: # This replacing migration cannot be used because it is # partially applied. Remove it from the graph and remap # dependencies to it (#25945). self.graph.remove_replacement_node(key, migration.replaces) # Ensure the graph is consistent. try: self.graph.validate_consistency() except NodeNotFoundError as exc: # Check if the missing node could have been replaced by any squash # migration but wasn't because the squash migration was partially # applied before. In that case raise a more understandable exception # (#23556). # Get reverse replacements. reverse_replacements = {} for key, migration in self.replacements.items(): for replaced in migration.replaces: reverse_replacements.setdefault(replaced, set()).add(key) # Try to reraise exception with more detail. if exc.node in reverse_replacements: candidates = reverse_replacements.get(exc.node, set()) is_replaced = any(candidate in self.graph.nodes for candidate in candidates) if not is_replaced: tries = ', '.join('%s.%s' % c for c in candidates) raise NodeNotFoundError( "Migration {0} depends on nonexistent node ('{1}', '{2}'). 
" "Django tried to replace migration {1}.{2} with any of [{3}] " "but wasn't able to because some of the replaced migrations " "are already applied.".format( exc.origin, exc.node[0], exc.node[1], tries ), exc.node ) from exc raise self.graph.ensure_not_cyclic() def check_consistent_history(self, connection): """ Raise InconsistentMigrationHistory if any applied migrations have unapplied dependencies. """ recorder = MigrationRecorder(connection) applied = recorder.applied_migrations() for migration in applied: # If the migration is unknown, skip it. if migration not in self.graph.nodes: continue for parent in self.graph.node_map[migration].parents: if parent not in applied: # Skip unapplied squashed migrations that have all of their # `replaces` applied. if parent in self.replacements: if all(m in applied for m in self.replacements[parent].replaces): continue raise InconsistentMigrationHistory( "Migration {}.{} is applied before its dependency " "{}.{} on database '{}'.".format( migration[0], migration[1], parent[0], parent[1], connection.alias, ) ) def detect_conflicts(self): """ Look through the loaded graph and detect any conflicts - apps with more than one leaf migration. Return a dict of the app labels that conflict with the migration names that conflict. """ seen_apps = {} conflicting_apps = set() for app_label, migration_name in self.graph.leaf_nodes(): if app_label in seen_apps: conflicting_apps.add(app_label) seen_apps.setdefault(app_label, set()).add(migration_name) return {app_label: sorted(seen_apps[app_label]) for app_label in conflicting_apps} def project_state(self, nodes=None, at_end=True): """ Return a ProjectState object representing the most recent state that the loaded migrations represent. See graph.make_state() for the meaning of "nodes" and "at_end". """ return self.graph.make_state(nodes=nodes, at_end=at_end, real_apps=list(self.unmigrated_apps)) def collect_sql(self, plan): """ Take a migration plan and return a list of collected SQL statements that represent the best-efforts version of that plan. """ statements = [] state = None for migration, backwards in plan: with self.connection.schema_editor(collect_sql=True, atomic=migration.atomic) as schema_editor: if state is None: state = self.project_state((migration.app_label, migration.name), at_end=False) if not backwards: state = migration.apply(state, schema_editor, collect_sql=True) else: state = migration.unapply(state, schema_editor, collect_sql=True) statements.extend(schema_editor.collected_sql) return statements
from functools import total_ordering from django.db.migrations.state import ProjectState from .exceptions import CircularDependencyError, NodeNotFoundError @total_ordering class Node: """ A single node in the migration graph. Contains direct links to adjacent nodes in either direction. """ def __init__(self, key): self.key = key self.children = set() self.parents = set() def __eq__(self, other): return self.key == other def __lt__(self, other): return self.key < other def __hash__(self): return hash(self.key) def __getitem__(self, item): return self.key[item] def __str__(self): return str(self.key) def __repr__(self): return '<%s: (%r, %r)>' % (self.__class__.__name__, self.key[0], self.key[1]) def add_child(self, child): self.children.add(child) def add_parent(self, parent): self.parents.add(parent) class DummyNode(Node): """ A node that doesn't correspond to a migration file on disk. (A squashed migration that was removed, for example.) After the migration graph is processed, all dummy nodes should be removed. If there are any left, a nonexistent dependency error is raised. """ def __init__(self, key, origin, error_message): super().__init__(key) self.origin = origin self.error_message = error_message def raise_error(self): raise NodeNotFoundError(self.error_message, self.key, origin=self.origin) class MigrationGraph: """ Represent the digraph of all migrations in a project. Each migration is a node, and each dependency is an edge. There are no implicit dependencies between numbered migrations - the numbering is merely a convention to aid file listing. Every new numbered migration has a declared dependency to the previous number, meaning that VCS branch merges can be detected and resolved. Migrations files can be marked as replacing another set of migrations - this is to support the "squash" feature. The graph handler isn't responsible for these; instead, the code to load them in here should examine the migration files and if the replaced migrations are all either unapplied or not present, it should ignore the replaced ones, load in just the replacing migration, and repoint any dependencies that pointed to the replaced migrations to point to the replacing one. A node should be a tuple: (app_path, migration_name). The tree special-cases things within an app - namely, root nodes and leaf nodes ignore dependencies to other apps. """ def __init__(self): self.node_map = {} self.nodes = {} def add_node(self, key, migration): assert key not in self.node_map node = Node(key) self.node_map[key] = node self.nodes[key] = migration def add_dummy_node(self, key, origin, error_message): node = DummyNode(key, origin, error_message) self.node_map[key] = node self.nodes[key] = None def add_dependency(self, migration, child, parent, skip_validation=False): """ This may create dummy nodes if they don't yet exist. If `skip_validation=True`, validate_consistency() should be called afterward. 
""" if child not in self.nodes: error_message = ( "Migration %s dependencies reference nonexistent" " child node %r" % (migration, child) ) self.add_dummy_node(child, migration, error_message) if parent not in self.nodes: error_message = ( "Migration %s dependencies reference nonexistent" " parent node %r" % (migration, parent) ) self.add_dummy_node(parent, migration, error_message) self.node_map[child].add_parent(self.node_map[parent]) self.node_map[parent].add_child(self.node_map[child]) if not skip_validation: self.validate_consistency() def remove_replaced_nodes(self, replacement, replaced): """ Remove each of the `replaced` nodes (when they exist). Any dependencies that were referencing them are changed to reference the `replacement` node instead. """ # Cast list of replaced keys to set to speed up lookup later. replaced = set(replaced) try: replacement_node = self.node_map[replacement] except KeyError as err: raise NodeNotFoundError( "Unable to find replacement node %r. It was either never added" " to the migration graph, or has been removed." % (replacement,), replacement ) from err for replaced_key in replaced: self.nodes.pop(replaced_key, None) replaced_node = self.node_map.pop(replaced_key, None) if replaced_node: for child in replaced_node.children: child.parents.remove(replaced_node) # We don't want to create dependencies between the replaced # node and the replacement node as this would lead to # self-referencing on the replacement node at a later iteration. if child.key not in replaced: replacement_node.add_child(child) child.add_parent(replacement_node) for parent in replaced_node.parents: parent.children.remove(replaced_node) # Again, to avoid self-referencing. if parent.key not in replaced: replacement_node.add_parent(parent) parent.add_child(replacement_node) def remove_replacement_node(self, replacement, replaced): """ The inverse operation to `remove_replaced_nodes`. Almost. Remove the replacement node `replacement` and remap its child nodes to `replaced` - the list of nodes it would have replaced. Don't remap its parent nodes as they are expected to be correct already. """ self.nodes.pop(replacement, None) try: replacement_node = self.node_map.pop(replacement) except KeyError as err: raise NodeNotFoundError( "Unable to remove replacement node %r. It was either never added" " to the migration graph, or has been removed already." % (replacement,), replacement ) from err replaced_nodes = set() replaced_nodes_parents = set() for key in replaced: replaced_node = self.node_map.get(key) if replaced_node: replaced_nodes.add(replaced_node) replaced_nodes_parents |= replaced_node.parents # We're only interested in the latest replaced node, so filter out # replaced nodes that are parents of other replaced nodes. replaced_nodes -= replaced_nodes_parents for child in replacement_node.children: child.parents.remove(replacement_node) for replaced_node in replaced_nodes: replaced_node.add_child(child) child.add_parent(replaced_node) for parent in replacement_node.parents: parent.children.remove(replacement_node) # NOTE: There is no need to remap parent dependencies as we can # assume the replaced nodes already have the correct ancestry. def validate_consistency(self): """Ensure there are no dummy nodes remaining in the graph.""" [n.raise_error() for n in self.node_map.values() if isinstance(n, DummyNode)] def forwards_plan(self, target): """ Given a node, return a list of which previous nodes (dependencies) must be applied, ending with the node itself. 
This is the list you would follow if applying the migrations to a database. """ if target not in self.nodes: raise NodeNotFoundError("Node %r not a valid node" % (target,), target) return self.iterative_dfs(self.node_map[target]) def backwards_plan(self, target): """ Given a node, return a list of which dependent nodes (dependencies) must be unapplied, ending with the node itself. This is the list you would follow if removing the migrations from a database. """ if target not in self.nodes: raise NodeNotFoundError("Node %r not a valid node" % (target,), target) return self.iterative_dfs(self.node_map[target], forwards=False) def iterative_dfs(self, start, forwards=True): """Iterative depth-first search for finding dependencies.""" visited = [] visited_set = set() stack = [(start, False)] while stack: node, processed = stack.pop() if node in visited_set: pass elif processed: visited_set.add(node) visited.append(node.key) else: stack.append((node, True)) stack += [(n, False) for n in sorted(node.parents if forwards else node.children)] return visited def root_nodes(self, app=None): """ Return all root nodes - that is, nodes with no dependencies inside their app. These are the starting point for an app. """ roots = set() for node in self.nodes: if all(key[0] != node[0] for key in self.node_map[node].parents) and (not app or app == node[0]): roots.add(node) return sorted(roots) def leaf_nodes(self, app=None): """ Return all leaf nodes - that is, nodes with no dependents in their app. These are the "most current" version of an app's schema. Having more than one per app is technically an error, but one that gets handled further up, in the interactive command - it's usually the result of a VCS merge and needs some user input. """ leaves = set() for node in self.nodes: if all(key[0] != node[0] for key in self.node_map[node].children) and (not app or app == node[0]): leaves.add(node) return sorted(leaves) def ensure_not_cyclic(self): # Algo from GvR: # https://neopythonic.blogspot.com/2009/01/detecting-cycles-in-directed-graph.html todo = set(self.nodes) while todo: node = todo.pop() stack = [node] while stack: top = stack[-1] for child in self.node_map[top].children: # Use child.key instead of child to speed up the frequent # hashing. node = child.key if node in stack: cycle = stack[stack.index(node):] raise CircularDependencyError(", ".join("%s.%s" % n for n in cycle)) if node in todo: stack.append(node) todo.remove(node) break else: node = stack.pop() def __str__(self): return 'Graph: %s nodes, %s edges' % self._nodes_and_edges() def __repr__(self): nodes, edges = self._nodes_and_edges() return '<%s: nodes=%s, edges=%s>' % (self.__class__.__name__, nodes, edges) def _nodes_and_edges(self): return len(self.nodes), sum(len(node.parents) for node in self.node_map.values()) def _generate_plan(self, nodes, at_end): plan = [] for node in nodes: for migration in self.forwards_plan(node): if migration not in plan and (at_end or migration not in nodes): plan.append(migration) return plan def make_state(self, nodes=None, at_end=True, real_apps=None): """ Given a migration node or nodes, return a complete ProjectState for it. If at_end is False, return the state before the migration has run. If nodes is not provided, return the overall most current project state. 
""" if nodes is None: nodes = list(self.leaf_nodes()) if not nodes: return ProjectState() if not isinstance(nodes[0], tuple): nodes = [nodes] plan = self._generate_plan(nodes, at_end) project_state = ProjectState(real_apps=real_apps) for node in plan: project_state = self.nodes[node].mutate_state(project_state, preserve=False) return project_state def __contains__(self, node): return node in self.nodes
import functools import re from itertools import chain from django.conf import settings from django.db import models from django.db.migrations import operations from django.db.migrations.migration import Migration from django.db.migrations.operations.models import AlterModelOptions from django.db.migrations.optimizer import MigrationOptimizer from django.db.migrations.questioner import MigrationQuestioner from django.db.migrations.utils import ( COMPILED_REGEX_TYPE, RegexObject, resolve_relation, ) from django.utils.topological_sort import stable_topological_sort class MigrationAutodetector: """ Take a pair of ProjectStates and compare them to see what the first would need doing to make it match the second (the second usually being the project's current state). Note that this naturally operates on entire projects at a time, as it's likely that changes interact (for example, you can't add a ForeignKey without having a migration to add the table it depends on first). A user interface may offer single-app usage if it wishes, with the caveat that it may not always be possible. """ def __init__(self, from_state, to_state, questioner=None): self.from_state = from_state self.to_state = to_state self.questioner = questioner or MigrationQuestioner() self.existing_apps = {app for app, model in from_state.models} def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None): """ Main entry point to produce a list of applicable changes. Take a graph to base names on and an optional set of apps to try and restrict to (restriction is not guaranteed) """ changes = self._detect_changes(convert_apps, graph) changes = self.arrange_for_graph(changes, graph, migration_name) if trim_to_apps: changes = self._trim_to_apps(changes, trim_to_apps) return changes def deep_deconstruct(self, obj): """ Recursive deconstruction for a field and its arguments. Used for full comparison for rename/alter; sometimes a single-level deconstruction will not compare correctly. """ if isinstance(obj, list): return [self.deep_deconstruct(value) for value in obj] elif isinstance(obj, tuple): return tuple(self.deep_deconstruct(value) for value in obj) elif isinstance(obj, dict): return { key: self.deep_deconstruct(value) for key, value in obj.items() } elif isinstance(obj, functools.partial): return (obj.func, self.deep_deconstruct(obj.args), self.deep_deconstruct(obj.keywords)) elif isinstance(obj, COMPILED_REGEX_TYPE): return RegexObject(obj) elif isinstance(obj, type): # If this is a type that implements 'deconstruct' as an instance method, # avoid treating this as being deconstructible itself - see #22951 return obj elif hasattr(obj, 'deconstruct'): deconstructed = obj.deconstruct() if isinstance(obj, models.Field): # we have a field which also returns a name deconstructed = deconstructed[1:] path, args, kwargs = deconstructed return ( path, [self.deep_deconstruct(value) for value in args], { key: self.deep_deconstruct(value) for key, value in kwargs.items() }, ) else: return obj def only_relation_agnostic_fields(self, fields): """ Return a definition of the fields that ignores field names and what related fields actually relate to. Used for detecting renames (as the related fields change during renames). 
""" fields_def = [] for name, field in sorted(fields.items()): deconstruction = self.deep_deconstruct(field) if field.remote_field and field.remote_field.model: del deconstruction[2]['to'] fields_def.append(deconstruction) return fields_def def _detect_changes(self, convert_apps=None, graph=None): """ Return a dict of migration plans which will achieve the change from from_state to to_state. The dict has app labels as keys and a list of migrations as values. The resulting migrations aren't specially named, but the names do matter for dependencies inside the set. convert_apps is the list of apps to convert to use migrations (i.e. to make initial migrations for, in the usual case) graph is an optional argument that, if provided, can help improve dependency generation and avoid potential circular dependencies. """ # The first phase is generating all the operations for each app # and gathering them into a big per-app list. # Then go through that list, order it, and split into migrations to # resolve dependencies caused by M2Ms and FKs. self.generated_operations = {} self.altered_indexes = {} self.altered_constraints = {} # Prepare some old/new state and model lists, separating # proxy models and ignoring unmigrated apps. self.old_model_keys = set() self.old_proxy_keys = set() self.old_unmanaged_keys = set() self.new_model_keys = set() self.new_proxy_keys = set() self.new_unmanaged_keys = set() for (app_label, model_name), model_state in self.from_state.models.items(): if not model_state.options.get('managed', True): self.old_unmanaged_keys.add((app_label, model_name)) elif app_label not in self.from_state.real_apps: if model_state.options.get('proxy'): self.old_proxy_keys.add((app_label, model_name)) else: self.old_model_keys.add((app_label, model_name)) for (app_label, model_name), model_state in self.to_state.models.items(): if not model_state.options.get('managed', True): self.new_unmanaged_keys.add((app_label, model_name)) elif ( app_label not in self.from_state.real_apps or (convert_apps and app_label in convert_apps) ): if model_state.options.get('proxy'): self.new_proxy_keys.add((app_label, model_name)) else: self.new_model_keys.add((app_label, model_name)) self.from_state.resolve_fields_and_relations() self.to_state.resolve_fields_and_relations() # Renames have to come first self.generate_renamed_models() # Prepare lists of fields and generate through model map self._prepare_field_lists() self._generate_through_model_map() # Generate non-rename model operations self.generate_deleted_models() self.generate_created_models() self.generate_deleted_proxies() self.generate_created_proxies() self.generate_altered_options() self.generate_altered_managers() # Create the altered indexes and store them in self.altered_indexes. # This avoids the same computation in generate_removed_indexes() # and generate_added_indexes(). 
self.create_altered_indexes() self.create_altered_constraints() # Generate index removal operations before field is removed self.generate_removed_constraints() self.generate_removed_indexes() # Generate field operations self.generate_renamed_fields() self.generate_removed_fields() self.generate_added_fields() self.generate_altered_fields() self.generate_altered_order_with_respect_to() self.generate_altered_unique_together() self.generate_altered_index_together() self.generate_added_indexes() self.generate_added_constraints() self.generate_altered_db_table() self._sort_migrations() self._build_migration_list(graph) self._optimize_migrations() return self.migrations def _prepare_field_lists(self): """ Prepare field lists and a list of the fields that used through models in the old state so dependencies can be made from the through model deletion to the field that uses it. """ self.kept_model_keys = self.old_model_keys & self.new_model_keys self.kept_proxy_keys = self.old_proxy_keys & self.new_proxy_keys self.kept_unmanaged_keys = self.old_unmanaged_keys & self.new_unmanaged_keys self.through_users = {} self.old_field_keys = { (app_label, model_name, field_name) for app_label, model_name in self.kept_model_keys for field_name in self.from_state.models[ app_label, self.renamed_models.get((app_label, model_name), model_name) ].fields } self.new_field_keys = { (app_label, model_name, field_name) for app_label, model_name in self.kept_model_keys for field_name in self.to_state.models[app_label, model_name].fields } def _generate_through_model_map(self): """Through model map generation.""" for app_label, model_name in sorted(self.old_model_keys): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] for field_name, field in old_model_state.fields.items(): if hasattr(field, 'remote_field') and getattr(field.remote_field, 'through', None): through_key = resolve_relation(field.remote_field.through, app_label, model_name) self.through_users[through_key] = (app_label, old_model_name, field_name) @staticmethod def _resolve_dependency(dependency): """ Return the resolved dependency and a boolean denoting whether or not it was swappable. """ if dependency[0] != '__setting__': return dependency, False resolved_app_label, resolved_object_name = getattr(settings, dependency[1]).split('.') return (resolved_app_label, resolved_object_name.lower()) + dependency[2:], True def _build_migration_list(self, graph=None): """ Chop the lists of operations up into migrations with dependencies on each other. Do this by going through an app's list of operations until one is found that has an outgoing dependency that isn't in another app's migration yet (hasn't been chopped off its list). Then chop off the operations before it into a migration and move onto the next app. If the loops completes without doing anything, there's a circular dependency (which _should_ be impossible as the operations are all split at this point so they can't depend and be depended on). """ self.migrations = {} num_ops = sum(len(x) for x in self.generated_operations.values()) chop_mode = False while num_ops: # On every iteration, we step through all the apps and see if there # is a completed set of operations. # If we find that a subset of the operations are complete we can # try to chop it off from the rest and continue, but we only # do this if we've already been through the list once before # without any chopping and nothing has changed. 
for app_label in sorted(self.generated_operations): chopped = [] dependencies = set() for operation in list(self.generated_operations[app_label]): deps_satisfied = True operation_dependencies = set() for dep in operation._auto_deps: # Temporarily resolve the swappable dependency to # prevent circular references. While keeping the # dependency checks on the resolved model, add the # swappable dependencies. original_dep = dep dep, is_swappable_dep = self._resolve_dependency(dep) if dep[0] != app_label: # External app dependency. See if it's not yet # satisfied. for other_operation in self.generated_operations.get(dep[0], []): if self.check_dependency(other_operation, dep): deps_satisfied = False break if not deps_satisfied: break else: if is_swappable_dep: operation_dependencies.add((original_dep[0], original_dep[1])) elif dep[0] in self.migrations: operation_dependencies.add((dep[0], self.migrations[dep[0]][-1].name)) else: # If we can't find the other app, we add a first/last dependency, # but only if we've already been through once and checked everything if chop_mode: # If the app already exists, we add a dependency on the last migration, # as we don't know which migration contains the target field. # If it's not yet migrated or has no migrations, we use __first__ if graph and graph.leaf_nodes(dep[0]): operation_dependencies.add(graph.leaf_nodes(dep[0])[0]) else: operation_dependencies.add((dep[0], "__first__")) else: deps_satisfied = False if deps_satisfied: chopped.append(operation) dependencies.update(operation_dependencies) del self.generated_operations[app_label][0] else: break # Make a migration! Well, only if there's stuff to put in it if dependencies or chopped: if not self.generated_operations[app_label] or chop_mode: subclass = type("Migration", (Migration,), {"operations": [], "dependencies": []}) instance = subclass("auto_%i" % (len(self.migrations.get(app_label, [])) + 1), app_label) instance.dependencies = list(dependencies) instance.operations = chopped instance.initial = app_label not in self.existing_apps self.migrations.setdefault(app_label, []).append(instance) chop_mode = False else: self.generated_operations[app_label] = chopped + self.generated_operations[app_label] new_num_ops = sum(len(x) for x in self.generated_operations.values()) if new_num_ops == num_ops: if not chop_mode: chop_mode = True else: raise ValueError("Cannot resolve operation dependencies: %r" % self.generated_operations) num_ops = new_num_ops def _sort_migrations(self): """ Reorder to make things possible. Reordering may be needed so FKs work nicely inside the same app. """ for app_label, ops in sorted(self.generated_operations.items()): # construct a dependency graph for intra-app dependencies dependency_graph = {op: set() for op in ops} for op in ops: for dep in op._auto_deps: # Resolve intra-app dependencies to handle circular # references involving a swappable model. 
dep = self._resolve_dependency(dep)[0] if dep[0] == app_label: for op2 in ops: if self.check_dependency(op2, dep): dependency_graph[op].add(op2) # we use a stable sort for deterministic tests & general behavior self.generated_operations[app_label] = stable_topological_sort(ops, dependency_graph) def _optimize_migrations(self): # Add in internal dependencies among the migrations for app_label, migrations in self.migrations.items(): for m1, m2 in zip(migrations, migrations[1:]): m2.dependencies.append((app_label, m1.name)) # De-dupe dependencies for migrations in self.migrations.values(): for migration in migrations: migration.dependencies = list(set(migration.dependencies)) # Optimize migrations for app_label, migrations in self.migrations.items(): for migration in migrations: migration.operations = MigrationOptimizer().optimize(migration.operations, app_label) def check_dependency(self, operation, dependency): """ Return True if the given operation depends on the given dependency, False otherwise. """ # Created model if dependency[2] is None and dependency[3] is True: return ( isinstance(operation, operations.CreateModel) and operation.name_lower == dependency[1].lower() ) # Created field elif dependency[2] is not None and dependency[3] is True: return ( ( isinstance(operation, operations.CreateModel) and operation.name_lower == dependency[1].lower() and any(dependency[2] == x for x, y in operation.fields) ) or ( isinstance(operation, operations.AddField) and operation.model_name_lower == dependency[1].lower() and operation.name_lower == dependency[2].lower() ) ) # Removed field elif dependency[2] is not None and dependency[3] is False: return ( isinstance(operation, operations.RemoveField) and operation.model_name_lower == dependency[1].lower() and operation.name_lower == dependency[2].lower() ) # Removed model elif dependency[2] is None and dependency[3] is False: return ( isinstance(operation, operations.DeleteModel) and operation.name_lower == dependency[1].lower() ) # Field being altered elif dependency[2] is not None and dependency[3] == "alter": return ( isinstance(operation, operations.AlterField) and operation.model_name_lower == dependency[1].lower() and operation.name_lower == dependency[2].lower() ) # order_with_respect_to being unset for a field elif dependency[2] is not None and dependency[3] == "order_wrt_unset": return ( isinstance(operation, operations.AlterOrderWithRespectTo) and operation.name_lower == dependency[1].lower() and (operation.order_with_respect_to or "").lower() != dependency[2].lower() ) # Field is removed and part of an index/unique_together elif dependency[2] is not None and dependency[3] == "foo_together_change": return ( isinstance(operation, (operations.AlterUniqueTogether, operations.AlterIndexTogether)) and operation.name_lower == dependency[1].lower() ) # Unknown dependency. Raise an error. else: raise ValueError("Can't handle dependency %r" % (dependency,)) def add_operation(self, app_label, operation, dependencies=None, beginning=False): # Dependencies are (app_label, model_name, field_name, create/delete as True/False) operation._auto_deps = dependencies or [] if beginning: self.generated_operations.setdefault(app_label, []).insert(0, operation) else: self.generated_operations.setdefault(app_label, []).append(operation) def swappable_first_key(self, item): """ Place potential swappable models first in lists of created models (only real way to solve #22783). 
""" try: model_state = self.to_state.models[item] base_names = { base if isinstance(base, str) else base.__name__ for base in model_state.bases } string_version = "%s.%s" % (item[0], item[1]) if ( model_state.options.get('swappable') or "AbstractUser" in base_names or "AbstractBaseUser" in base_names or settings.AUTH_USER_MODEL.lower() == string_version.lower() ): return ("___" + item[0], "___" + item[1]) except LookupError: pass return item def generate_renamed_models(self): """ Find any renamed models, generate the operations for them, and remove the old entry from the model lists. Must be run before other model-level generation. """ self.renamed_models = {} self.renamed_models_rel = {} added_models = self.new_model_keys - self.old_model_keys for app_label, model_name in sorted(added_models): model_state = self.to_state.models[app_label, model_name] model_fields_def = self.only_relation_agnostic_fields(model_state.fields) removed_models = self.old_model_keys - self.new_model_keys for rem_app_label, rem_model_name in removed_models: if rem_app_label == app_label: rem_model_state = self.from_state.models[rem_app_label, rem_model_name] rem_model_fields_def = self.only_relation_agnostic_fields(rem_model_state.fields) if model_fields_def == rem_model_fields_def: if self.questioner.ask_rename_model(rem_model_state, model_state): dependencies = [] fields = list(model_state.fields.values()) + [ field.remote_field for relations in self.to_state.relations[app_label, model_name].values() for _, field in relations ] for field in fields: if field.is_relation: dependencies.extend( self._get_dependencies_for_foreign_key( app_label, model_name, field, self.to_state, ) ) self.add_operation( app_label, operations.RenameModel( old_name=rem_model_state.name, new_name=model_state.name, ), dependencies=dependencies, ) self.renamed_models[app_label, model_name] = rem_model_name renamed_models_rel_key = '%s.%s' % ( rem_model_state.app_label, rem_model_state.name_lower, ) self.renamed_models_rel[renamed_models_rel_key] = '%s.%s' % ( model_state.app_label, model_state.name_lower, ) self.old_model_keys.remove((rem_app_label, rem_model_name)) self.old_model_keys.add((app_label, model_name)) break def generate_created_models(self): """ Find all new models (both managed and unmanaged) and make create operations for them as well as separate operations to create any foreign key or M2M relationships (these are optimized later, if possible). Defer any model options that refer to collections of fields that might be deferred (e.g. unique_together, index_together). """ old_keys = self.old_model_keys | self.old_unmanaged_keys added_models = self.new_model_keys - old_keys added_unmanaged_models = self.new_unmanaged_keys - old_keys all_added_models = chain( sorted(added_models, key=self.swappable_first_key, reverse=True), sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True) ) for app_label, model_name in all_added_models: model_state = self.to_state.models[app_label, model_name] # Gather related fields related_fields = {} primary_key_rel = None for field_name, field in model_state.fields.items(): if field.remote_field: if field.remote_field.model: if field.primary_key: primary_key_rel = field.remote_field.model elif not field.remote_field.parent_link: related_fields[field_name] = field if getattr(field.remote_field, 'through', None): related_fields[field_name] = field # Are there indexes/unique|index_together to defer? 
indexes = model_state.options.pop('indexes') constraints = model_state.options.pop('constraints') unique_together = model_state.options.pop('unique_together', None) index_together = model_state.options.pop('index_together', None) order_with_respect_to = model_state.options.pop('order_with_respect_to', None) # Depend on the deletion of any possible proxy version of us dependencies = [ (app_label, model_name, None, False), ] # Depend on all bases for base in model_state.bases: if isinstance(base, str) and "." in base: base_app_label, base_name = base.split(".", 1) dependencies.append((base_app_label, base_name, None, True)) # Depend on the removal of base fields if the new model has # a field with the same name. old_base_model_state = self.from_state.models.get((base_app_label, base_name)) new_base_model_state = self.to_state.models.get((base_app_label, base_name)) if old_base_model_state and new_base_model_state: removed_base_fields = set(old_base_model_state.fields).difference( new_base_model_state.fields, ).intersection(model_state.fields) for removed_base_field in removed_base_fields: dependencies.append((base_app_label, base_name, removed_base_field, False)) # Depend on the other end of the primary key if it's a relation if primary_key_rel: dependencies.append( resolve_relation( primary_key_rel, app_label, model_name, ) + (None, True) ) # Generate creation operation self.add_operation( app_label, operations.CreateModel( name=model_state.name, fields=[d for d in model_state.fields.items() if d[0] not in related_fields], options=model_state.options, bases=model_state.bases, managers=model_state.managers, ), dependencies=dependencies, beginning=True, ) # Don't add operations which modify the database for unmanaged models if not model_state.options.get('managed', True): continue # Generate operations for each related field for name, field in sorted(related_fields.items()): dependencies = self._get_dependencies_for_foreign_key( app_label, model_name, field, self.to_state, ) # Depend on our own model being created dependencies.append((app_label, model_name, None, True)) # Make operation self.add_operation( app_label, operations.AddField( model_name=model_name, name=name, field=field, ), dependencies=list(set(dependencies)), ) # Generate other opns if order_with_respect_to: self.add_operation( app_label, operations.AlterOrderWithRespectTo( name=model_name, order_with_respect_to=order_with_respect_to, ), dependencies=[ (app_label, model_name, order_with_respect_to, True), (app_label, model_name, None, True), ] ) related_dependencies = [ (app_label, model_name, name, True) for name in sorted(related_fields) ] related_dependencies.append((app_label, model_name, None, True)) for index in indexes: self.add_operation( app_label, operations.AddIndex( model_name=model_name, index=index, ), dependencies=related_dependencies, ) for constraint in constraints: self.add_operation( app_label, operations.AddConstraint( model_name=model_name, constraint=constraint, ), dependencies=related_dependencies, ) if unique_together: self.add_operation( app_label, operations.AlterUniqueTogether( name=model_name, unique_together=unique_together, ), dependencies=related_dependencies ) if index_together: self.add_operation( app_label, operations.AlterIndexTogether( name=model_name, index_together=index_together, ), dependencies=related_dependencies ) # Fix relationships if the model changed from a proxy model to a # concrete model. 
relations = self.to_state.relations if (app_label, model_name) in self.old_proxy_keys: for related_model_key, related_fields in relations[app_label, model_name].items(): related_model_state = self.to_state.models[related_model_key] for related_field_name, related_field in related_fields: self.add_operation( related_model_state.app_label, operations.AlterField( model_name=related_model_state.name, name=related_field_name, field=related_field, ), dependencies=[(app_label, model_name, None, True)], ) def generate_created_proxies(self): """ Make CreateModel statements for proxy models. Use the same statements as that way there's less code duplication, but for proxy models it's safe to skip all the pointless field stuff and chuck out an operation. """ added = self.new_proxy_keys - self.old_proxy_keys for app_label, model_name in sorted(added): model_state = self.to_state.models[app_label, model_name] assert model_state.options.get("proxy") # Depend on the deletion of any possible non-proxy version of us dependencies = [ (app_label, model_name, None, False), ] # Depend on all bases for base in model_state.bases: if isinstance(base, str) and "." in base: base_app_label, base_name = base.split(".", 1) dependencies.append((base_app_label, base_name, None, True)) # Generate creation operation self.add_operation( app_label, operations.CreateModel( name=model_state.name, fields=[], options=model_state.options, bases=model_state.bases, managers=model_state.managers, ), # Depend on the deletion of any possible non-proxy version of us dependencies=dependencies, ) def generate_deleted_models(self): """ Find all deleted models (managed and unmanaged) and make delete operations for them as well as separate operations to delete any foreign key or M2M relationships (these are optimized later, if possible). Also bring forward removal of any model options that refer to collections of fields - the inverse of generate_created_models(). """ new_keys = self.new_model_keys | self.new_unmanaged_keys deleted_models = self.old_model_keys - new_keys deleted_unmanaged_models = self.old_unmanaged_keys - new_keys all_deleted_models = chain(sorted(deleted_models), sorted(deleted_unmanaged_models)) for app_label, model_name in all_deleted_models: model_state = self.from_state.models[app_label, model_name] # Gather related fields related_fields = {} for field_name, field in model_state.fields.items(): if field.remote_field: if field.remote_field.model: related_fields[field_name] = field if getattr(field.remote_field, 'through', None): related_fields[field_name] = field # Generate option removal first unique_together = model_state.options.pop('unique_together', None) index_together = model_state.options.pop('index_together', None) if unique_together: self.add_operation( app_label, operations.AlterUniqueTogether( name=model_name, unique_together=None, ) ) if index_together: self.add_operation( app_label, operations.AlterIndexTogether( name=model_name, index_together=None, ) ) # Then remove each related field for name in sorted(related_fields): self.add_operation( app_label, operations.RemoveField( model_name=model_name, name=name, ) ) # Finally, remove the model. # This depends on both the removal/alteration of all incoming fields # and the removal of all its own related fields, and if it's # a through model the field that references it. 
dependencies = [] relations = self.from_state.relations for (related_object_app_label, object_name), relation_related_fields in ( relations[app_label, model_name].items() ): for field_name, field in relation_related_fields: dependencies.append( (related_object_app_label, object_name, field_name, False), ) if not field.many_to_many: dependencies.append( (related_object_app_label, object_name, field_name, 'alter'), ) for name in sorted(related_fields): dependencies.append((app_label, model_name, name, False)) # We're referenced in another field's through= through_user = self.through_users.get((app_label, model_state.name_lower)) if through_user: dependencies.append((through_user[0], through_user[1], through_user[2], False)) # Finally, make the operation, deduping any dependencies self.add_operation( app_label, operations.DeleteModel( name=model_state.name, ), dependencies=list(set(dependencies)), ) def generate_deleted_proxies(self): """Make DeleteModel options for proxy models.""" deleted = self.old_proxy_keys - self.new_proxy_keys for app_label, model_name in sorted(deleted): model_state = self.from_state.models[app_label, model_name] assert model_state.options.get("proxy") self.add_operation( app_label, operations.DeleteModel( name=model_state.name, ), ) def generate_renamed_fields(self): """Work out renamed fields.""" self.renamed_fields = {} for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, old_model_name] field = new_model_state.get_field(field_name) # Scan to see if this is actually a rename! field_dec = self.deep_deconstruct(field) for rem_app_label, rem_model_name, rem_field_name in sorted(self.old_field_keys - self.new_field_keys): if rem_app_label == app_label and rem_model_name == model_name: old_field = old_model_state.get_field(rem_field_name) old_field_dec = self.deep_deconstruct(old_field) if field.remote_field and field.remote_field.model and 'to' in old_field_dec[2]: old_rel_to = old_field_dec[2]['to'] if old_rel_to in self.renamed_models_rel: old_field_dec[2]['to'] = self.renamed_models_rel[old_rel_to] old_field.set_attributes_from_name(rem_field_name) old_db_column = old_field.get_attname_column()[1] if (old_field_dec == field_dec or ( # Was the field renamed and db_column equal to the # old field's column added? 
old_field_dec[0:2] == field_dec[0:2] and dict(old_field_dec[2], db_column=old_db_column) == field_dec[2])): if self.questioner.ask_rename(model_name, rem_field_name, field_name, field): self.add_operation( app_label, operations.RenameField( model_name=model_name, old_name=rem_field_name, new_name=field_name, ) ) self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name)) self.old_field_keys.add((app_label, model_name, field_name)) self.renamed_fields[app_label, model_name, field_name] = rem_field_name break def generate_added_fields(self): """Make AddField operations.""" for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys): self._generate_added_field(app_label, model_name, field_name) def _generate_added_field(self, app_label, model_name, field_name): field = self.to_state.models[app_label, model_name].get_field(field_name) # Fields that are foreignkeys/m2ms depend on stuff dependencies = [] if field.remote_field and field.remote_field.model: dependencies.extend(self._get_dependencies_for_foreign_key( app_label, model_name, field, self.to_state, )) # You can't just add NOT NULL fields with no default or fields # which don't allow empty strings as default. time_fields = (models.DateField, models.DateTimeField, models.TimeField) preserve_default = ( field.null or field.has_default() or field.many_to_many or (field.blank and field.empty_strings_allowed) or (isinstance(field, time_fields) and field.auto_now) ) if not preserve_default: field = field.clone() if isinstance(field, time_fields) and field.auto_now_add: field.default = self.questioner.ask_auto_now_add_addition(field_name, model_name) else: field.default = self.questioner.ask_not_null_addition(field_name, model_name) self.add_operation( app_label, operations.AddField( model_name=model_name, name=field_name, field=field, preserve_default=preserve_default, ), dependencies=dependencies, ) def generate_removed_fields(self): """Make RemoveField operations.""" for app_label, model_name, field_name in sorted(self.old_field_keys - self.new_field_keys): self._generate_removed_field(app_label, model_name, field_name) def _generate_removed_field(self, app_label, model_name, field_name): self.add_operation( app_label, operations.RemoveField( model_name=model_name, name=field_name, ), # We might need to depend on the removal of an # order_with_respect_to or index/unique_together operation; # this is safely ignored if there isn't one dependencies=[ (app_label, model_name, field_name, "order_wrt_unset"), (app_label, model_name, field_name, "foo_together_change"), ], ) def generate_altered_fields(self): """ Make AlterField operations, or possibly RemovedField/AddField if alter isn't possible. """ for app_label, model_name, field_name in sorted(self.old_field_keys & self.new_field_keys): # Did the field change? 
old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_field_name = self.renamed_fields.get((app_label, model_name, field_name), field_name) old_field = self.from_state.models[app_label, old_model_name].get_field(old_field_name) new_field = self.to_state.models[app_label, model_name].get_field(field_name) dependencies = [] # Implement any model renames on relations; these are handled by RenameModel # so we need to exclude them from the comparison if hasattr(new_field, "remote_field") and getattr(new_field.remote_field, "model", None): rename_key = resolve_relation(new_field.remote_field.model, app_label, model_name) if rename_key in self.renamed_models: new_field.remote_field.model = old_field.remote_field.model # Handle ForeignKey which can only have a single to_field. remote_field_name = getattr(new_field.remote_field, 'field_name', None) if remote_field_name: to_field_rename_key = rename_key + (remote_field_name,) if to_field_rename_key in self.renamed_fields: # Repoint both model and field name because to_field # inclusion in ForeignKey.deconstruct() is based on # both. new_field.remote_field.model = old_field.remote_field.model new_field.remote_field.field_name = old_field.remote_field.field_name # Handle ForeignObjects which can have multiple from_fields/to_fields. from_fields = getattr(new_field, 'from_fields', None) if from_fields: from_rename_key = (app_label, model_name) new_field.from_fields = tuple([ self.renamed_fields.get(from_rename_key + (from_field,), from_field) for from_field in from_fields ]) new_field.to_fields = tuple([ self.renamed_fields.get(rename_key + (to_field,), to_field) for to_field in new_field.to_fields ]) dependencies.extend(self._get_dependencies_for_foreign_key( app_label, model_name, new_field, self.to_state, )) if ( hasattr(new_field, 'remote_field') and getattr(new_field.remote_field, 'through', None) ): rename_key = resolve_relation(new_field.remote_field.through, app_label, model_name) if rename_key in self.renamed_models: new_field.remote_field.through = old_field.remote_field.through old_field_dec = self.deep_deconstruct(old_field) new_field_dec = self.deep_deconstruct(new_field) if old_field_dec != new_field_dec: both_m2m = old_field.many_to_many and new_field.many_to_many neither_m2m = not old_field.many_to_many and not new_field.many_to_many if both_m2m or neither_m2m: # Either both fields are m2m or neither is preserve_default = True if (old_field.null and not new_field.null and not new_field.has_default() and not new_field.many_to_many): field = new_field.clone() new_default = self.questioner.ask_not_null_alteration(field_name, model_name) if new_default is not models.NOT_PROVIDED: field.default = new_default preserve_default = False else: field = new_field self.add_operation( app_label, operations.AlterField( model_name=model_name, name=field_name, field=field, preserve_default=preserve_default, ), dependencies=dependencies, ) else: # We cannot alter between m2m and concrete fields self._generate_removed_field(app_label, model_name, field_name) self._generate_added_field(app_label, model_name, field_name) def create_altered_indexes(self): option_name = operations.AddIndex.option_name for app_label, model_name in sorted(self.kept_model_keys): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] old_indexes = old_model_state.options[option_name] new_indexes = 
new_model_state.options[option_name]

            add_idx = [idx for idx in new_indexes if idx not in old_indexes]
            rem_idx = [idx for idx in old_indexes if idx not in new_indexes]

            self.altered_indexes.update({
                (app_label, model_name): {
                    'added_indexes': add_idx, 'removed_indexes': rem_idx,
                }
            })

    def generate_added_indexes(self):
        for (app_label, model_name), alt_indexes in self.altered_indexes.items():
            for index in alt_indexes['added_indexes']:
                self.add_operation(
                    app_label,
                    operations.AddIndex(
                        model_name=model_name,
                        index=index,
                    )
                )

    def generate_removed_indexes(self):
        for (app_label, model_name), alt_indexes in self.altered_indexes.items():
            for index in alt_indexes['removed_indexes']:
                self.add_operation(
                    app_label,
                    operations.RemoveIndex(
                        model_name=model_name,
                        name=index.name,
                    )
                )

    def create_altered_constraints(self):
        option_name = operations.AddConstraint.option_name
        for app_label, model_name in sorted(self.kept_model_keys):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            new_model_state = self.to_state.models[app_label, model_name]

            old_constraints = old_model_state.options[option_name]
            new_constraints = new_model_state.options[option_name]
            add_constraints = [c for c in new_constraints if c not in old_constraints]
            rem_constraints = [c for c in old_constraints if c not in new_constraints]

            self.altered_constraints.update({
                (app_label, model_name): {
                    'added_constraints': add_constraints,
                    'removed_constraints': rem_constraints,
                }
            })

    def generate_added_constraints(self):
        for (app_label, model_name), alt_constraints in self.altered_constraints.items():
            for constraint in alt_constraints['added_constraints']:
                self.add_operation(
                    app_label,
                    operations.AddConstraint(
                        model_name=model_name,
                        constraint=constraint,
                    )
                )

    def generate_removed_constraints(self):
        for (app_label, model_name), alt_constraints in self.altered_constraints.items():
            for constraint in alt_constraints['removed_constraints']:
                self.add_operation(
                    app_label,
                    operations.RemoveConstraint(
                        model_name=model_name,
                        name=constraint.name,
                    )
                )

    @staticmethod
    def _get_dependencies_for_foreign_key(app_label, model_name, field, project_state):
        remote_field_model = None
        if hasattr(field.remote_field, 'model'):
            remote_field_model = field.remote_field.model
        else:
            relations = project_state.relations[app_label, model_name]
            for (remote_app_label, remote_model_name), fields in relations.items():
                if any(field == related_field.remote_field for _, related_field in fields):
                    remote_field_model = f'{remote_app_label}.{remote_model_name}'
                    break
        # Account for FKs to swappable models
        swappable_setting = getattr(field, 'swappable_setting', None)
        if swappable_setting is not None:
            dep_app_label = "__setting__"
            dep_object_name = swappable_setting
        else:
            dep_app_label, dep_object_name = resolve_relation(
                remote_field_model, app_label, model_name,
            )
        dependencies = [(dep_app_label, dep_object_name, None, True)]
        if getattr(field.remote_field, 'through', None):
            # Resolve the intermediate model itself, not the remote model,
            # so the dependency points at the through table.
            through_app_label, through_object_name = resolve_relation(
                field.remote_field.through, app_label, model_name,
            )
            dependencies.append((through_app_label, through_object_name, None, True))
        return dependencies

    def _generate_altered_foo_together(self, operation):
        option_name = operation.option_name
        for app_label, model_name in sorted(self.kept_model_keys):
            old_model_name = self.renamed_models.get((app_label, model_name), model_name)
            old_model_state = self.from_state.models[app_label, old_model_name]
            new_model_state =
self.to_state.models[app_label, model_name] # We run the old version through the field renames to account for those old_value = old_model_state.options.get(option_name) old_value = { tuple( self.renamed_fields.get((app_label, model_name, n), n) for n in unique ) for unique in old_value } if old_value else set() new_value = new_model_state.options.get(option_name) new_value = set(new_value) if new_value else set() if old_value != new_value: dependencies = [] for foo_togethers in new_value: for field_name in foo_togethers: field = new_model_state.get_field(field_name) if field.remote_field and field.remote_field.model: dependencies.extend(self._get_dependencies_for_foreign_key( app_label, model_name, field, self.to_state, )) self.add_operation( app_label, operation( name=model_name, **{option_name: new_value} ), dependencies=dependencies, ) def generate_altered_unique_together(self): self._generate_altered_foo_together(operations.AlterUniqueTogether) def generate_altered_index_together(self): self._generate_altered_foo_together(operations.AlterIndexTogether) def generate_altered_db_table(self): models_to_check = self.kept_model_keys.union(self.kept_proxy_keys, self.kept_unmanaged_keys) for app_label, model_name in sorted(models_to_check): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] old_db_table_name = old_model_state.options.get('db_table') new_db_table_name = new_model_state.options.get('db_table') if old_db_table_name != new_db_table_name: self.add_operation( app_label, operations.AlterModelTable( name=model_name, table=new_db_table_name, ) ) def generate_altered_options(self): """ Work out if any non-schema-affecting options have changed and make an operation to represent them in state changes (in case Python code in migrations needs them). 
""" models_to_check = self.kept_model_keys.union( self.kept_proxy_keys, self.kept_unmanaged_keys, # unmanaged converted to managed self.old_unmanaged_keys & self.new_model_keys, # managed converted to unmanaged self.old_model_keys & self.new_unmanaged_keys, ) for app_label, model_name in sorted(models_to_check): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] old_options = { key: value for key, value in old_model_state.options.items() if key in AlterModelOptions.ALTER_OPTION_KEYS } new_options = { key: value for key, value in new_model_state.options.items() if key in AlterModelOptions.ALTER_OPTION_KEYS } if old_options != new_options: self.add_operation( app_label, operations.AlterModelOptions( name=model_name, options=new_options, ) ) def generate_altered_order_with_respect_to(self): for app_label, model_name in sorted(self.kept_model_keys): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] if (old_model_state.options.get("order_with_respect_to") != new_model_state.options.get("order_with_respect_to")): # Make sure it comes second if we're adding # (removal dependency is part of RemoveField) dependencies = [] if new_model_state.options.get("order_with_respect_to"): dependencies.append(( app_label, model_name, new_model_state.options["order_with_respect_to"], True, )) # Actually generate the operation self.add_operation( app_label, operations.AlterOrderWithRespectTo( name=model_name, order_with_respect_to=new_model_state.options.get('order_with_respect_to'), ), dependencies=dependencies, ) def generate_altered_managers(self): for app_label, model_name in sorted(self.kept_model_keys): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] if old_model_state.managers != new_model_state.managers: self.add_operation( app_label, operations.AlterModelManagers( name=model_name, managers=new_model_state.managers, ) ) def arrange_for_graph(self, changes, graph, migration_name=None): """ Take a result from changes() and a MigrationGraph, and fix the names and dependencies of the changes so they extend the graph from the leaf nodes for each app. """ leaves = graph.leaf_nodes() name_map = {} for app_label, migrations in list(changes.items()): if not migrations: continue # Find the app label's current leaf node app_leaf = None for leaf in leaves: if leaf[0] == app_label: app_leaf = leaf break # Do they want an initial migration for this app? if app_leaf is None and not self.questioner.ask_initial(app_label): # They don't. 
for migration in migrations: name_map[(app_label, migration.name)] = (app_label, "__first__") del changes[app_label] continue # Work out the next number in the sequence if app_leaf is None: next_number = 1 else: next_number = (self.parse_number(app_leaf[1]) or 0) + 1 # Name each migration for i, migration in enumerate(migrations): if i == 0 and app_leaf: migration.dependencies.append(app_leaf) new_name_parts = ['%04i' % next_number] if migration_name: new_name_parts.append(migration_name) elif i == 0 and not app_leaf: new_name_parts.append('initial') else: new_name_parts.append(migration.suggest_name()[:100]) new_name = '_'.join(new_name_parts) name_map[(app_label, migration.name)] = (app_label, new_name) next_number += 1 migration.name = new_name # Now fix dependencies for migrations in changes.values(): for migration in migrations: migration.dependencies = [name_map.get(d, d) for d in migration.dependencies] return changes def _trim_to_apps(self, changes, app_labels): """ Take changes from arrange_for_graph() and set of app labels, and return a modified set of changes which trims out as many migrations that are not in app_labels as possible. Note that some other migrations may still be present as they may be required dependencies. """ # Gather other app dependencies in a first pass app_dependencies = {} for app_label, migrations in changes.items(): for migration in migrations: for dep_app_label, name in migration.dependencies: app_dependencies.setdefault(app_label, set()).add(dep_app_label) required_apps = set(app_labels) # Keep resolving till there's no change old_required_apps = None while old_required_apps != required_apps: old_required_apps = set(required_apps) required_apps.update(*[app_dependencies.get(app_label, ()) for app_label in required_apps]) # Remove all migrations that aren't needed for app_label in list(changes): if app_label not in required_apps: del changes[app_label] return changes @classmethod def parse_number(cls, name): """ Given a migration name, try to extract a number from the beginning of it. If no number is found, return None. """ match = re.match(r'^\d+', name) if match: return int(match[0]) return None
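
# --- Illustrative sketch (editor's addition, not part of Django source) ---
# A standalone rerun of the naming logic above, assuming the enclosing class
# is Django's MigrationAutodetector; the migration names are hypothetical.
# parse_number() pulls the leading integer off the app's current leaf
# migration, and arrange_for_graph() zero-pads the next number into the new
# name.
if __name__ == '__main__':
    assert MigrationAutodetector.parse_number('0042_auto') == 42
    assert MigrationAutodetector.parse_number('initial') is None
    # Next name for an app whose current leaf migration is '0042_auto':
    next_number = (MigrationAutodetector.parse_number('0042_auto') or 0) + 1
    assert '_'.join(['%04i' % next_number, 'add_field']) == '0043_add_field'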
13e33d4ac253b04b3c20385c6514aeb5f2c2fd1fe621b2a11f0c1ea91f9275b0
import datetime
import re
from collections import namedtuple

from django.db.models.fields.related import RECURSIVE_RELATIONSHIP_CONSTANT

FieldReference = namedtuple('FieldReference', 'to through')

COMPILED_REGEX_TYPE = type(re.compile(''))


class RegexObject:
    def __init__(self, obj):
        self.pattern = obj.pattern
        self.flags = obj.flags

    def __eq__(self, other):
        return self.pattern == other.pattern and self.flags == other.flags


def get_migration_name_timestamp():
    return datetime.datetime.now().strftime("%Y%m%d_%H%M")


def resolve_relation(model, app_label=None, model_name=None):
    """
    Take a model class or model reference string and return a model tuple.

    app_label and model_name are used to resolve the scope of recursive and
    unscoped model relationships.
    """
    if isinstance(model, str):
        if model == RECURSIVE_RELATIONSHIP_CONSTANT:
            if app_label is None or model_name is None:
                raise TypeError(
                    'app_label and model_name must be provided to resolve '
                    'recursive relationships.'
                )
            return app_label, model_name
        if '.' in model:
            app_label, model_name = model.split('.', 1)
            return app_label, model_name.lower()
        if app_label is None:
            raise TypeError(
                'app_label must be provided to resolve unscoped model '
                'relationships.'
            )
        return app_label, model.lower()
    return model._meta.app_label, model._meta.model_name


def field_references(
    model_tuple,
    field,
    reference_model_tuple,
    reference_field_name=None,
    reference_field=None,
):
    """
    Return either False or a FieldReference if `field` references provided
    context.

    False positives can be returned if `reference_field_name` is provided
    without `reference_field` because of the introspection limitation it
    incurs. This should not be an issue when this function is used to
    determine whether or not an optimization can take place.
    """
    remote_field = field.remote_field
    if not remote_field:
        return False
    references_to = None
    references_through = None
    if resolve_relation(remote_field.model, *model_tuple) == reference_model_tuple:
        to_fields = getattr(field, 'to_fields', None)
        if (
            reference_field_name is None or
            # Unspecified to_field(s).
            to_fields is None or
            # Reference to primary key.
            (None in to_fields and (reference_field is None or reference_field.primary_key)) or
            # Reference to field.
            reference_field_name in to_fields
        ):
            references_to = (remote_field, to_fields)
    through = getattr(remote_field, 'through', None)
    if through and resolve_relation(through, *model_tuple) == reference_model_tuple:
        through_fields = remote_field.through_fields
        if (
            reference_field_name is None or
            # Unspecified through_fields.
            through_fields is None or
            # Reference to field.
            reference_field_name in through_fields
        ):
            references_through = (remote_field, through_fields)
    if not (references_to or references_through):
        return False
    return FieldReference(references_to, references_through)


def get_references(state, model_tuple, field_tuple=()):
    """
    Generator of (model_state, name, field, reference) referencing
    provided context.

    If field_tuple is provided only references to this particular field of
    model_tuple will be generated.
    """
    for state_model_tuple, model_state in state.models.items():
        for name, field in model_state.fields.items():
            reference = field_references(state_model_tuple, field, model_tuple, *field_tuple)
            if reference:
                yield model_state, name, field, reference


def field_is_referenced(state, model_tuple, field_tuple):
    """Return whether `field_tuple` is referenced by any state models."""
    return next(get_references(state, model_tuple, field_tuple), None) is not None
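
# --- Illustrative sketch (editor's addition, not part of Django source) ---
# How resolve_relation() normalizes the spellings of a relation target into
# an (app_label, model_name) tuple; the app and model names are hypothetical.
# 'self' (RECURSIVE_RELATIONSHIP_CONSTANT) resolves to the owning model,
# dotted references are split, and bare model names adopt the given
# app_label. Model names always come back lowercased.
if __name__ == '__main__':
    assert resolve_relation('self', 'polls', 'question') == ('polls', 'question')
    assert resolve_relation('polls.Question') == ('polls', 'question')
    assert resolve_relation('Question', app_label='polls') == ('polls', 'question')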
ba6a6b26c999b603dbb7469baae87d1da6e35d8da72c271e03ae84ca57840526
import builtins import collections.abc import datetime import decimal import enum import functools import math import os import pathlib import re import types import uuid from django.conf import SettingsReference from django.db import models from django.db.migrations.operations.base import Operation from django.db.migrations.utils import COMPILED_REGEX_TYPE, RegexObject from django.utils.functional import LazyObject, Promise from django.utils.timezone import utc from django.utils.version import get_docs_version class BaseSerializer: def __init__(self, value): self.value = value def serialize(self): raise NotImplementedError('Subclasses of BaseSerializer must implement the serialize() method.') class BaseSequenceSerializer(BaseSerializer): def _format(self): raise NotImplementedError('Subclasses of BaseSequenceSerializer must implement the _format() method.') def serialize(self): imports = set() strings = [] for item in self.value: item_string, item_imports = serializer_factory(item).serialize() imports.update(item_imports) strings.append(item_string) value = self._format() return value % (", ".join(strings)), imports class BaseSimpleSerializer(BaseSerializer): def serialize(self): return repr(self.value), set() class ChoicesSerializer(BaseSerializer): def serialize(self): return serializer_factory(self.value.value).serialize() class DateTimeSerializer(BaseSerializer): """For datetime.*, except datetime.datetime.""" def serialize(self): return repr(self.value), {'import datetime'} class DatetimeDatetimeSerializer(BaseSerializer): """For datetime.datetime.""" def serialize(self): if self.value.tzinfo is not None and self.value.tzinfo != utc: self.value = self.value.astimezone(utc) imports = ["import datetime"] if self.value.tzinfo is not None: imports.append("from django.utils.timezone import utc") return repr(self.value).replace('<UTC>', 'utc'), set(imports) class DecimalSerializer(BaseSerializer): def serialize(self): return repr(self.value), {"from decimal import Decimal"} class DeconstructableSerializer(BaseSerializer): @staticmethod def serialize_deconstructed(path, args, kwargs): name, imports = DeconstructableSerializer._serialize_path(path) strings = [] for arg in args: arg_string, arg_imports = serializer_factory(arg).serialize() strings.append(arg_string) imports.update(arg_imports) for kw, arg in sorted(kwargs.items()): arg_string, arg_imports = serializer_factory(arg).serialize() imports.update(arg_imports) strings.append("%s=%s" % (kw, arg_string)) return "%s(%s)" % (name, ", ".join(strings)), imports @staticmethod def _serialize_path(path): module, name = path.rsplit(".", 1) if module == "django.db.models": imports = {"from django.db import models"} name = "models.%s" % name else: imports = {"import %s" % module} name = path return name, imports def serialize(self): return self.serialize_deconstructed(*self.value.deconstruct()) class DictionarySerializer(BaseSerializer): def serialize(self): imports = set() strings = [] for k, v in sorted(self.value.items()): k_string, k_imports = serializer_factory(k).serialize() v_string, v_imports = serializer_factory(v).serialize() imports.update(k_imports) imports.update(v_imports) strings.append((k_string, v_string)) return "{%s}" % (", ".join("%s: %s" % (k, v) for k, v in strings)), imports class EnumSerializer(BaseSerializer): def serialize(self): enum_class = self.value.__class__ module = enum_class.__module__ return ( '%s.%s[%r]' % (module, enum_class.__qualname__, self.value.name), {'import %s' % module}, ) class 
FloatSerializer(BaseSimpleSerializer): def serialize(self): if math.isnan(self.value) or math.isinf(self.value): return 'float("{}")'.format(self.value), set() return super().serialize() class FrozensetSerializer(BaseSequenceSerializer): def _format(self): return "frozenset([%s])" class FunctionTypeSerializer(BaseSerializer): def serialize(self): if getattr(self.value, "__self__", None) and isinstance(self.value.__self__, type): klass = self.value.__self__ module = klass.__module__ return "%s.%s.%s" % (module, klass.__name__, self.value.__name__), {"import %s" % module} # Further error checking if self.value.__name__ == '<lambda>': raise ValueError("Cannot serialize function: lambda") if self.value.__module__ is None: raise ValueError("Cannot serialize function %r: No module" % self.value) module_name = self.value.__module__ if '<' not in self.value.__qualname__: # Qualname can include <locals> return '%s.%s' % (module_name, self.value.__qualname__), {'import %s' % self.value.__module__} raise ValueError( 'Could not find function %s in %s.\n' % (self.value.__name__, module_name) ) class FunctoolsPartialSerializer(BaseSerializer): def serialize(self): # Serialize functools.partial() arguments func_string, func_imports = serializer_factory(self.value.func).serialize() args_string, args_imports = serializer_factory(self.value.args).serialize() keywords_string, keywords_imports = serializer_factory(self.value.keywords).serialize() # Add any imports needed by arguments imports = {'import functools', *func_imports, *args_imports, *keywords_imports} return ( 'functools.%s(%s, *%s, **%s)' % ( self.value.__class__.__name__, func_string, args_string, keywords_string, ), imports, ) class IterableSerializer(BaseSerializer): def serialize(self): imports = set() strings = [] for item in self.value: item_string, item_imports = serializer_factory(item).serialize() imports.update(item_imports) strings.append(item_string) # When len(strings)==0, the empty iterable should be serialized as # "()", not "(,)" because (,) is invalid Python syntax. value = "(%s)" if len(strings) != 1 else "(%s,)" return value % (", ".join(strings)), imports class ModelFieldSerializer(DeconstructableSerializer): def serialize(self): attr_name, path, args, kwargs = self.value.deconstruct() return self.serialize_deconstructed(path, args, kwargs) class ModelManagerSerializer(DeconstructableSerializer): def serialize(self): as_manager, manager_path, qs_path, args, kwargs = self.value.deconstruct() if as_manager: name, imports = self._serialize_path(qs_path) return "%s.as_manager()" % name, imports else: return self.serialize_deconstructed(manager_path, args, kwargs) class OperationSerializer(BaseSerializer): def serialize(self): from django.db.migrations.writer import OperationWriter string, imports = OperationWriter(self.value, indentation=0).serialize() # Nested operation, trailing comma is handled in upper OperationWriter._write() return string.rstrip(','), imports class PathLikeSerializer(BaseSerializer): def serialize(self): return repr(os.fspath(self.value)), {} class PathSerializer(BaseSerializer): def serialize(self): # Convert concrete paths to pure paths to avoid issues with migrations # generated on one platform being used on a different platform. 
prefix = 'Pure' if isinstance(self.value, pathlib.Path) else '' return 'pathlib.%s%r' % (prefix, self.value), {'import pathlib'} class RegexSerializer(BaseSerializer): def serialize(self): regex_pattern, pattern_imports = serializer_factory(self.value.pattern).serialize() # Turn off default implicit flags (e.g. re.U) because regexes with the # same implicit and explicit flags aren't equal. flags = self.value.flags ^ re.compile('').flags regex_flags, flag_imports = serializer_factory(flags).serialize() imports = {'import re', *pattern_imports, *flag_imports} args = [regex_pattern] if flags: args.append(regex_flags) return "re.compile(%s)" % ', '.join(args), imports class SequenceSerializer(BaseSequenceSerializer): def _format(self): return "[%s]" class SetSerializer(BaseSequenceSerializer): def _format(self): # Serialize as a set literal except when value is empty because {} # is an empty dict. return '{%s}' if self.value else 'set(%s)' class SettingsReferenceSerializer(BaseSerializer): def serialize(self): return "settings.%s" % self.value.setting_name, {"from django.conf import settings"} class TupleSerializer(BaseSequenceSerializer): def _format(self): # When len(value)==0, the empty tuple should be serialized as "()", # not "(,)" because (,) is invalid Python syntax. return "(%s)" if len(self.value) != 1 else "(%s,)" class TypeSerializer(BaseSerializer): def serialize(self): special_cases = [ (models.Model, "models.Model", ['from django.db import models']), (type(None), 'type(None)', []), ] for case, string, imports in special_cases: if case is self.value: return string, set(imports) if hasattr(self.value, "__module__"): module = self.value.__module__ if module == builtins.__name__: return self.value.__name__, set() else: return "%s.%s" % (module, self.value.__qualname__), {"import %s" % module} class UUIDSerializer(BaseSerializer): def serialize(self): return "uuid.%s" % repr(self.value), {"import uuid"} class Serializer: _registry = { # Some of these are order-dependent. frozenset: FrozensetSerializer, list: SequenceSerializer, set: SetSerializer, tuple: TupleSerializer, dict: DictionarySerializer, models.Choices: ChoicesSerializer, enum.Enum: EnumSerializer, datetime.datetime: DatetimeDatetimeSerializer, (datetime.date, datetime.timedelta, datetime.time): DateTimeSerializer, SettingsReference: SettingsReferenceSerializer, float: FloatSerializer, (bool, int, type(None), bytes, str, range): BaseSimpleSerializer, decimal.Decimal: DecimalSerializer, (functools.partial, functools.partialmethod): FunctoolsPartialSerializer, (types.FunctionType, types.BuiltinFunctionType, types.MethodType): FunctionTypeSerializer, collections.abc.Iterable: IterableSerializer, (COMPILED_REGEX_TYPE, RegexObject): RegexSerializer, uuid.UUID: UUIDSerializer, pathlib.PurePath: PathSerializer, os.PathLike: PathLikeSerializer, } @classmethod def register(cls, type_, serializer): if not issubclass(serializer, BaseSerializer): raise ValueError("'%s' must inherit from 'BaseSerializer'." % serializer.__name__) cls._registry[type_] = serializer @classmethod def unregister(cls, type_): cls._registry.pop(type_) def serializer_factory(value): if isinstance(value, Promise): value = str(value) elif isinstance(value, LazyObject): # The unwrapped value is returned as the first item of the arguments # tuple. 
value = value.__reduce__()[1][0] if isinstance(value, models.Field): return ModelFieldSerializer(value) if isinstance(value, models.manager.BaseManager): return ModelManagerSerializer(value) if isinstance(value, Operation): return OperationSerializer(value) if isinstance(value, type): return TypeSerializer(value) # Anything that knows how to deconstruct itself. if hasattr(value, 'deconstruct'): return DeconstructableSerializer(value) for type_, serializer_cls in Serializer._registry.items(): if isinstance(value, type_): return serializer_cls(value) raise ValueError( "Cannot serialize: %r\nThere are some values Django cannot serialize into " "migration files.\nFor more, see https://docs.djangoproject.com/en/%s/" "topics/migrations/#migration-serializing" % (value, get_docs_version()) )
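
# --- Illustrative sketch (editor's addition, not part of Django source) ---
# serializer_factory() dispatches on type; every serializer returns a
# (source_string, imports) pair that the migration writer embeds verbatim.
if __name__ == '__main__':
    string, imports = serializer_factory(decimal.Decimal('1.5')).serialize()
    assert (string, imports) == ("Decimal('1.5')", {'from decimal import Decimal'})
    string, imports = serializer_factory(datetime.date(2021, 1, 1)).serialize()
    assert (string, imports) == ('datetime.date(2021, 1, 1)', {'import datetime'})
    # 1-tuples keep their trailing comma so the output stays valid Python,
    # and empty sets fall back to set() because {} would be a dict.
    assert serializer_factory((1,)).serialize()[0] == '(1,)'
    assert serializer_factory(set()).serialize()[0] == 'set()'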
71262dd26d9fdb2de8a060fe31d9e72604587e0cf31f0dce487f22f4e13ad894
""" Classes to represent the definitions of aggregate functions. """ from django.core.exceptions import FieldError from django.db.models.expressions import Case, Func, Star, When from django.db.models.fields import IntegerField from django.db.models.functions.comparison import Coalesce from django.db.models.functions.mixins import ( FixDurationInputMixin, NumericOutputFieldMixin, ) __all__ = [ 'Aggregate', 'Avg', 'Count', 'Max', 'Min', 'StdDev', 'Sum', 'Variance', ] class Aggregate(Func): template = '%(function)s(%(distinct)s%(expressions)s)' contains_aggregate = True name = None filter_template = '%s FILTER (WHERE %%(filter)s)' window_compatible = True allow_distinct = False empty_aggregate_value = None def __init__(self, *expressions, distinct=False, filter=None, default=None, **extra): if distinct and not self.allow_distinct: raise TypeError("%s does not allow distinct." % self.__class__.__name__) if default is not None and self.empty_aggregate_value is not None: raise TypeError(f'{self.__class__.__name__} does not allow default.') self.distinct = distinct self.filter = filter self.default = default super().__init__(*expressions, **extra) def get_source_fields(self): # Don't return the filter expression since it's not a source field. return [e._output_field_or_none for e in super().get_source_expressions()] def get_source_expressions(self): source_expressions = super().get_source_expressions() if self.filter: return source_expressions + [self.filter] return source_expressions def set_source_expressions(self, exprs): self.filter = self.filter and exprs.pop() return super().set_source_expressions(exprs) def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): # Aggregates are not allowed in UPDATE queries, so ignore for_save c = super().resolve_expression(query, allow_joins, reuse, summarize) c.filter = c.filter and c.filter.resolve_expression(query, allow_joins, reuse, summarize) if not summarize: # Call Aggregate.get_source_expressions() to avoid # returning self.filter and including that in this loop. expressions = super(Aggregate, c).get_source_expressions() for index, expr in enumerate(expressions): if expr.contains_aggregate: before_resolved = self.get_source_expressions()[index] name = before_resolved.name if hasattr(before_resolved, 'name') else repr(before_resolved) raise FieldError("Cannot compute %s('%s'): '%s' is an aggregate" % (c.name, name, name)) if (default := c.default) is None: return c if hasattr(default, 'resolve_expression'): default = default.resolve_expression(query, allow_joins, reuse, summarize) c.default = None # Reset the default argument before wrapping. 
return Coalesce(c, default, output_field=c._output_field_or_none) @property def default_alias(self): expressions = self.get_source_expressions() if len(expressions) == 1 and hasattr(expressions[0], 'name'): return '%s__%s' % (expressions[0].name, self.name.lower()) raise TypeError("Complex expressions require an alias") def get_group_by_cols(self, alias=None): return [] def as_sql(self, compiler, connection, **extra_context): extra_context['distinct'] = 'DISTINCT ' if self.distinct else '' if self.filter: if connection.features.supports_aggregate_filter_clause: filter_sql, filter_params = self.filter.as_sql(compiler, connection) template = self.filter_template % extra_context.get('template', self.template) sql, params = super().as_sql( compiler, connection, template=template, filter=filter_sql, **extra_context ) return sql, params + filter_params else: copy = self.copy() copy.filter = None source_expressions = copy.get_source_expressions() condition = When(self.filter, then=source_expressions[0]) copy.set_source_expressions([Case(condition)] + source_expressions[1:]) return super(Aggregate, copy).as_sql(compiler, connection, **extra_context) return super().as_sql(compiler, connection, **extra_context) def _get_repr_options(self): options = super()._get_repr_options() if self.distinct: options['distinct'] = self.distinct if self.filter: options['filter'] = self.filter return options class Avg(FixDurationInputMixin, NumericOutputFieldMixin, Aggregate): function = 'AVG' name = 'Avg' allow_distinct = True class Count(Aggregate): function = 'COUNT' name = 'Count' output_field = IntegerField() allow_distinct = True empty_aggregate_value = 0 def __init__(self, expression, filter=None, **extra): if expression == '*': expression = Star() if isinstance(expression, Star) and filter is not None: raise ValueError('Star cannot be used with filter. Please specify a field.') super().__init__(expression, filter=filter, **extra) class Max(Aggregate): function = 'MAX' name = 'Max' class Min(Aggregate): function = 'MIN' name = 'Min' class StdDev(NumericOutputFieldMixin, Aggregate): name = 'StdDev' def __init__(self, expression, sample=False, **extra): self.function = 'STDDEV_SAMP' if sample else 'STDDEV_POP' super().__init__(expression, **extra) def _get_repr_options(self): return {**super()._get_repr_options(), 'sample': self.function == 'STDDEV_SAMP'} class Sum(FixDurationInputMixin, Aggregate): function = 'SUM' name = 'Sum' allow_distinct = True class Variance(NumericOutputFieldMixin, Aggregate): name = 'Variance' def __init__(self, expression, sample=False, **extra): self.function = 'VAR_SAMP' if sample else 'VAR_POP' super().__init__(expression, **extra) def _get_repr_options(self): return {**super()._get_repr_options(), 'sample': self.function == 'VAR_SAMP'}
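
# --- Illustrative sketch (editor's addition, not part of Django source) ---
# How the pieces above combine; `Book` is a hypothetical model.
# resolve_expression() wraps the aggregate in Coalesce(<agg>, <default>) when
# default= is given, and as_sql() emits FILTER (WHERE ...) on databases that
# support it, otherwise rewrites the argument as CASE WHEN <filter> THEN
# <expr> END:
#
#   Book.objects.aggregate(
#       avg_price=Avg('price', filter=Q(in_print=True), default=0),
#   )
#   # -> roughly COALESCE(AVG("price") FILTER (WHERE "in_print"), 0)
if __name__ == '__main__':
    # default_alias is the key aggregate() uses for unnamed arguments.
    assert Avg('price').default_alias == 'price__avg'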
bef6b0eb56d99b68392a1942110343830d9a058028c6cbd5f53a74912d3be484
from django.db.backends.utils import names_digest, split_identifier from django.db.models.expressions import Col, ExpressionList, F, Func, OrderBy from django.db.models.functions import Collate from django.db.models.query_utils import Q from django.db.models.sql import Query from django.utils.functional import partition __all__ = ['Index'] class Index: suffix = 'idx' # The max length of the name of the index (restricted to 30 for # cross-database compatibility with Oracle) max_name_length = 30 def __init__( self, *expressions, fields=(), name=None, db_tablespace=None, opclasses=(), condition=None, include=None, ): if opclasses and not name: raise ValueError('An index must be named to use opclasses.') if not isinstance(condition, (type(None), Q)): raise ValueError('Index.condition must be a Q instance.') if condition and not name: raise ValueError('An index must be named to use condition.') if not isinstance(fields, (list, tuple)): raise ValueError('Index.fields must be a list or tuple.') if not isinstance(opclasses, (list, tuple)): raise ValueError('Index.opclasses must be a list or tuple.') if not expressions and not fields: raise ValueError( 'At least one field or expression is required to define an ' 'index.' ) if expressions and fields: raise ValueError( 'Index.fields and expressions are mutually exclusive.', ) if expressions and not name: raise ValueError('An index must be named to use expressions.') if expressions and opclasses: raise ValueError( 'Index.opclasses cannot be used with expressions. Use ' 'django.contrib.postgres.indexes.OpClass() instead.' ) if opclasses and len(fields) != len(opclasses): raise ValueError('Index.fields and Index.opclasses must have the same number of elements.') if fields and not all(isinstance(field, str) for field in fields): raise ValueError('Index.fields must contain only strings with field names.') if include and not name: raise ValueError('A covering index must be named.') if not isinstance(include, (type(None), list, tuple)): raise ValueError('Index.include must be a list or tuple.') self.fields = list(fields) # A list of 2-tuple with the field name and ordering ('' or 'DESC'). 
self.fields_orders = [ (field_name[1:], 'DESC') if field_name.startswith('-') else (field_name, '') for field_name in self.fields ] self.name = name or '' self.db_tablespace = db_tablespace self.opclasses = opclasses self.condition = condition self.include = tuple(include) if include else () self.expressions = tuple( F(expression) if isinstance(expression, str) else expression for expression in expressions ) @property def contains_expressions(self): return bool(self.expressions) def _get_condition_sql(self, model, schema_editor): if self.condition is None: return None query = Query(model=model, alias_cols=False) where = query.build_where(self.condition) compiler = query.get_compiler(connection=schema_editor.connection) sql, params = where.as_sql(compiler, schema_editor.connection) return sql % tuple(schema_editor.quote_value(p) for p in params) def create_sql(self, model, schema_editor, using='', **kwargs): include = [model._meta.get_field(field_name).column for field_name in self.include] condition = self._get_condition_sql(model, schema_editor) if self.expressions: index_expressions = [] for expression in self.expressions: index_expression = IndexExpression(expression) index_expression.set_wrapper_classes(schema_editor.connection) index_expressions.append(index_expression) expressions = ExpressionList(*index_expressions).resolve_expression( Query(model, alias_cols=False), ) fields = None col_suffixes = None else: fields = [ model._meta.get_field(field_name) for field_name, _ in self.fields_orders ] col_suffixes = [order[1] for order in self.fields_orders] expressions = None return schema_editor._create_index_sql( model, fields=fields, name=self.name, using=using, db_tablespace=self.db_tablespace, col_suffixes=col_suffixes, opclasses=self.opclasses, condition=condition, include=include, expressions=expressions, **kwargs, ) def remove_sql(self, model, schema_editor, **kwargs): return schema_editor._delete_index_sql(model, self.name, **kwargs) def deconstruct(self): path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__) path = path.replace('django.db.models.indexes', 'django.db.models') kwargs = {'name': self.name} if self.fields: kwargs['fields'] = self.fields if self.db_tablespace is not None: kwargs['db_tablespace'] = self.db_tablespace if self.opclasses: kwargs['opclasses'] = self.opclasses if self.condition: kwargs['condition'] = self.condition if self.include: kwargs['include'] = self.include return (path, self.expressions, kwargs) def clone(self): """Create a copy of this Index.""" _, args, kwargs = self.deconstruct() return self.__class__(*args, **kwargs) def set_name_with_model(self, model): """ Generate a unique name for the index. The name is divided into 3 parts - table name (12 chars), field name (8 chars) and unique hash + suffix (10 chars). Each part is made to fit its size by truncating the excess length. """ _, table_name = split_identifier(model._meta.db_table) column_names = [model._meta.get_field(field_name).column for field_name, order in self.fields_orders] column_names_with_order = [ (('-%s' if order else '%s') % column_name) for column_name, (field_name, order) in zip(column_names, self.fields_orders) ] # The length of the parts of the name is based on the default max # length of 30 characters. 
hash_data = [table_name] + column_names_with_order + [self.suffix] self.name = '%s_%s_%s' % ( table_name[:11], column_names[0][:7], '%s_%s' % (names_digest(*hash_data, length=6), self.suffix), ) if len(self.name) > self.max_name_length: raise ValueError( 'Index too long for multiple database support. Is self.suffix ' 'longer than 3 characters?' ) if self.name[0] == '_' or self.name[0].isdigit(): self.name = 'D%s' % self.name[1:] def __repr__(self): return '<%s:%s%s%s%s%s%s%s>' % ( self.__class__.__qualname__, '' if not self.fields else ' fields=%s' % repr(self.fields), '' if not self.expressions else ' expressions=%s' % repr(self.expressions), '' if not self.name else ' name=%s' % repr(self.name), '' if self.db_tablespace is None else ' db_tablespace=%s' % repr(self.db_tablespace), '' if self.condition is None else ' condition=%s' % self.condition, '' if not self.include else ' include=%s' % repr(self.include), '' if not self.opclasses else ' opclasses=%s' % repr(self.opclasses), ) def __eq__(self, other): if self.__class__ == other.__class__: return self.deconstruct() == other.deconstruct() return NotImplemented class IndexExpression(Func): """Order and wrap expressions for CREATE INDEX statements.""" template = '%(expressions)s' wrapper_classes = (OrderBy, Collate) def set_wrapper_classes(self, connection=None): # Some databases (e.g. MySQL) treats COLLATE as an indexed expression. if connection and connection.features.collate_as_index_expression: self.wrapper_classes = tuple([ wrapper_cls for wrapper_cls in self.wrapper_classes if wrapper_cls is not Collate ]) @classmethod def register_wrappers(cls, *wrapper_classes): cls.wrapper_classes = wrapper_classes def resolve_expression( self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False, ): expressions = list(self.flatten()) # Split expressions and wrappers. index_expressions, wrappers = partition( lambda e: isinstance(e, self.wrapper_classes), expressions, ) wrapper_types = [type(wrapper) for wrapper in wrappers] if len(wrapper_types) != len(set(wrapper_types)): raise ValueError( "Multiple references to %s can't be used in an indexed " "expression." % ', '.join([ wrapper_cls.__qualname__ for wrapper_cls in self.wrapper_classes ]) ) if expressions[1:len(wrappers) + 1] != wrappers: raise ValueError( '%s must be topmost expressions in an indexed expression.' % ', '.join([ wrapper_cls.__qualname__ for wrapper_cls in self.wrapper_classes ]) ) # Wrap expressions in parentheses if they are not column references. root_expression = index_expressions[1] resolve_root_expression = root_expression.resolve_expression( query, allow_joins, reuse, summarize, for_save, ) if not isinstance(resolve_root_expression, Col): root_expression = Func(root_expression, template='(%(expressions)s)') if wrappers: # Order wrappers and set their expressions. wrappers = sorted( wrappers, key=lambda w: self.wrapper_classes.index(type(w)), ) wrappers = [wrapper.copy() for wrapper in wrappers] for i, wrapper in enumerate(wrappers[:-1]): wrapper.set_source_expressions([wrappers[i + 1]]) # Set the root expression on the deepest wrapper. wrappers[-1].set_source_expressions([root_expression]) self.set_source_expressions([wrappers[0]]) else: # Use the root expression, if there are no wrappers. self.set_source_expressions([root_expression]) return super().resolve_expression(query, allow_joins, reuse, summarize, for_save) def as_sqlite(self, compiler, connection, **extra_context): # Casting to numeric is unnecessary. 
return self.as_sql(compiler, connection, **extra_context)
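
# --- Illustrative sketch (editor's addition, not part of Django source) ---
# Index.deconstruct() returns the (path, expressions, kwargs) triple the
# migration serializer consumes; the field and index names here are
# hypothetical.
if __name__ == '__main__':
    idx = Index(fields=['-created'], name='created_desc_idx')
    _, args, kwargs = idx.deconstruct()
    assert args == () and kwargs == {'name': 'created_desc_idx', 'fields': ['-created']}
    # A leading '-' is stored as a ('created', 'DESC') ordering pair.
    assert idx.fields_orders == [('created', 'DESC')]
    # Expression-based indexes must be named and are mutually exclusive
    # with fields=.
    assert Index(F('price'), name='price_idx').contains_expressions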
9911d1a956989731db91344e6b5a282a7a108b742211dff0ac05ae37a9ace7ae
""" The main QuerySet implementation. This provides the public API for the ORM. """ import copy import operator import warnings from itertools import chain import django from django.conf import settings from django.core import exceptions from django.db import ( DJANGO_VERSION_PICKLE_KEY, IntegrityError, NotSupportedError, connections, router, transaction, ) from django.db.models import AutoField, DateField, DateTimeField, sql from django.db.models.constants import LOOKUP_SEP from django.db.models.deletion import Collector from django.db.models.expressions import Case, Expression, F, Ref, Value, When from django.db.models.functions import Cast, Trunc from django.db.models.query_utils import FilteredRelation, Q from django.db.models.sql.constants import CURSOR, GET_ITERATOR_CHUNK_SIZE from django.db.models.utils import create_namedtuple_class, resolve_callables from django.utils import timezone from django.utils.functional import cached_property, partition # The maximum number of results to fetch in a get() query. MAX_GET_RESULTS = 21 # The maximum number of items to display in a QuerySet.__repr__ REPR_OUTPUT_SIZE = 20 class BaseIterable: def __init__(self, queryset, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE): self.queryset = queryset self.chunked_fetch = chunked_fetch self.chunk_size = chunk_size class ModelIterable(BaseIterable): """Iterable that yields a model instance for each row.""" def __iter__(self): queryset = self.queryset db = queryset.db compiler = queryset.query.get_compiler(using=db) # Execute the query. This will also fill compiler.select, klass_info, # and annotations. results = compiler.execute_sql(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size) select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info, compiler.annotation_col_map) model_cls = klass_info['model'] select_fields = klass_info['select_fields'] model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1 init_list = [f[0].target.attname for f in select[model_fields_start:model_fields_end]] related_populators = get_related_populators(klass_info, select, db) known_related_objects = [ (field, related_objs, operator.attrgetter(*[ field.attname if from_field == 'self' else queryset.model._meta.get_field(from_field).attname for from_field in field.from_fields ])) for field, related_objs in queryset._known_related_objects.items() ] for row in compiler.results_iter(results): obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end]) for rel_populator in related_populators: rel_populator.populate(row, obj) if annotation_col_map: for attr_name, col_pos in annotation_col_map.items(): setattr(obj, attr_name, row[col_pos]) # Add the known related objects to the model. for field, rel_objs, rel_getter in known_related_objects: # Avoid overwriting objects loaded by, e.g., select_related(). if field.is_cached(obj): continue rel_obj_id = rel_getter(obj) try: rel_obj = rel_objs[rel_obj_id] except KeyError: pass # May happen in qs1 | qs2 scenarios. else: setattr(obj, field.name, rel_obj) yield obj class ValuesIterable(BaseIterable): """ Iterable returned by QuerySet.values() that yields a dict for each row. """ def __iter__(self): queryset = self.queryset query = queryset.query compiler = query.get_compiler(queryset.db) # extra(select=...) cols are always at the start of the row. 
names = [ *query.extra_select, *query.values_select, *query.annotation_select, ] indexes = range(len(names)) for row in compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size): yield {names[i]: row[i] for i in indexes} class ValuesListIterable(BaseIterable): """ Iterable returned by QuerySet.values_list(flat=False) that yields a tuple for each row. """ def __iter__(self): queryset = self.queryset query = queryset.query compiler = query.get_compiler(queryset.db) if queryset._fields: # extra(select=...) cols are always at the start of the row. names = [ *query.extra_select, *query.values_select, *query.annotation_select, ] fields = [*queryset._fields, *(f for f in query.annotation_select if f not in queryset._fields)] if fields != names: # Reorder according to fields. index_map = {name: idx for idx, name in enumerate(names)} rowfactory = operator.itemgetter(*[index_map[f] for f in fields]) return map( rowfactory, compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size) ) return compiler.results_iter(tuple_expected=True, chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size) class NamedValuesListIterable(ValuesListIterable): """ Iterable returned by QuerySet.values_list(named=True) that yields a namedtuple for each row. """ def __iter__(self): queryset = self.queryset if queryset._fields: names = queryset._fields else: query = queryset.query names = [*query.extra_select, *query.values_select, *query.annotation_select] tuple_class = create_namedtuple_class(*names) new = tuple.__new__ for row in super().__iter__(): yield new(tuple_class, row) class FlatValuesListIterable(BaseIterable): """ Iterable returned by QuerySet.values_list(flat=True) that yields single values. """ def __iter__(self): queryset = self.queryset compiler = queryset.query.get_compiler(queryset.db) for row in compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size): yield row[0] class QuerySet: """Represent a lazy database lookup for a set of objects.""" def __init__(self, model=None, query=None, using=None, hints=None): self.model = model self._db = using self._hints = hints or {} self._query = query or sql.Query(self.model) self._result_cache = None self._sticky_filter = False self._for_write = False self._prefetch_related_lookups = () self._prefetch_done = False self._known_related_objects = {} # {rel_field: {pk: rel_obj}} self._iterable_class = ModelIterable self._fields = None self._defer_next_filter = False self._deferred_filter = None @property def query(self): if self._deferred_filter: negate, args, kwargs = self._deferred_filter self._filter_or_exclude_inplace(negate, args, kwargs) self._deferred_filter = None return self._query @query.setter def query(self, value): if value.values_select: self._iterable_class = ValuesIterable self._query = value def as_manager(cls): # Address the circular dependency between `Queryset` and `Manager`. from django.db.models.manager import Manager manager = Manager.from_queryset(cls)() manager._built_with_as_manager = True return manager as_manager.queryset_only = True as_manager = classmethod(as_manager) ######################## # PYTHON MAGIC METHODS # ######################## def __deepcopy__(self, memo): """Don't populate the QuerySet's cache.""" obj = self.__class__() for k, v in self.__dict__.items(): if k == '_result_cache': obj.__dict__[k] = None else: obj.__dict__[k] = copy.deepcopy(v, memo) return obj def __getstate__(self): # Force the cache to be fully populated. 
self._fetch_all() return {**self.__dict__, DJANGO_VERSION_PICKLE_KEY: django.__version__} def __setstate__(self, state): pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY) if pickled_version: if pickled_version != django.__version__: warnings.warn( "Pickled queryset instance's Django version %s does not " "match the current version %s." % (pickled_version, django.__version__), RuntimeWarning, stacklevel=2, ) else: warnings.warn( "Pickled queryset instance's Django version is not specified.", RuntimeWarning, stacklevel=2, ) self.__dict__.update(state) def __repr__(self): data = list(self[:REPR_OUTPUT_SIZE + 1]) if len(data) > REPR_OUTPUT_SIZE: data[-1] = "...(remaining elements truncated)..." return '<%s %r>' % (self.__class__.__name__, data) def __len__(self): self._fetch_all() return len(self._result_cache) def __iter__(self): """ The queryset iterator protocol uses three nested iterators in the default case: 1. sql.compiler.execute_sql() - Returns 100 rows at time (constants.GET_ITERATOR_CHUNK_SIZE) using cursor.fetchmany(). This part is responsible for doing some column masking, and returning the rows in chunks. 2. sql.compiler.results_iter() - Returns one row at time. At this point the rows are still just tuples. In some cases the return values are converted to Python values at this location. 3. self.iterator() - Responsible for turning the rows into model objects. """ self._fetch_all() return iter(self._result_cache) def __bool__(self): self._fetch_all() return bool(self._result_cache) def __getitem__(self, k): """Retrieve an item or slice from the set of results.""" if not isinstance(k, (int, slice)): raise TypeError( 'QuerySet indices must be integers or slices, not %s.' % type(k).__name__ ) if ( (isinstance(k, int) and k < 0) or (isinstance(k, slice) and ( (k.start is not None and k.start < 0) or (k.stop is not None and k.stop < 0) )) ): raise ValueError('Negative indexing is not supported.') if self._result_cache is not None: return self._result_cache[k] if isinstance(k, slice): qs = self._chain() if k.start is not None: start = int(k.start) else: start = None if k.stop is not None: stop = int(k.stop) else: stop = None qs.query.set_limits(start, stop) return list(qs)[::k.step] if k.step else qs qs = self._chain() qs.query.set_limits(k, k + 1) qs._fetch_all() return qs._result_cache[0] def __class_getitem__(cls, *args, **kwargs): return cls def __and__(self, other): self._merge_sanity_check(other) if isinstance(other, EmptyQuerySet): return other if isinstance(self, EmptyQuerySet): return self combined = self._chain() combined._merge_known_related_objects(other) combined.query.combine(other.query, sql.AND) return combined def __or__(self, other): self._merge_sanity_check(other) if isinstance(self, EmptyQuerySet): return other if isinstance(other, EmptyQuerySet): return self query = self if self.query.can_filter() else self.model._base_manager.filter(pk__in=self.values('pk')) combined = query._chain() combined._merge_known_related_objects(other) if not other.query.can_filter(): other = other.model._base_manager.filter(pk__in=other.values('pk')) combined.query.combine(other.query, sql.OR) return combined #################################### # METHODS THAT DO DATABASE QUERIES # #################################### def _iterator(self, use_chunked_fetch, chunk_size): yield from self._iterable_class(self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size) def iterator(self, chunk_size=2000): """ An iterator over the results from applying this QuerySet to the database. 
""" if chunk_size <= 0: raise ValueError('Chunk size must be strictly positive.') use_chunked_fetch = not connections[self.db].settings_dict.get('DISABLE_SERVER_SIDE_CURSORS') return self._iterator(use_chunked_fetch, chunk_size) def aggregate(self, *args, **kwargs): """ Return a dictionary containing the calculations (aggregation) over the current queryset. If args is present the expression is passed as a kwarg using the Aggregate object's default alias. """ if self.query.distinct_fields: raise NotImplementedError("aggregate() + distinct(fields) not implemented.") self._validate_values_are_expressions((*args, *kwargs.values()), method_name='aggregate') for arg in args: # The default_alias property raises TypeError if default_alias # can't be set automatically or AttributeError if it isn't an # attribute. try: arg.default_alias except (AttributeError, TypeError): raise TypeError("Complex aggregates require an alias") kwargs[arg.default_alias] = arg query = self.query.chain() for (alias, aggregate_expr) in kwargs.items(): query.add_annotation(aggregate_expr, alias, is_summary=True) annotation = query.annotations[alias] if not annotation.contains_aggregate: raise TypeError("%s is not an aggregate expression" % alias) for expr in annotation.get_source_expressions(): if expr.contains_aggregate and isinstance(expr, Ref) and expr.refs in kwargs: name = expr.refs raise exceptions.FieldError( "Cannot compute %s('%s'): '%s' is an aggregate" % (annotation.name, name, name) ) return query.get_aggregation(self.db, kwargs) def count(self): """ Perform a SELECT COUNT() and return the number of records as an integer. If the QuerySet is already fully cached, return the length of the cached results set to avoid multiple SELECT COUNT(*) calls. """ if self._result_cache is not None: return len(self._result_cache) return self.query.get_count(using=self.db) def get(self, *args, **kwargs): """ Perform the query and return a single object matching the given keyword arguments. """ if self.query.combinator and (args or kwargs): raise NotSupportedError( 'Calling QuerySet.get(...) with filters after %s() is not ' 'supported.' % self.query.combinator ) clone = self._chain() if self.query.combinator else self.filter(*args, **kwargs) if self.query.can_filter() and not self.query.distinct_fields: clone = clone.order_by() limit = None if not clone.query.select_for_update or connections[clone.db].features.supports_select_for_update_with_limit: limit = MAX_GET_RESULTS clone.query.set_limits(high=limit) num = len(clone) if num == 1: return clone._result_cache[0] if not num: raise self.model.DoesNotExist( "%s matching query does not exist." % self.model._meta.object_name ) raise self.model.MultipleObjectsReturned( 'get() returned more than one %s -- it returned %s!' % ( self.model._meta.object_name, num if not limit or num < limit else 'more than %s' % (limit - 1), ) ) def create(self, **kwargs): """ Create a new object with the given kwargs, saving it to the database and returning the created object. """ obj = self.model(**kwargs) self._for_write = True obj.save(force_insert=True, using=self.db) return obj def _prepare_for_bulk_create(self, objs): for obj in objs: if obj.pk is None: # Populate new PK values. obj.pk = obj._meta.pk.get_pk_value_on_save(obj) obj._prepare_related_fields_for_save(operation_name='bulk_create') def bulk_create(self, objs, batch_size=None, ignore_conflicts=False): """ Insert each of the instances into the database. 
        Do *not* call save() on each of the instances, do not send any
        pre/post_save signals, and do not set the primary key attribute if it
        is an autoincrement field (except if
        features.can_return_rows_from_bulk_insert=True). Multi-table models
        are not supported.
        """
        # When you bulk insert you don't get the primary keys back (if it's an
        # autoincrement, except if can_return_rows_from_bulk_insert=True), so
        # you can't insert into the child tables which reference this. There
        # are two workarounds:
        # 1) This could be implemented if you didn't have an autoincrement pk
        # 2) You could do it by doing O(n) normal inserts into the parent
        #    tables to get the primary keys back and then doing a single bulk
        #    insert into the childmost table.
        # We currently set the primary keys on the objects when using
        # PostgreSQL via the RETURNING ID clause. It should be possible for
        # Oracle as well, but the semantics for extracting the primary keys
        # are trickier so it's not done yet.
        if batch_size is not None and batch_size <= 0:
            raise ValueError('Batch size must be a positive integer.')
        # Check that the parents share the same concrete model with our model
        # to detect the inheritance pattern ConcreteGrandParent ->
        # MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy
        # would not identify that case as involving multiple tables.
        for parent in self.model._meta.get_parent_list():
            if parent._meta.concrete_model is not self.model._meta.concrete_model:
                raise ValueError("Can't bulk create a multi-table inherited model")
        if not objs:
            return objs
        self._for_write = True
        connection = connections[self.db]
        opts = self.model._meta
        fields = opts.concrete_fields
        objs = list(objs)
        self._prepare_for_bulk_create(objs)
        with transaction.atomic(using=self.db, savepoint=False):
            objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
            if objs_with_pk:
                returned_columns = self._batched_insert(
                    objs_with_pk, fields, batch_size, ignore_conflicts=ignore_conflicts,
                )
                for obj_with_pk, results in zip(objs_with_pk, returned_columns):
                    for result, field in zip(results, opts.db_returning_fields):
                        if field != opts.pk:
                            setattr(obj_with_pk, field.attname, result)
                for obj_with_pk in objs_with_pk:
                    obj_with_pk._state.adding = False
                    obj_with_pk._state.db = self.db
            if objs_without_pk:
                fields = [f for f in fields if not isinstance(f, AutoField)]
                returned_columns = self._batched_insert(
                    objs_without_pk, fields, batch_size, ignore_conflicts=ignore_conflicts,
                )
                if connection.features.can_return_rows_from_bulk_insert and not ignore_conflicts:
                    assert len(returned_columns) == len(objs_without_pk)
                for obj_without_pk, results in zip(objs_without_pk, returned_columns):
                    for result, field in zip(results, opts.db_returning_fields):
                        setattr(obj_without_pk, field.attname, result)
                    obj_without_pk._state.adding = False
                    obj_without_pk._state.db = self.db

        return objs

    def bulk_update(self, objs, fields, batch_size=None):
        """
        Update the given fields in each of the given objects in the database.
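
        Illustrative sketch (hypothetical ``Entry`` objects already loaded;
        each batch becomes one UPDATE with a CASE/WHEN expression per field)::

            entries = list(Entry.objects.all())
            for entry in entries:
                entry.headline = entry.headline.title()
            Entry.objects.bulk_update(entries, ['headline'], batch_size=500)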
""" if batch_size is not None and batch_size < 0: raise ValueError('Batch size must be a positive integer.') if not fields: raise ValueError('Field names must be given to bulk_update().') objs = tuple(objs) if any(obj.pk is None for obj in objs): raise ValueError('All bulk_update() objects must have a primary key set.') fields = [self.model._meta.get_field(name) for name in fields] if any(not f.concrete or f.many_to_many for f in fields): raise ValueError('bulk_update() can only be used with concrete fields.') if any(f.primary_key for f in fields): raise ValueError('bulk_update() cannot be used with primary key fields.') if not objs: return 0 # PK is used twice in the resulting update query, once in the filter # and once in the WHEN. Each field will also have one CAST. max_batch_size = connections[self.db].ops.bulk_batch_size(['pk', 'pk'] + fields, objs) batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size requires_casting = connections[self.db].features.requires_casted_case_in_updates batches = (objs[i:i + batch_size] for i in range(0, len(objs), batch_size)) updates = [] for batch_objs in batches: update_kwargs = {} for field in fields: when_statements = [] for obj in batch_objs: attr = getattr(obj, field.attname) if not isinstance(attr, Expression): attr = Value(attr, output_field=field) when_statements.append(When(pk=obj.pk, then=attr)) case_statement = Case(*when_statements, output_field=field) if requires_casting: case_statement = Cast(case_statement, output_field=field) update_kwargs[field.attname] = case_statement updates.append(([obj.pk for obj in batch_objs], update_kwargs)) rows_updated = 0 with transaction.atomic(using=self.db, savepoint=False): for pks, update_kwargs in updates: rows_updated += self.filter(pk__in=pks).update(**update_kwargs) return rows_updated bulk_update.alters_data = True def get_or_create(self, defaults=None, **kwargs): """ Look up an object with the given kwargs, creating one if necessary. Return a tuple of (object, created), where created is a boolean specifying whether an object was created. """ # The get() needs to be targeted at the write database in order # to avoid potential transaction consistency problems. self._for_write = True try: return self.get(**kwargs), False except self.model.DoesNotExist: params = self._extract_model_params(defaults, **kwargs) # Try to create an object using passed params. try: with transaction.atomic(using=self.db): params = dict(resolve_callables(params)) return self.create(**params), True except IntegrityError: try: return self.get(**kwargs), False except self.model.DoesNotExist: pass raise def update_or_create(self, defaults=None, **kwargs): """ Look up an object with the given kwargs, updating one with defaults if it exists, otherwise create a new one. Return a tuple (object, created), where created is a boolean specifying whether an object was created. """ defaults = defaults or {} self._for_write = True with transaction.atomic(using=self.db): # Lock the row so that a concurrent update is blocked until # update_or_create() has performed its save. obj, created = self.select_for_update().get_or_create(defaults, **kwargs) if created: return obj, created for k, v in resolve_callables(defaults): setattr(obj, k, v) obj.save(using=self.db) return obj, False def _extract_model_params(self, defaults, **kwargs): """ Prepare `params` for creating a model instance based on the given kwargs; for use by get_or_create(). 
""" defaults = defaults or {} params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k} params.update(defaults) property_names = self.model._meta._property_names invalid_params = [] for param in params: try: self.model._meta.get_field(param) except exceptions.FieldDoesNotExist: # It's okay to use a model's property if it has a setter. if not (param in property_names and getattr(self.model, param).fset): invalid_params.append(param) if invalid_params: raise exceptions.FieldError( "Invalid field name(s) for model %s: '%s'." % ( self.model._meta.object_name, "', '".join(sorted(invalid_params)), )) return params def _earliest(self, *fields): """ Return the earliest object according to fields (if given) or by the model's Meta.get_latest_by. """ if fields: order_by = fields else: order_by = getattr(self.model._meta, 'get_latest_by') if order_by and not isinstance(order_by, (tuple, list)): order_by = (order_by,) if order_by is None: raise ValueError( "earliest() and latest() require either fields as positional " "arguments or 'get_latest_by' in the model's Meta." ) obj = self._chain() obj.query.set_limits(high=1) obj.query.clear_ordering(force=True) obj.query.add_ordering(*order_by) return obj.get() def earliest(self, *fields): if self.query.is_sliced: raise TypeError('Cannot change a query once a slice has been taken.') return self._earliest(*fields) def latest(self, *fields): if self.query.is_sliced: raise TypeError('Cannot change a query once a slice has been taken.') return self.reverse()._earliest(*fields) def first(self): """Return the first object of a query or None if no match is found.""" for obj in (self if self.ordered else self.order_by('pk'))[:1]: return obj def last(self): """Return the last object of a query or None if no match is found.""" for obj in (self.reverse() if self.ordered else self.order_by('-pk'))[:1]: return obj def in_bulk(self, id_list=None, *, field_name='pk'): """ Return a dictionary mapping each of the given IDs to the object with that ID. If `id_list` isn't provided, evaluate the entire QuerySet. """ if self.query.is_sliced: raise TypeError("Cannot use 'limit' or 'offset' with in_bulk().") opts = self.model._meta unique_fields = [ constraint.fields[0] for constraint in opts.total_unique_constraints if len(constraint.fields) == 1 ] if ( field_name != 'pk' and not opts.get_field(field_name).unique and field_name not in unique_fields and self.query.distinct_fields != (field_name,) ): raise ValueError("in_bulk()'s field_name must be a unique field but %r isn't." % field_name) if id_list is not None: if not id_list: return {} filter_key = '{}__in'.format(field_name) batch_size = connections[self.db].features.max_query_params id_list = tuple(id_list) # If the database has a limit on the number of query parameters # (e.g. SQLite), retrieve objects in batches if necessary. 
            if batch_size and batch_size < len(id_list):
                qs = ()
                for offset in range(0, len(id_list), batch_size):
                    batch = id_list[offset:offset + batch_size]
                    qs += tuple(self.filter(**{filter_key: batch}).order_by())
            else:
                qs = self.filter(**{filter_key: id_list}).order_by()
        else:
            qs = self._chain()
        return {getattr(obj, field_name): obj for obj in qs}

    def delete(self):
        """Delete the records in the current QuerySet."""
        self._not_support_combined_queries('delete')
        if self.query.is_sliced:
            raise TypeError("Cannot use 'limit' or 'offset' with delete().")
        if self.query.distinct or self.query.distinct_fields:
            raise TypeError('Cannot call delete() after .distinct().')
        if self._fields is not None:
            raise TypeError("Cannot call delete() after .values() or .values_list()")

        del_query = self._chain()

        # The delete is actually 2 queries - one to find related objects,
        # and one to delete. Make sure that the discovery of related
        # objects is performed on the same database as the deletion.
        del_query._for_write = True

        # Disable non-supported fields.
        del_query.query.select_for_update = False
        del_query.query.select_related = False
        del_query.query.clear_ordering(force=True)

        collector = Collector(using=del_query.db)
        collector.collect(del_query)
        deleted, _rows_count = collector.delete()

        # Clear the result cache, in case this QuerySet gets reused.
        self._result_cache = None
        return deleted, _rows_count

    delete.alters_data = True
    delete.queryset_only = True

    def _raw_delete(self, using):
        """
        Delete objects found from the given queryset in a single direct SQL
        query. No signals are sent and there is no protection for cascades.
        """
        query = self.query.clone()
        query.__class__ = sql.DeleteQuery
        cursor = query.get_compiler(using).execute_sql(CURSOR)
        if cursor:
            with cursor:
                return cursor.rowcount
        return 0
    _raw_delete.alters_data = True

    def update(self, **kwargs):
        """
        Update all elements in the current QuerySet, setting all the given
        fields to the appropriate values.
        """
        self._not_support_combined_queries('update')
        if self.query.is_sliced:
            raise TypeError('Cannot update a query once a slice has been taken.')
        self._for_write = True
        query = self.query.chain(sql.UpdateQuery)
        query.add_update_values(kwargs)
        # Clear any annotations so that they won't be present in subqueries.
        query.annotations = {}
        with transaction.mark_for_rollback_on_error(using=self.db):
            rows = query.get_compiler(self.db).execute_sql(CURSOR)
        self._result_cache = None
        return rows
    update.alters_data = True

    def _update(self, values):
        """
        A version of update() that accepts field objects instead of field
        names. Used primarily for model saving and not intended for use by
        general code (it requires too much poking around at model internals
        to be useful at that level).
        """
        if self.query.is_sliced:
            raise TypeError('Cannot update a query once a slice has been taken.')
        query = self.query.chain(sql.UpdateQuery)
        query.add_update_fields(values)
        # Clear any annotations so that they won't be present in subqueries.
        query.annotations = {}
        self._result_cache = None
        return query.get_compiler(self.db).execute_sql(CURSOR)
    _update.alters_data = True
    _update.queryset_only = False

    def exists(self):
        if self._result_cache is None:
            return self.query.has_results(using=self.db)
        return bool(self._result_cache)

    def contains(self, obj):
        """Return True if the queryset contains an object."""
        self._not_support_combined_queries('contains')
        if self._fields is not None:
            raise TypeError(
                'Cannot call QuerySet.contains() after .values() or '
                '.values_list().'
) try: if obj._meta.concrete_model != self.model._meta.concrete_model: return False except AttributeError: raise TypeError("'obj' must be a model instance.") if obj.pk is None: raise ValueError( 'QuerySet.contains() cannot be used on unsaved objects.' ) if self._result_cache is not None: return obj in self._result_cache return self.filter(pk=obj.pk).exists() def _prefetch_related_objects(self): # This method can only be called once the result cache has been filled. prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups) self._prefetch_done = True def explain(self, *, format=None, **options): return self.query.explain(using=self.db, format=format, **options) ################################################## # PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS # ################################################## def raw(self, raw_query, params=(), translations=None, using=None): if using is None: using = self.db qs = RawQuerySet(raw_query, model=self.model, params=params, translations=translations, using=using) qs._prefetch_related_lookups = self._prefetch_related_lookups[:] return qs def _values(self, *fields, **expressions): clone = self._chain() if expressions: clone = clone.annotate(**expressions) clone._fields = fields clone.query.set_values(fields) return clone def values(self, *fields, **expressions): fields += tuple(expressions) clone = self._values(*fields, **expressions) clone._iterable_class = ValuesIterable return clone def values_list(self, *fields, flat=False, named=False): if flat and named: raise TypeError("'flat' and 'named' can't be used together.") if flat and len(fields) > 1: raise TypeError("'flat' is not valid when values_list is called with more than one field.") field_names = {f for f in fields if not hasattr(f, 'resolve_expression')} _fields = [] expressions = {} counter = 1 for field in fields: if hasattr(field, 'resolve_expression'): field_id_prefix = getattr(field, 'default_alias', field.__class__.__name__.lower()) while True: field_id = field_id_prefix + str(counter) counter += 1 if field_id not in field_names: break expressions[field_id] = field _fields.append(field_id) else: _fields.append(field) clone = self._values(*_fields, **expressions) clone._iterable_class = ( NamedValuesListIterable if named else FlatValuesListIterable if flat else ValuesListIterable ) return clone def dates(self, field_name, kind, order='ASC'): """ Return a list of date objects representing all available dates for the given field_name, scoped to 'kind'. """ if kind not in ('year', 'month', 'week', 'day'): raise ValueError("'kind' must be one of 'year', 'month', 'week', or 'day'.") if order not in ('ASC', 'DESC'): raise ValueError("'order' must be either 'ASC' or 'DESC'.") return self.annotate( datefield=Trunc(field_name, kind, output_field=DateField()), plain_field=F(field_name) ).values_list( 'datefield', flat=True ).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datefield') def datetimes(self, field_name, kind, order='ASC', tzinfo=None, is_dst=None): """ Return a list of datetime objects representing all available datetimes for the given field_name, scoped to 'kind'. """ if kind not in ('year', 'month', 'week', 'day', 'hour', 'minute', 'second'): raise ValueError( "'kind' must be one of 'year', 'month', 'week', 'day', " "'hour', 'minute', or 'second'." 
) if order not in ('ASC', 'DESC'): raise ValueError("'order' must be either 'ASC' or 'DESC'.") if settings.USE_TZ: if tzinfo is None: tzinfo = timezone.get_current_timezone() else: tzinfo = None return self.annotate( datetimefield=Trunc( field_name, kind, output_field=DateTimeField(), tzinfo=tzinfo, is_dst=is_dst, ), plain_field=F(field_name) ).values_list( 'datetimefield', flat=True ).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield') def none(self): """Return an empty QuerySet.""" clone = self._chain() clone.query.set_empty() return clone ################################################################## # PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET # ################################################################## def all(self): """ Return a new QuerySet that is a copy of the current one. This allows a QuerySet to proxy for a model manager in some cases. """ return self._chain() def filter(self, *args, **kwargs): """ Return a new QuerySet instance with the args ANDed to the existing set. """ self._not_support_combined_queries('filter') return self._filter_or_exclude(False, args, kwargs) def exclude(self, *args, **kwargs): """ Return a new QuerySet instance with NOT (args) ANDed to the existing set. """ self._not_support_combined_queries('exclude') return self._filter_or_exclude(True, args, kwargs) def _filter_or_exclude(self, negate, args, kwargs): if (args or kwargs) and self.query.is_sliced: raise TypeError('Cannot filter a query once a slice has been taken.') clone = self._chain() if self._defer_next_filter: self._defer_next_filter = False clone._deferred_filter = negate, args, kwargs else: clone._filter_or_exclude_inplace(negate, args, kwargs) return clone def _filter_or_exclude_inplace(self, negate, args, kwargs): if negate: self._query.add_q(~Q(*args, **kwargs)) else: self._query.add_q(Q(*args, **kwargs)) def complex_filter(self, filter_obj): """ Return a new QuerySet instance with filter_obj added to the filters. filter_obj can be a Q object or a dictionary of keyword lookup arguments. This exists to support framework features such as 'limit_choices_to', and usually it will be more natural to use other methods. """ if isinstance(filter_obj, Q): clone = self._chain() clone.query.add_q(filter_obj) return clone else: return self._filter_or_exclude(False, args=(), kwargs=filter_obj) def _combinator_query(self, combinator, *other_qs, all=False): # Clone the query to inherit the select list and everything clone = self._chain() # Clear limits and ordering so they can be reapplied clone.query.clear_ordering(force=True) clone.query.clear_limits() clone.query.combined_queries = (self.query,) + tuple(qs.query for qs in other_qs) clone.query.combinator = combinator clone.query.combinator_all = all return clone def union(self, *other_qs, all=False): # If the query is an EmptyQuerySet, combine all nonempty querysets. if isinstance(self, EmptyQuerySet): qs = [q for q in other_qs if not isinstance(q, EmptyQuerySet)] if not qs: return self if len(qs) == 1: return qs[0] return qs[0]._combinator_query('union', *qs[1:], all=all) return self._combinator_query('union', *other_qs, all=all) def intersection(self, *other_qs): # If any query is an EmptyQuerySet, return it. if isinstance(self, EmptyQuerySet): return self for other in other_qs: if isinstance(other, EmptyQuerySet): return other return self._combinator_query('intersection', *other_qs) def difference(self, *other_qs): # If the query is an EmptyQuerySet, return it. 
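        # Usage sketch (hypothetical querysets): qs1.difference(qs2) compiles
        # to "SELECT ... EXCEPT SELECT ..." on backends that support it, via
        # _combinator_query('difference', ...) below.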
if isinstance(self, EmptyQuerySet): return self return self._combinator_query('difference', *other_qs) def select_for_update(self, nowait=False, skip_locked=False, of=(), no_key=False): """ Return a new QuerySet instance that will select objects with a FOR UPDATE lock. """ if nowait and skip_locked: raise ValueError('The nowait option cannot be used with skip_locked.') obj = self._chain() obj._for_write = True obj.query.select_for_update = True obj.query.select_for_update_nowait = nowait obj.query.select_for_update_skip_locked = skip_locked obj.query.select_for_update_of = of obj.query.select_for_no_key_update = no_key return obj def select_related(self, *fields): """ Return a new QuerySet instance that will select related objects. If fields are specified, they must be ForeignKey fields and only those related objects are included in the selection. If select_related(None) is called, clear the list. """ self._not_support_combined_queries('select_related') if self._fields is not None: raise TypeError("Cannot call select_related() after .values() or .values_list()") obj = self._chain() if fields == (None,): obj.query.select_related = False elif fields: obj.query.add_select_related(fields) else: obj.query.select_related = True return obj def prefetch_related(self, *lookups): """ Return a new QuerySet instance that will prefetch the specified Many-To-One and Many-To-Many related objects when the QuerySet is evaluated. When prefetch_related() is called more than once, append to the list of prefetch lookups. If prefetch_related(None) is called, clear the list. """ self._not_support_combined_queries('prefetch_related') clone = self._chain() if lookups == (None,): clone._prefetch_related_lookups = () else: for lookup in lookups: if isinstance(lookup, Prefetch): lookup = lookup.prefetch_to lookup = lookup.split(LOOKUP_SEP, 1)[0] if lookup in self.query._filtered_relations: raise ValueError('prefetch_related() is not supported with FilteredRelation.') clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups return clone def annotate(self, *args, **kwargs): """ Return a query set in which the returned objects have been annotated with extra data or aggregations. """ self._not_support_combined_queries('annotate') return self._annotate(args, kwargs, select=True) def alias(self, *args, **kwargs): """ Return a query set with added aliases for extra data or aggregations. """ self._not_support_combined_queries('alias') return self._annotate(args, kwargs, select=False) def _annotate(self, args, kwargs, select=True): self._validate_values_are_expressions(args + tuple(kwargs.values()), method_name='annotate') annotations = {} for arg in args: # The default_alias property may raise a TypeError. try: if arg.default_alias in kwargs: raise ValueError("The named annotation '%s' conflicts with the " "default name for another annotation." % arg.default_alias) except TypeError: raise TypeError("Complex annotations require an alias") annotations[arg.default_alias] = arg annotations.update(kwargs) clone = self._chain() names = self._fields if names is None: names = set(chain.from_iterable( (field.name, field.attname) if hasattr(field, 'attname') else (field.name,) for field in self.model._meta.get_fields() )) for alias, annotation in annotations.items(): if alias in names: raise ValueError("The annotation '%s' conflicts with a field on " "the model." 
                                 % alias)
            if isinstance(annotation, FilteredRelation):
                clone.query.add_filtered_relation(annotation, alias)
            else:
                clone.query.add_annotation(
                    annotation, alias, is_summary=False, select=select,
                )
        for alias, annotation in clone.query.annotations.items():
            if alias in annotations and annotation.contains_aggregate:
                if clone._fields is None:
                    clone.query.group_by = True
                else:
                    clone.query.set_group_by()
                break

        return clone

    def order_by(self, *field_names):
        """Return a new QuerySet instance with the ordering changed."""
        if self.query.is_sliced:
            raise TypeError('Cannot reorder a query once a slice has been taken.')
        obj = self._chain()
        obj.query.clear_ordering(force=True, clear_default=False)
        obj.query.add_ordering(*field_names)
        return obj

    def distinct(self, *field_names):
        """
        Return a new QuerySet instance that will select only distinct results.
        """
        self._not_support_combined_queries('distinct')
        if self.query.is_sliced:
            raise TypeError('Cannot create distinct fields once a slice has been taken.')
        obj = self._chain()
        obj.query.add_distinct_fields(*field_names)
        return obj

    def extra(self, select=None, where=None, params=None, tables=None,
              order_by=None, select_params=None):
        """Add extra SQL fragments to the query."""
        self._not_support_combined_queries('extra')
        if self.query.is_sliced:
            raise TypeError('Cannot change a query once a slice has been taken.')
        clone = self._chain()
        clone.query.add_extra(select, select_params, where, params, tables, order_by)
        return clone

    def reverse(self):
        """Reverse the ordering of the QuerySet."""
        if self.query.is_sliced:
            raise TypeError('Cannot reverse a query once a slice has been taken.')
        clone = self._chain()
        clone.query.standard_ordering = not clone.query.standard_ordering
        return clone

    def defer(self, *fields):
        """
        Defer the loading of data for certain fields until they are accessed.
        Add the set of deferred fields to any existing set of deferred fields.
        The only exception to this is if None is passed in as the only
        parameter, in which case all deferrals are removed.
        """
        self._not_support_combined_queries('defer')
        if self._fields is not None:
            raise TypeError("Cannot call defer() after .values() or .values_list()")
        clone = self._chain()
        if fields == (None,):
            clone.query.clear_deferred_loading()
        else:
            clone.query.add_deferred_loading(fields)
        return clone

    def only(self, *fields):
        """
        Essentially, the opposite of defer(). Only the fields passed into this
        method and that are not already specified as deferred are loaded
        immediately when the queryset is evaluated.
        """
        self._not_support_combined_queries('only')
        if self._fields is not None:
            raise TypeError("Cannot call only() after .values() or .values_list()")
        if fields == (None,):
            # Can only pass None to defer(), not only(), as the rest option.
            # That won't stop people trying to do this, so let's be explicit.
            raise TypeError("Cannot pass None as an argument to only().")
        for field in fields:
            field = field.split(LOOKUP_SEP, 1)[0]
            if field in self.query._filtered_relations:
                raise ValueError('only() is not supported with FilteredRelation.')
        clone = self._chain()
        clone.query.add_immediate_loading(fields)
        return clone

    def using(self, alias):
        """Select which database this QuerySet should execute against."""
        clone = self._chain()
        clone._db = alias
        return clone

    ###################################
    # PUBLIC INTROSPECTION ATTRIBUTES #
    ###################################

    @property
    def ordered(self):
        """
        Return True if the QuerySet is ordered -- i.e. has an order_by()
        clause or a default ordering on the model (or is empty).
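
        For example (hypothetical ``Entry`` model without Meta.ordering)::

            Entry.objects.all().ordered            # False
            Entry.objects.order_by('pk').ordered   # True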
""" if isinstance(self, EmptyQuerySet): return True if self.query.extra_order_by or self.query.order_by: return True elif ( self.query.default_ordering and self.query.get_meta().ordering and # A default ordering doesn't affect GROUP BY queries. not self.query.group_by ): return True else: return False @property def db(self): """Return the database used if this query is executed now.""" if self._for_write: return self._db or router.db_for_write(self.model, **self._hints) return self._db or router.db_for_read(self.model, **self._hints) ################### # PRIVATE METHODS # ################### def _insert(self, objs, fields, returning_fields=None, raw=False, using=None, ignore_conflicts=False): """ Insert a new record for the given model. This provides an interface to the InsertQuery class and is how Model.save() is implemented. """ self._for_write = True if using is None: using = self.db query = sql.InsertQuery(self.model, ignore_conflicts=ignore_conflicts) query.insert_values(fields, objs, raw=raw) return query.get_compiler(using=using).execute_sql(returning_fields) _insert.alters_data = True _insert.queryset_only = False def _batched_insert(self, objs, fields, batch_size, ignore_conflicts=False): """ Helper method for bulk_create() to insert objs one batch at a time. """ if ignore_conflicts and not connections[self.db].features.supports_ignore_conflicts: raise NotSupportedError('This database backend does not support ignoring conflicts.') ops = connections[self.db].ops max_batch_size = max(ops.bulk_batch_size(fields, objs), 1) batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size inserted_rows = [] bulk_return = connections[self.db].features.can_return_rows_from_bulk_insert for item in [objs[i:i + batch_size] for i in range(0, len(objs), batch_size)]: if bulk_return and not ignore_conflicts: inserted_rows.extend(self._insert( item, fields=fields, using=self.db, returning_fields=self.model._meta.db_returning_fields, ignore_conflicts=ignore_conflicts, )) else: self._insert(item, fields=fields, using=self.db, ignore_conflicts=ignore_conflicts) return inserted_rows def _chain(self, **kwargs): """ Return a copy of the current QuerySet that's ready for another operation. """ obj = self._clone() if obj._sticky_filter: obj.query.filter_is_sticky = True obj._sticky_filter = False obj.__dict__.update(kwargs) return obj def _clone(self): """ Return a copy of the current QuerySet. A lightweight alternative to deepcopy(). """ c = self.__class__(model=self.model, query=self.query.chain(), using=self._db, hints=self._hints) c._sticky_filter = self._sticky_filter c._for_write = self._for_write c._prefetch_related_lookups = self._prefetch_related_lookups[:] c._known_related_objects = self._known_related_objects c._iterable_class = self._iterable_class c._fields = self._fields return c def _fetch_all(self): if self._result_cache is None: self._result_cache = list(self._iterable_class(self)) if self._prefetch_related_lookups and not self._prefetch_done: self._prefetch_related_objects() def _next_is_sticky(self): """ Indicate that the next filter call and the one following that should be treated as a single filter. This is only important when it comes to determining when to reuse tables for many-to-many filters. Required so that we can filter naturally on the results of related managers. This doesn't return a clone of the current QuerySet (it returns "self"). The method is only used internally and should be immediately followed by a filter() that does create a clone. 
""" self._sticky_filter = True return self def _merge_sanity_check(self, other): """Check that two QuerySet classes may be merged.""" if self._fields is not None and ( set(self.query.values_select) != set(other.query.values_select) or set(self.query.extra_select) != set(other.query.extra_select) or set(self.query.annotation_select) != set(other.query.annotation_select)): raise TypeError( "Merging '%s' classes must involve the same values in each case." % self.__class__.__name__ ) def _merge_known_related_objects(self, other): """ Keep track of all known related objects from either QuerySet instance. """ for field, objects in other._known_related_objects.items(): self._known_related_objects.setdefault(field, {}).update(objects) def resolve_expression(self, *args, **kwargs): if self._fields and len(self._fields) > 1: # values() queryset can only be used as nested queries # if they are set up to select only a single field. raise TypeError('Cannot use multi-field values as a filter value.') query = self.query.resolve_expression(*args, **kwargs) query._db = self._db return query resolve_expression.queryset_only = True def _add_hints(self, **hints): """ Update hinting information for use by routers. Add new key/values or overwrite existing key/values. """ self._hints.update(hints) def _has_filters(self): """ Check if this QuerySet has any filtering going on. This isn't equivalent with checking if all objects are present in results, for example, qs[1:]._has_filters() -> False. """ return self.query.has_filters() @staticmethod def _validate_values_are_expressions(values, method_name): invalid_args = sorted(str(arg) for arg in values if not hasattr(arg, 'resolve_expression')) if invalid_args: raise TypeError( 'QuerySet.%s() received non-expression(s): %s.' % ( method_name, ', '.join(invalid_args), ) ) def _not_support_combined_queries(self, operation_name): if self.query.combinator: raise NotSupportedError( 'Calling QuerySet.%s() after %s() is not supported.' % (operation_name, self.query.combinator) ) class InstanceCheckMeta(type): def __instancecheck__(self, instance): return isinstance(instance, QuerySet) and instance.query.is_empty() class EmptyQuerySet(metaclass=InstanceCheckMeta): """ Marker class to checking if a queryset is empty by .none(): isinstance(qs.none(), EmptyQuerySet) -> True """ def __init__(self, *args, **kwargs): raise TypeError("EmptyQuerySet can't be instantiated") class RawQuerySet: """ Provide an iterator which converts the results of raw SQL queries into annotated model instances. 
""" def __init__(self, raw_query, model=None, query=None, params=(), translations=None, using=None, hints=None): self.raw_query = raw_query self.model = model self._db = using self._hints = hints or {} self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params) self.params = params self.translations = translations or {} self._result_cache = None self._prefetch_related_lookups = () self._prefetch_done = False def resolve_model_init_order(self): """Resolve the init field names and value positions.""" converter = connections[self.db].introspection.identifier_converter model_init_fields = [f for f in self.model._meta.fields if converter(f.column) in self.columns] annotation_fields = [(column, pos) for pos, column in enumerate(self.columns) if column not in self.model_fields] model_init_order = [self.columns.index(converter(f.column)) for f in model_init_fields] model_init_names = [f.attname for f in model_init_fields] return model_init_names, model_init_order, annotation_fields def prefetch_related(self, *lookups): """Same as QuerySet.prefetch_related()""" clone = self._clone() if lookups == (None,): clone._prefetch_related_lookups = () else: clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups return clone def _prefetch_related_objects(self): prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups) self._prefetch_done = True def _clone(self): """Same as QuerySet._clone()""" c = self.__class__( self.raw_query, model=self.model, query=self.query, params=self.params, translations=self.translations, using=self._db, hints=self._hints ) c._prefetch_related_lookups = self._prefetch_related_lookups[:] return c def _fetch_all(self): if self._result_cache is None: self._result_cache = list(self.iterator()) if self._prefetch_related_lookups and not self._prefetch_done: self._prefetch_related_objects() def __len__(self): self._fetch_all() return len(self._result_cache) def __bool__(self): self._fetch_all() return bool(self._result_cache) def __iter__(self): self._fetch_all() return iter(self._result_cache) def iterator(self): # Cache some things for performance reasons outside the loop. db = self.db compiler = connections[db].ops.compiler('SQLCompiler')( self.query, connections[db], db ) query = iter(self.query) try: model_init_names, model_init_pos, annotation_fields = self.resolve_model_init_order() if self.model._meta.pk.attname not in model_init_names: raise exceptions.FieldDoesNotExist( 'Raw query must include the primary key' ) model_cls = self.model fields = [self.model_fields.get(c) for c in self.columns] converters = compiler.get_converters([ f.get_col(f.model._meta.db_table) if f else None for f in fields ]) if converters: query = compiler.apply_converters(query, converters) for values in query: # Associate fields to values model_init_values = [values[pos] for pos in model_init_pos] instance = model_cls.from_db(db, model_init_names, model_init_values) if annotation_fields: for column, pos in annotation_fields: setattr(instance, column, values[pos]) yield instance finally: # Done iterating the Query. If it has its own cursor, close it. 
if hasattr(self.query, 'cursor') and self.query.cursor: self.query.cursor.close() def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self.query) def __getitem__(self, k): return list(self)[k] @property def db(self): """Return the database used if this query is executed now.""" return self._db or router.db_for_read(self.model, **self._hints) def using(self, alias): """Select the database this RawQuerySet should execute against.""" return RawQuerySet( self.raw_query, model=self.model, query=self.query.chain(using=alias), params=self.params, translations=self.translations, using=alias, ) @cached_property def columns(self): """ A list of model field names in the order they'll appear in the query results. """ columns = self.query.get_columns() # Adjust any column names which don't match field names for (query_name, model_name) in self.translations.items(): # Ignore translations for nonexistent column names try: index = columns.index(query_name) except ValueError: pass else: columns[index] = model_name return columns @cached_property def model_fields(self): """A dict mapping column names to model field names.""" converter = connections[self.db].introspection.identifier_converter model_fields = {} for field in self.model._meta.fields: name, column = field.get_attname_column() model_fields[converter(column)] = field return model_fields class Prefetch: def __init__(self, lookup, queryset=None, to_attr=None): # `prefetch_through` is the path we traverse to perform the prefetch. self.prefetch_through = lookup # `prefetch_to` is the path to the attribute that stores the result. self.prefetch_to = lookup if queryset is not None and ( isinstance(queryset, RawQuerySet) or ( hasattr(queryset, '_iterable_class') and not issubclass(queryset._iterable_class, ModelIterable) ) ): raise ValueError( 'Prefetch querysets cannot use raw(), values(), and ' 'values_list().' ) if to_attr: self.prefetch_to = LOOKUP_SEP.join(lookup.split(LOOKUP_SEP)[:-1] + [to_attr]) self.queryset = queryset self.to_attr = to_attr def __getstate__(self): obj_dict = self.__dict__.copy() if self.queryset is not None: # Prevent the QuerySet from being evaluated obj_dict['queryset'] = self.queryset._chain( _result_cache=[], _prefetch_done=True, ) return obj_dict def add_prefix(self, prefix): self.prefetch_through = prefix + LOOKUP_SEP + self.prefetch_through self.prefetch_to = prefix + LOOKUP_SEP + self.prefetch_to def get_current_prefetch_to(self, level): return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[:level + 1]) def get_current_to_attr(self, level): parts = self.prefetch_to.split(LOOKUP_SEP) to_attr = parts[level] as_attr = self.to_attr and level == len(parts) - 1 return to_attr, as_attr def get_current_queryset(self, level): if self.get_current_prefetch_to(level) == self.prefetch_to: return self.queryset return None def __eq__(self, other): if not isinstance(other, Prefetch): return NotImplemented return self.prefetch_to == other.prefetch_to def __hash__(self): return hash((self.__class__, self.prefetch_to)) def normalize_prefetch_lookups(lookups, prefix=None): """Normalize lookups into Prefetch objects.""" ret = [] for lookup in lookups: if not isinstance(lookup, Prefetch): lookup = Prefetch(lookup) if prefix: lookup.add_prefix(prefix) ret.append(lookup) return ret def prefetch_related_objects(model_instances, *related_lookups): """ Populate prefetched object caches for a list of model instances based on the lookups/Prefetch instances given. 
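
    Illustrative sketch (hypothetical models; this is the same lookup contract
    as the public QuerySet.prefetch_related())::

        restaurants = list(Restaurant.objects.all())
        prefetch_related_objects(restaurants, 'pizzas__toppings')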
""" if not model_instances: return # nothing to do # We need to be able to dynamically add to the list of prefetch_related # lookups that we look up (see below). So we need some book keeping to # ensure we don't do duplicate work. done_queries = {} # dictionary of things like 'foo__bar': [results] auto_lookups = set() # we add to this as we go through. followed_descriptors = set() # recursion protection all_lookups = normalize_prefetch_lookups(reversed(related_lookups)) while all_lookups: lookup = all_lookups.pop() if lookup.prefetch_to in done_queries: if lookup.queryset is not None: raise ValueError("'%s' lookup was already seen with a different queryset. " "You may need to adjust the ordering of your lookups." % lookup.prefetch_to) continue # Top level, the list of objects to decorate is the result cache # from the primary QuerySet. It won't be for deeper levels. obj_list = model_instances through_attrs = lookup.prefetch_through.split(LOOKUP_SEP) for level, through_attr in enumerate(through_attrs): # Prepare main instances if not obj_list: break prefetch_to = lookup.get_current_prefetch_to(level) if prefetch_to in done_queries: # Skip any prefetching, and any object preparation obj_list = done_queries[prefetch_to] continue # Prepare objects: good_objects = True for obj in obj_list: # Since prefetching can re-use instances, it is possible to have # the same instance multiple times in obj_list, so obj might # already be prepared. if not hasattr(obj, '_prefetched_objects_cache'): try: obj._prefetched_objects_cache = {} except (AttributeError, TypeError): # Must be an immutable object from # values_list(flat=True), for example (TypeError) or # a QuerySet subclass that isn't returning Model # instances (AttributeError), either in Django or a 3rd # party. prefetch_related() doesn't make sense, so quit. good_objects = False break if not good_objects: break # Descend down tree # We assume that objects retrieved are homogeneous (which is the premise # of prefetch_related), so what applies to first object applies to all. first_obj = obj_list[0] to_attr = lookup.get_current_to_attr(level)[0] prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, through_attr, to_attr) if not attr_found: raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid " "parameter to prefetch_related()" % (through_attr, first_obj.__class__.__name__, lookup.prefetch_through)) if level == len(through_attrs) - 1 and prefetcher is None: # Last one, this *must* resolve to something that supports # prefetching, otherwise there is no point adding it and the # developer asking for it has made a mistake. raise ValueError("'%s' does not resolve to an item that supports " "prefetching - this is an invalid parameter to " "prefetch_related()." % lookup.prefetch_through) obj_to_fetch = None if prefetcher is not None: obj_to_fetch = [obj for obj in obj_list if not is_fetched(obj)] if obj_to_fetch: obj_list, additional_lookups = prefetch_one_level( obj_to_fetch, prefetcher, lookup, level, ) # We need to ensure we don't keep adding lookups from the # same relationships to stop infinite recursion. So, if we # are already on an automatically added lookup, don't add # the new lookups from relationships we've seen already. 
if not (prefetch_to in done_queries and lookup in auto_lookups and descriptor in followed_descriptors): done_queries[prefetch_to] = obj_list new_lookups = normalize_prefetch_lookups(reversed(additional_lookups), prefetch_to) auto_lookups.update(new_lookups) all_lookups.extend(new_lookups) followed_descriptors.add(descriptor) else: # Either a singly related object that has already been fetched # (e.g. via select_related), or hopefully some other property # that doesn't support prefetching but needs to be traversed. # We replace the current list of parent objects with the list # of related objects, filtering out empty or missing values so # that we can continue with nullable or reverse relations. new_obj_list = [] for obj in obj_list: if through_attr in getattr(obj, '_prefetched_objects_cache', ()): # If related objects have been prefetched, use the # cache rather than the object's through_attr. new_obj = list(obj._prefetched_objects_cache.get(through_attr)) else: try: new_obj = getattr(obj, through_attr) except exceptions.ObjectDoesNotExist: continue if new_obj is None: continue # We special-case `list` rather than something more generic # like `Iterable` because we don't want to accidentally match # user models that define __iter__. if isinstance(new_obj, list): new_obj_list.extend(new_obj) else: new_obj_list.append(new_obj) obj_list = new_obj_list def get_prefetcher(instance, through_attr, to_attr): """ For the attribute 'through_attr' on the given instance, find an object that has a get_prefetch_queryset(). Return a 4 tuple containing: (the object with get_prefetch_queryset (or None), the descriptor object representing this relationship (or None), a boolean that is False if the attribute was not found at all, a function that takes an instance and returns a boolean that is True if the attribute has already been fetched for that instance) """ def has_to_attr_attribute(instance): return hasattr(instance, to_attr) prefetcher = None is_fetched = has_to_attr_attribute # For singly related objects, we have to avoid getting the attribute # from the object, as this will trigger the query. So we first try # on the class, in order to get the descriptor object. rel_obj_descriptor = getattr(instance.__class__, through_attr, None) if rel_obj_descriptor is None: attr_found = hasattr(instance, through_attr) else: attr_found = True if rel_obj_descriptor: # singly related object, descriptor object has the # get_prefetch_queryset() method. if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'): prefetcher = rel_obj_descriptor is_fetched = rel_obj_descriptor.is_cached else: # descriptor doesn't support prefetching, so we go ahead and get # the attribute on the instance rather than the class to # support many related managers rel_obj = getattr(instance, through_attr) if hasattr(rel_obj, 'get_prefetch_queryset'): prefetcher = rel_obj if through_attr != to_attr: # Special case cached_property instances because hasattr # triggers attribute computation and assignment. if isinstance(getattr(instance.__class__, to_attr, None), cached_property): def has_cached_property(instance): return to_attr in instance.__dict__ is_fetched = has_cached_property else: def in_prefetched_cache(instance): return through_attr in instance._prefetched_objects_cache is_fetched = in_prefetched_cache return prefetcher, rel_obj_descriptor, attr_found, is_fetched def prefetch_one_level(instances, prefetcher, lookup, level): """ Helper function for prefetch_related_objects(). 
Run prefetches on all instances using the prefetcher object, assigning results to relevant caches in instance. Return the prefetched objects along with any additional prefetches that must be done due to prefetch_related lookups found from default managers. """ # prefetcher must have a method get_prefetch_queryset() which takes a list # of instances, and returns a tuple: # (queryset of instances of self.model that are related to passed in instances, # callable that gets value to be matched for returned instances, # callable that gets value to be matched for passed in instances, # boolean that is True for singly related objects, # cache or field name to assign to, # boolean that is True when the previous argument is a cache name vs a field name). # The 'values to be matched' must be hashable as they will be used # in a dictionary. rel_qs, rel_obj_attr, instance_attr, single, cache_name, is_descriptor = ( prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level))) # We have to handle the possibility that the QuerySet we just got back # contains some prefetch_related lookups. We don't want to trigger the # prefetch_related functionality by evaluating the query. Rather, we need # to merge in the prefetch_related lookups. # Copy the lookups in case it is a Prefetch object which could be reused # later (happens in nested prefetch_related). additional_lookups = [ copy.copy(additional_lookup) for additional_lookup in getattr(rel_qs, '_prefetch_related_lookups', ()) ] if additional_lookups: # Don't need to clone because the manager should have given us a fresh # instance, so we access an internal instead of using public interface # for performance reasons. rel_qs._prefetch_related_lookups = () all_related_objects = list(rel_qs) rel_obj_cache = {} for rel_obj in all_related_objects: rel_attr_val = rel_obj_attr(rel_obj) rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj) to_attr, as_attr = lookup.get_current_to_attr(level) # Make sure `to_attr` does not conflict with a field. if as_attr and instances: # We assume that objects retrieved are homogeneous (which is the premise # of prefetch_related), so what applies to first object applies to all. model = instances[0].__class__ try: model._meta.get_field(to_attr) except exceptions.FieldDoesNotExist: pass else: msg = 'to_attr={} conflicts with a field on the {} model.' raise ValueError(msg.format(to_attr, model.__name__)) # Whether or not we're prefetching the last part of the lookup. leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level for obj in instances: instance_attr_val = instance_attr(obj) vals = rel_obj_cache.get(instance_attr_val, []) if single: val = vals[0] if vals else None if as_attr: # A to_attr has been given for the prefetch. setattr(obj, to_attr, val) elif is_descriptor: # cache_name points to a field name in obj. # This field is a descriptor for a related object. setattr(obj, cache_name, val) else: # No to_attr has been given for this prefetch operation and the # cache_name does not point to a descriptor. Store the value of # the field in the object's field cache. obj._state.fields_cache[cache_name] = val else: if as_attr: setattr(obj, to_attr, vals) else: manager = getattr(obj, to_attr) if leaf and lookup.queryset is not None: qs = manager._apply_rel_filters(lookup.queryset) else: qs = manager.get_queryset() qs._result_cache = vals # We don't want the individual qs doing prefetch_related now, # since we have merged this into the current work. 
qs._prefetch_done = True obj._prefetched_objects_cache[cache_name] = qs return all_related_objects, additional_lookups class RelatedPopulator: """ RelatedPopulator is used for select_related() object instantiation. The idea is that each select_related() model will be populated by a different RelatedPopulator instance. The RelatedPopulator instances get klass_info and select (computed in SQLCompiler) plus the used db as input for initialization. That data is used to compute which columns to use, how to instantiate the model, and how to populate the links between the objects. The actual creation of the objects is done in populate() method. This method gets row and from_obj as input and populates the select_related() model instance. """ def __init__(self, klass_info, select, db): self.db = db # Pre-compute needed attributes. The attributes are: # - model_cls: the possibly deferred model class to instantiate # - either: # - cols_start, cols_end: usually the columns in the row are # in the same order model_cls.__init__ expects them, so we # can instantiate by model_cls(*row[cols_start:cols_end]) # - reorder_for_init: When select_related descends to a child # class, then we want to reuse the already selected parent # data. However, in this case the parent data isn't necessarily # in the same order that Model.__init__ expects it to be, so # we have to reorder the parent data. The reorder_for_init # attribute contains a function used to reorder the field data # in the order __init__ expects it. # - pk_idx: the index of the primary key field in the reordered # model data. Used to check if a related object exists at all. # - init_list: the field attnames fetched from the database. For # deferred models this isn't the same as all attnames of the # model's fields. # - related_populators: a list of RelatedPopulator instances if # select_related() descends to related models from this model. # - local_setter, remote_setter: Methods to set cached values on # the object being populated and on the remote object. Usually # these are Field.set_cached_value() methods. 
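        # Worked example (hypothetical shapes): if init_list is ['id', 'name']
        # and those attnames sit at select indexes 3 and 1, then
        # reorder_for_init(row) returns (row[3], row[1]), i.e. the parent data
        # rearranged into Model.__init__ order.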
select_fields = klass_info['select_fields'] from_parent = klass_info['from_parent'] if not from_parent: self.cols_start = select_fields[0] self.cols_end = select_fields[-1] + 1 self.init_list = [ f[0].target.attname for f in select[self.cols_start:self.cols_end] ] self.reorder_for_init = None else: attname_indexes = {select[idx][0].target.attname: idx for idx in select_fields} model_init_attnames = (f.attname for f in klass_info['model']._meta.concrete_fields) self.init_list = [attname for attname in model_init_attnames if attname in attname_indexes] self.reorder_for_init = operator.itemgetter(*[attname_indexes[attname] for attname in self.init_list]) self.model_cls = klass_info['model'] self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname) self.related_populators = get_related_populators(klass_info, select, self.db) self.local_setter = klass_info['local_setter'] self.remote_setter = klass_info['remote_setter'] def populate(self, row, from_obj): if self.reorder_for_init: obj_data = self.reorder_for_init(row) else: obj_data = row[self.cols_start:self.cols_end] if obj_data[self.pk_idx] is None: obj = None else: obj = self.model_cls.from_db(self.db, self.init_list, obj_data) for rel_iter in self.related_populators: rel_iter.populate(row, obj) self.local_setter(from_obj, obj) if obj is not None: self.remote_setter(obj, from_obj) def get_related_populators(klass_info, select, db): iterators = [] related_klass_infos = klass_info.get('related_klass_infos', []) for rel_klass_info in related_klass_infos: rel_cls = RelatedPopulator(rel_klass_info, select, db) iterators.append(rel_cls) return iterators
2a727a1b507676ea6f6a78e037c7a105aa61067627ecbbab0dca5f9dbe8d6ce3
import enum

from types import DynamicClassAttribute

from django.utils.functional import Promise

__all__ = ['Choices', 'IntegerChoices', 'TextChoices']


class ChoicesMeta(enum.EnumMeta):
    """A metaclass for creating an enum of choices."""

    def __new__(metacls, classname, bases, classdict, **kwds):
        labels = []
        for key in classdict._member_names:
            value = classdict[key]
            if (
                isinstance(value, (list, tuple)) and
                len(value) > 1 and
                isinstance(value[-1], (Promise, str))
            ):
                *value, label = value
                value = tuple(value)
            else:
                label = key.replace('_', ' ').title()
            labels.append(label)
            # Use dict.__setitem__() to suppress defenses against double
            # assignment in enum's classdict.
            dict.__setitem__(classdict, key, value)
        cls = super().__new__(metacls, classname, bases, classdict, **kwds)
        for member, label in zip(cls.__members__.values(), labels):
            member._label_ = label
        return enum.unique(cls)

    def __contains__(cls, member):
        if not isinstance(member, enum.Enum):
            # Allow non-enums to match against member values.
            return any(x.value == member for x in cls)
        return super().__contains__(member)

    @property
    def names(cls):
        empty = ['__empty__'] if hasattr(cls, '__empty__') else []
        return empty + [member.name for member in cls]

    @property
    def choices(cls):
        empty = [(None, cls.__empty__)] if hasattr(cls, '__empty__') else []
        return empty + [(member.value, member.label) for member in cls]

    @property
    def labels(cls):
        return [label for _, label in cls.choices]

    @property
    def values(cls):
        return [value for value, _ in cls.choices]


class Choices(enum.Enum, metaclass=ChoicesMeta):
    """Class for creating enumerated choices."""

    @DynamicClassAttribute
    def label(self):
        return self._label_

    @property
    def do_not_call_in_templates(self):
        return True

    def __str__(self):
        """
        Use value when cast to str, so that Choices set as model instance
        attributes are rendered as expected in templates and similar contexts.
        """
        return str(self.value)

    # A similar format was proposed for Python 3.10.
    def __repr__(self):
        return f'{self.__class__.__qualname__}.{self._name_}'


class IntegerChoices(int, Choices):
    """Class for creating enumerated integer choices."""
    pass


class TextChoices(str, Choices):
    """Class for creating enumerated string choices."""

    def _generate_next_value_(name, start, count, last_values):
        return name
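
# Usage sketch (hypothetical enum; this only illustrates the documented
# behavior of the classes above, not additional API):
#
#     class Suit(IntegerChoices):
#         DIAMOND = 1, 'Diamond'
#         SPADE = 2, 'Spade'
#
#     Suit.choices        -> [(1, 'Diamond'), (2, 'Spade')]
#     Suit.labels         -> ['Diamond', 'Spade']
#     Suit.values         -> [1, 2]
#     Suit.DIAMOND.label  -> 'Diamond'
#     str(Suit.DIAMOND)   -> '1'
#     2 in Suit           -> True  (non-enum values match by .value)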
ca9548b655cb47b1aac737c0e78f570f38f43b2f1e7123a6d52cb7e013539a95
import copy import inspect import warnings from functools import partialmethod from itertools import chain import django from django.apps import apps from django.conf import settings from django.core import checks from django.core.exceptions import ( NON_FIELD_ERRORS, FieldDoesNotExist, FieldError, MultipleObjectsReturned, ObjectDoesNotExist, ValidationError, ) from django.db import ( DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY, DatabaseError, connection, connections, router, transaction, ) from django.db.models import ( NOT_PROVIDED, ExpressionWrapper, IntegerField, Max, Value, ) from django.db.models.constants import LOOKUP_SEP from django.db.models.constraints import CheckConstraint, UniqueConstraint from django.db.models.deletion import CASCADE, Collector from django.db.models.fields.related import ( ForeignObjectRel, OneToOneField, lazy_related_operation, resolve_relation, ) from django.db.models.functions import Coalesce from django.db.models.manager import Manager from django.db.models.options import Options from django.db.models.query import F, Q from django.db.models.signals import ( class_prepared, post_init, post_save, pre_init, pre_save, ) from django.db.models.utils import make_model_tuple from django.utils.encoding import force_str from django.utils.hashable import make_hashable from django.utils.text import capfirst, get_text_list from django.utils.translation import gettext_lazy as _ class Deferred: def __repr__(self): return '<Deferred field>' def __str__(self): return '<Deferred field>' DEFERRED = Deferred() def subclass_exception(name, bases, module, attached_to): """ Create exception subclass. Used by ModelBase below. The exception is created in a way that allows it to be pickled, assuming that the returned exception class will be added as an attribute to the 'attached_to' class. """ return type(name, bases, { '__module__': module, '__qualname__': '%s.%s' % (attached_to.__qualname__, name), }) def _has_contribute_to_class(value): # Only call contribute_to_class() if it's bound. return not inspect.isclass(value) and hasattr(value, 'contribute_to_class') class ModelBase(type): """Metaclass for all models.""" def __new__(cls, name, bases, attrs, **kwargs): super_new = super().__new__ # Also ensure initialization is only performed for subclasses of Model # (excluding Model class itself). parents = [b for b in bases if isinstance(b, ModelBase)] if not parents: return super_new(cls, name, bases, attrs) # Create the class. module = attrs.pop('__module__') new_attrs = {'__module__': module} classcell = attrs.pop('__classcell__', None) if classcell is not None: new_attrs['__classcell__'] = classcell attr_meta = attrs.pop('Meta', None) # Pass all attrs without a (Django-specific) contribute_to_class() # method to type.__new__() so that they're properly initialized # (i.e. __set_name__()). contributable_attrs = {} for obj_name, obj in attrs.items(): if _has_contribute_to_class(obj): contributable_attrs[obj_name] = obj else: new_attrs[obj_name] = obj new_class = super_new(cls, name, bases, new_attrs, **kwargs) abstract = getattr(attr_meta, 'abstract', False) meta = attr_meta or getattr(new_class, 'Meta', None) base_meta = getattr(new_class, '_meta', None) app_label = None # Look for an application configuration to attach the model to. 
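        # E.g. (hypothetical layout): a model declared in myapp/models.py is
        # matched to the 'myapp' AppConfig, so its app_label becomes 'myapp'
        # unless Meta.app_label says otherwise.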
app_config = apps.get_containing_app_config(module) if getattr(meta, 'app_label', None) is None: if app_config is None: if not abstract: raise RuntimeError( "Model class %s.%s doesn't declare an explicit " "app_label and isn't in an application in " "INSTALLED_APPS." % (module, name) ) else: app_label = app_config.label new_class.add_to_class('_meta', Options(meta, app_label)) if not abstract: new_class.add_to_class( 'DoesNotExist', subclass_exception( 'DoesNotExist', tuple( x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract ) or (ObjectDoesNotExist,), module, attached_to=new_class)) new_class.add_to_class( 'MultipleObjectsReturned', subclass_exception( 'MultipleObjectsReturned', tuple( x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract ) or (MultipleObjectsReturned,), module, attached_to=new_class)) if base_meta and not base_meta.abstract: # Non-abstract child classes inherit some attributes from their # non-abstract parent (unless an ABC comes before it in the # method resolution order). if not hasattr(meta, 'ordering'): new_class._meta.ordering = base_meta.ordering if not hasattr(meta, 'get_latest_by'): new_class._meta.get_latest_by = base_meta.get_latest_by is_proxy = new_class._meta.proxy # If the model is a proxy, ensure that the base class # hasn't been swapped out. if is_proxy and base_meta and base_meta.swapped: raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped)) # Add remaining attributes (those with a contribute_to_class() method) # to the class. for obj_name, obj in contributable_attrs.items(): new_class.add_to_class(obj_name, obj) # All the fields of any type declared on this model new_fields = chain( new_class._meta.local_fields, new_class._meta.local_many_to_many, new_class._meta.private_fields ) field_names = {f.name for f in new_fields} # Basic setup for proxy models. if is_proxy: base = None for parent in [kls for kls in parents if hasattr(kls, '_meta')]: if parent._meta.abstract: if parent._meta.fields: raise TypeError( "Abstract base class containing model fields not " "permitted for proxy model '%s'." % name ) else: continue if base is None: base = parent elif parent._meta.concrete_model is not base._meta.concrete_model: raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name) if base is None: raise TypeError("Proxy model '%s' has no non-abstract model base class." % name) new_class._meta.setup_proxy(base) new_class._meta.concrete_model = base._meta.concrete_model else: new_class._meta.concrete_model = new_class # Collect the parent links for multi-table inheritance. parent_links = {} for base in reversed([new_class] + parents): # Conceptually equivalent to `if base is Model`. if not hasattr(base, '_meta'): continue # Skip concrete parent classes. if base != new_class and not base._meta.abstract: continue # Locate OneToOneField instances. for field in base._meta.local_fields: if isinstance(field, OneToOneField) and field.remote_field.parent_link: related = resolve_relation(new_class, field.remote_field.model) parent_links[make_model_tuple(related)] = field # Track fields inherited from base models. inherited_attributes = set() # Do the appropriate setup for any model parents. for base in new_class.mro(): if base not in parents or not hasattr(base, '_meta'): # Things without _meta aren't functional models, so they're # uninteresting parents. 
inherited_attributes.update(base.__dict__) continue parent_fields = base._meta.local_fields + base._meta.local_many_to_many if not base._meta.abstract: # Check for clashes between locally declared fields and those # on the base classes. for field in parent_fields: if field.name in field_names: raise FieldError( 'Local field %r in class %r clashes with field of ' 'the same name from base class %r.' % ( field.name, name, base.__name__, ) ) else: inherited_attributes.add(field.name) # Concrete classes... base = base._meta.concrete_model base_key = make_model_tuple(base) if base_key in parent_links: field = parent_links[base_key] elif not is_proxy: attr_name = '%s_ptr' % base._meta.model_name field = OneToOneField( base, on_delete=CASCADE, name=attr_name, auto_created=True, parent_link=True, ) if attr_name in field_names: raise FieldError( "Auto-generated field '%s' in class %r for " "parent_link to base class %r clashes with " "declared field of the same name." % ( attr_name, name, base.__name__, ) ) # Only add the ptr field if it's not already present; # e.g. migrations will already have it specified if not hasattr(new_class, attr_name): new_class.add_to_class(attr_name, field) else: field = None new_class._meta.parents[base] = field else: base_parents = base._meta.parents.copy() # Add fields from abstract base class if it wasn't overridden. for field in parent_fields: if (field.name not in field_names and field.name not in new_class.__dict__ and field.name not in inherited_attributes): new_field = copy.deepcopy(field) new_class.add_to_class(field.name, new_field) # Replace parent links defined on this base by the new # field. It will be appropriately resolved if required. if field.one_to_one: for parent, parent_link in base_parents.items(): if field == parent_link: base_parents[parent] = new_field # Pass any non-abstract parent classes onto child. new_class._meta.parents.update(base_parents) # Inherit private fields (like GenericForeignKey) from the parent # class for field in base._meta.private_fields: if field.name in field_names: if not base._meta.abstract: raise FieldError( 'Local field %r in class %r clashes with field of ' 'the same name from base class %r.' % ( field.name, name, base.__name__, ) ) else: field = copy.deepcopy(field) if not base._meta.abstract: field.mti_inherited = True new_class.add_to_class(field.name, field) # Copy indexes so that index names are unique when models extend an # abstract model. new_class._meta.indexes = [copy.deepcopy(idx) for idx in new_class._meta.indexes] if abstract: # Abstract base models can't be instantiated and don't appear in # the list of models for an app. We do the final setup for them a # little differently from normal models. attr_meta.abstract = False new_class.Meta = attr_meta return new_class new_class._prepare() new_class._meta.apps.register_model(new_class._meta.app_label, new_class) return new_class def add_to_class(cls, name, value): if _has_contribute_to_class(value): value.contribute_to_class(cls, name) else: setattr(cls, name, value) def _prepare(cls): """Create some methods once self._meta has been populated.""" opts = cls._meta opts._prepare(cls) if opts.order_with_respect_to: cls.get_next_in_order = partialmethod(cls._get_next_or_previous_in_order, is_next=True) cls.get_previous_in_order = partialmethod(cls._get_next_or_previous_in_order, is_next=False) # Defer creating accessors on the foreign class until it has been # created and registered. 
If remote_field is None, we're ordering # with respect to a GenericForeignKey and don't know what the # foreign class is - we'll add those accessors later in # contribute_to_class(). if opts.order_with_respect_to.remote_field: wrt = opts.order_with_respect_to remote = wrt.remote_field.model lazy_related_operation(make_foreign_order_accessors, cls, remote) # Give the class a docstring -- its definition. if cls.__doc__ is None: cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join(f.name for f in opts.fields)) get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get(opts.label_lower) if get_absolute_url_override: setattr(cls, 'get_absolute_url', get_absolute_url_override) if not opts.managers: if any(f.name == 'objects' for f in opts.fields): raise ValueError( "Model %s must specify a custom Manager, because it has a " "field named 'objects'." % cls.__name__ ) manager = Manager() manager.auto_created = True cls.add_to_class('objects', manager) # Set the name of _meta.indexes. This can't be done in # Options.contribute_to_class() because fields haven't been added to # the model at that point. for index in cls._meta.indexes: if not index.name: index.set_name_with_model(cls) class_prepared.send(sender=cls) @property def _base_manager(cls): return cls._meta.base_manager @property def _default_manager(cls): return cls._meta.default_manager class ModelStateFieldsCacheDescriptor: def __get__(self, instance, cls=None): if instance is None: return self res = instance.fields_cache = {} return res class ModelState: """Store model instance state.""" db = None # If true, uniqueness validation checks will consider this a new, unsaved # object. Necessary for correct validation of new instances of objects with # explicit (non-auto) PKs. This impacts validation only; it has no effect # on the actual save. adding = True fields_cache = ModelStateFieldsCacheDescriptor() class Model(metaclass=ModelBase): def __init__(self, *args, **kwargs): # Alias some things as locals to avoid repeat global lookups cls = self.__class__ opts = self._meta _setattr = setattr _DEFERRED = DEFERRED if opts.abstract: raise TypeError('Abstract models cannot be instantiated.') pre_init.send(sender=cls, args=args, kwargs=kwargs) # Set up the storage for instance state self._state = ModelState() # There is a rather weird disparity here; if kwargs, it's set, then args # overrides it. It should be one or the other; don't duplicate the work # The reason for the kwargs check is that standard iterator passes in by # args, and instantiation for iteration is 33% faster. if len(args) > len(opts.concrete_fields): # Daft, but matches old exception sans the err msg. raise IndexError("Number of args exceeds number of fields") if not kwargs: fields_iter = iter(opts.concrete_fields) # The ordering of the zip calls matter - zip throws StopIteration # when an iter throws it. So if the first iter throws it, the second # is *not* consumed. We rely on this, so don't change the order # without changing the logic. for val, field in zip(args, fields_iter): if val is _DEFERRED: continue _setattr(self, field.attname, val) else: # Slower, kwargs-ready version. fields_iter = iter(opts.fields) for val, field in zip(args, fields_iter): if val is _DEFERRED: continue _setattr(self, field.attname, val) if kwargs.pop(field.name, NOT_PROVIDED) is not NOT_PROVIDED: raise TypeError( f"{cls.__qualname__}() got both positional and " f"keyword arguments for field '{field.name}'." ) # Now we're left with the unprocessed fields that *must* come from # keywords, or default. 
for field in fields_iter: is_related_object = False # Virtual field if field.attname not in kwargs and field.column is None: continue if kwargs: if isinstance(field.remote_field, ForeignObjectRel): try: # Assume object instance was passed in. rel_obj = kwargs.pop(field.name) is_related_object = True except KeyError: try: # Object instance wasn't passed in -- must be an ID. val = kwargs.pop(field.attname) except KeyError: val = field.get_default() else: try: val = kwargs.pop(field.attname) except KeyError: # This is done with an exception rather than the # default argument on pop because we don't want # get_default() to be evaluated, and then not used. # Refs #12057. val = field.get_default() else: val = field.get_default() if is_related_object: # If we are passed a related instance, set it using the # field.name instead of field.attname (e.g. "user" instead of # "user_id") so that the object gets properly cached (and type # checked) by the RelatedObjectDescriptor. if rel_obj is not _DEFERRED: _setattr(self, field.name, rel_obj) else: if val is not _DEFERRED: _setattr(self, field.attname, val) if kwargs: property_names = opts._property_names for prop in tuple(kwargs): try: # Any remaining kwargs must correspond to properties or # virtual fields. if prop in property_names or opts.get_field(prop): if kwargs[prop] is not _DEFERRED: _setattr(self, prop, kwargs[prop]) del kwargs[prop] except (AttributeError, FieldDoesNotExist): pass for kwarg in kwargs: raise TypeError("%s() got an unexpected keyword argument '%s'" % (cls.__name__, kwarg)) super().__init__() post_init.send(sender=cls, instance=self) @classmethod def from_db(cls, db, field_names, values): if len(values) != len(cls._meta.concrete_fields): values_iter = iter(values) values = [ next(values_iter) if f.attname in field_names else DEFERRED for f in cls._meta.concrete_fields ] new = cls(*values) new._state.adding = False new._state.db = db return new def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self) def __str__(self): return '%s object (%s)' % (self.__class__.__name__, self.pk) def __eq__(self, other): if not isinstance(other, Model): return NotImplemented if self._meta.concrete_model != other._meta.concrete_model: return False my_pk = self.pk if my_pk is None: return self is other return my_pk == other.pk def __hash__(self): if self.pk is None: raise TypeError("Model instances without primary key value are unhashable") return hash(self.pk) def __reduce__(self): data = self.__getstate__() data[DJANGO_VERSION_PICKLE_KEY] = django.__version__ class_id = self._meta.app_label, self._meta.object_name return model_unpickle, (class_id,), data def __getstate__(self): """Hook to allow choosing the attributes to pickle.""" state = self.__dict__.copy() state['_state'] = copy.copy(state['_state']) state['_state'].fields_cache = state['_state'].fields_cache.copy() return state def __setstate__(self, state): pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY) if pickled_version: if pickled_version != django.__version__: warnings.warn( "Pickled model instance's Django version %s does not " "match the current version %s." 
% (pickled_version, django.__version__), RuntimeWarning, stacklevel=2, ) else: warnings.warn( "Pickled model instance's Django version is not specified.", RuntimeWarning, stacklevel=2, ) self.__dict__.update(state) def _get_pk_val(self, meta=None): meta = meta or self._meta return getattr(self, meta.pk.attname) def _set_pk_val(self, value): for parent_link in self._meta.parents.values(): if parent_link and parent_link != self._meta.pk: setattr(self, parent_link.target_field.attname, value) return setattr(self, self._meta.pk.attname, value) pk = property(_get_pk_val, _set_pk_val) def get_deferred_fields(self): """ Return a set containing names of deferred fields for this instance. """ return { f.attname for f in self._meta.concrete_fields if f.attname not in self.__dict__ } def refresh_from_db(self, using=None, fields=None): """ Reload field values from the database. By default, the reloading happens from the database this instance was loaded from, or by the read router if this instance wasn't loaded from any database. The using parameter will override the default. Fields can be used to specify which fields to reload. The fields should be an iterable of field attnames. If fields is None, then all non-deferred fields are reloaded. When accessing deferred fields of an instance, the deferred loading of the field will call this method. """ if fields is None: self._prefetched_objects_cache = {} else: prefetched_objects_cache = getattr(self, '_prefetched_objects_cache', ()) for field in fields: if field in prefetched_objects_cache: del prefetched_objects_cache[field] fields.remove(field) if not fields: return if any(LOOKUP_SEP in f for f in fields): raise ValueError( 'Found "%s" in fields argument. Relations and transforms ' 'are not allowed in fields.' % LOOKUP_SEP) hints = {'instance': self} db_instance_qs = self.__class__._base_manager.db_manager(using, hints=hints).filter(pk=self.pk) # Use provided fields, if not set then reload all non-deferred fields. deferred_fields = self.get_deferred_fields() if fields is not None: fields = list(fields) db_instance_qs = db_instance_qs.only(*fields) elif deferred_fields: fields = [f.attname for f in self._meta.concrete_fields if f.attname not in deferred_fields] db_instance_qs = db_instance_qs.only(*fields) db_instance = db_instance_qs.get() non_loaded_fields = db_instance.get_deferred_fields() for field in self._meta.concrete_fields: if field.attname in non_loaded_fields: # This field wasn't refreshed - skip ahead. continue setattr(self, field.attname, getattr(db_instance, field.attname)) # Clear cached foreign keys. if field.is_relation and field.is_cached(self): field.delete_cached_value(self) # Clear cached relations. for field in self._meta.related_objects: if field.is_cached(self): field.delete_cached_value(self) self._state.db = db_instance._state.db def serializable_value(self, field_name): """ Return the value of the field name for this instance. If the field is a foreign key, return the id value instead of the object. If there's no Field object with this name on the model, return the model attribute's value. Used to serialize a field's value (in the serializer, or form output, for example). Normally, you would just access the attribute directly and not use this method. """ try: field = self._meta.get_field(field_name) except FieldDoesNotExist: return getattr(self, field_name) return getattr(self, field.attname) def save(self, force_insert=False, force_update=False, using=None, update_fields=None): """ Save the current instance. 
Override this in a subclass if you want to control the saving process. The 'force_insert' and 'force_update' parameters can be used to insist that the "save" must be an SQL insert or update (or equivalent for non-SQL backends), respectively. Normally, they should not be set. """ self._prepare_related_fields_for_save(operation_name='save') using = using or router.db_for_write(self.__class__, instance=self) if force_insert and (force_update or update_fields): raise ValueError("Cannot force both insert and updating in model saving.") deferred_fields = self.get_deferred_fields() if update_fields is not None: # If update_fields is empty, skip the save. We do also check for # no-op saves later on for inheritance cases. This bailout is # still needed for skipping signal sending. if not update_fields: return update_fields = frozenset(update_fields) field_names = set() for field in self._meta.concrete_fields: if not field.primary_key: field_names.add(field.name) if field.name != field.attname: field_names.add(field.attname) non_model_fields = update_fields.difference(field_names) if non_model_fields: raise ValueError( 'The following fields do not exist in this model, are m2m ' 'fields, or are non-concrete fields: %s' % ', '.join(non_model_fields) ) # If saving to the same database, and this model is deferred, then # automatically do an "update_fields" save on the loaded fields. elif not force_insert and deferred_fields and using == self._state.db: field_names = set() for field in self._meta.concrete_fields: if not field.primary_key and not hasattr(field, 'through'): field_names.add(field.attname) loaded_fields = field_names.difference(deferred_fields) if loaded_fields: update_fields = frozenset(loaded_fields) self.save_base(using=using, force_insert=force_insert, force_update=force_update, update_fields=update_fields) save.alters_data = True def save_base(self, raw=False, force_insert=False, force_update=False, using=None, update_fields=None): """ Handle the parts of saving which should be done only once per save, yet need to be done in raw saves, too. This includes some sanity checks and signal sending. The 'raw' argument is telling save_base not to save any parent models and not to do any changes to the values before save. This is used by fixture loading. """ using = using or router.db_for_write(self.__class__, instance=self) assert not (force_insert and (force_update or update_fields)) assert update_fields is None or update_fields cls = origin = self.__class__ # Skip proxies, but keep the origin as the proxy model. if cls._meta.proxy: cls = cls._meta.concrete_model meta = cls._meta if not meta.auto_created: pre_save.send( sender=origin, instance=self, raw=raw, using=using, update_fields=update_fields, ) # A transaction isn't needed if one query is issued. if meta.parents: context_manager = transaction.atomic(using=using, savepoint=False) else: context_manager = transaction.mark_for_rollback_on_error(using=using) with context_manager: parent_inserted = False if not raw: parent_inserted = self._save_parents(cls, using, update_fields) updated = self._save_table( raw, cls, force_insert or parent_inserted, force_update, using, update_fields, ) # Store the database on which the object was saved self._state.db = using # Once saved, this is no longer a to-be-added instance. 
self._state.adding = False # Signal that the save is complete if not meta.auto_created: post_save.send( sender=origin, instance=self, created=(not updated), update_fields=update_fields, raw=raw, using=using, ) save_base.alters_data = True def _save_parents(self, cls, using, update_fields): """Save all the parents of cls using values from self.""" meta = cls._meta inserted = False for parent, field in meta.parents.items(): # Make sure the link fields are synced between parent and self. if (field and getattr(self, parent._meta.pk.attname) is None and getattr(self, field.attname) is not None): setattr(self, parent._meta.pk.attname, getattr(self, field.attname)) parent_inserted = self._save_parents(cls=parent, using=using, update_fields=update_fields) updated = self._save_table( cls=parent, using=using, update_fields=update_fields, force_insert=parent_inserted, ) if not updated: inserted = True # Set the parent's PK value to self. if field: setattr(self, field.attname, self._get_pk_val(parent._meta)) # Since we didn't have an instance of the parent handy set # attname directly, bypassing the descriptor. Invalidate # the related object cache, in case it's been accidentally # populated. A fresh instance will be re-built from the # database if necessary. if field.is_cached(self): field.delete_cached_value(self) return inserted def _save_table(self, raw=False, cls=None, force_insert=False, force_update=False, using=None, update_fields=None): """ Do the heavy-lifting involved in saving. Update or insert the data for a single table. """ meta = cls._meta non_pks = [f for f in meta.local_concrete_fields if not f.primary_key] if update_fields: non_pks = [f for f in non_pks if f.name in update_fields or f.attname in update_fields] pk_val = self._get_pk_val(meta) if pk_val is None: pk_val = meta.pk.get_pk_value_on_save(self) setattr(self, meta.pk.attname, pk_val) pk_set = pk_val is not None if not pk_set and (force_update or update_fields): raise ValueError("Cannot force an update in save() with no primary key.") updated = False # Skip an UPDATE when adding an instance and primary key has a default. if ( not raw and not force_insert and self._state.adding and meta.pk.default and meta.pk.default is not NOT_PROVIDED ): force_insert = True # If possible, try an UPDATE. If that doesn't update anything, do an INSERT. 
if pk_set and not force_insert: base_qs = cls._base_manager.using(using) values = [(f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False))) for f in non_pks] forced_update = update_fields or force_update updated = self._do_update(base_qs, using, pk_val, values, update_fields, forced_update) if force_update and not updated: raise DatabaseError("Forced update did not affect any rows.") if update_fields and not updated: raise DatabaseError("Save with update_fields did not affect any rows.") if not updated: if meta.order_with_respect_to: # If this is a model with an order_with_respect_to # autopopulate the _order field field = meta.order_with_respect_to filter_args = field.get_filter_kwargs_for_object(self) self._order = cls._base_manager.using(using).filter(**filter_args).aggregate( _order__max=Coalesce( ExpressionWrapper(Max('_order') + Value(1), output_field=IntegerField()), Value(0), ), )['_order__max'] fields = meta.local_concrete_fields if not pk_set: fields = [f for f in fields if f is not meta.auto_field] returning_fields = meta.db_returning_fields results = self._do_insert(cls._base_manager, using, fields, returning_fields, raw) if results: for value, field in zip(results[0], returning_fields): setattr(self, field.attname, value) return updated def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update): """ Try to update the model. Return True if the model was updated (if an update query was done and a matching row was found in the DB). """ filtered = base_qs.filter(pk=pk_val) if not values: # We can end up here when saving a model in inheritance chain where # update_fields doesn't target any field in current model. In that # case we just say the update succeeded. Another case ending up here # is a model with just PK - in that case check that the PK still # exists. return update_fields is not None or filtered.exists() if self._meta.select_on_save and not forced_update: return ( filtered.exists() and # It may happen that the object is deleted from the DB right after # this check, causing the subsequent UPDATE to return zero matching # rows. The same result can occur in some rare cases when the # database returns zero despite the UPDATE being executed # successfully (a row is matched and updated). In order to # distinguish these two cases, the object's existence in the # database is again checked for if the UPDATE query returns 0. (filtered._update(values) > 0 or filtered.exists()) ) return filtered._update(values) > 0 def _do_insert(self, manager, using, fields, returning_fields, raw): """ Do an INSERT. If returning_fields is defined then this method should return the newly created data for the model. """ return manager._insert( [self], fields=fields, returning_fields=returning_fields, using=using, raw=raw, ) def _prepare_related_fields_for_save(self, operation_name): # Ensure that a model instance without a PK hasn't been assigned to # a ForeignKey or OneToOneField on this model. If the field is # nullable, allowing the save would result in silent data loss. for field in self._meta.concrete_fields: # If the related field isn't cached, then an instance hasn't been # assigned and there's no need to worry about this check. 
if field.is_relation and field.is_cached(self): obj = getattr(self, field.name, None) if not obj: continue # A pk may have been assigned manually to a model instance not # saved to the database (or auto-generated in a case like # UUIDField), but we allow the save to proceed and rely on the # database to raise an IntegrityError if applicable. If # constraints aren't supported by the database, there's the # unavoidable risk of data corruption. if obj.pk is None: # Remove the object from a related instance cache. if not field.remote_field.multiple: field.remote_field.delete_cached_value(obj) raise ValueError( "%s() prohibited to prevent data loss due to unsaved " "related object '%s'." % (operation_name, field.name) ) elif getattr(self, field.attname) in field.empty_values: # Use pk from related object if it has been saved after # an assignment. setattr(self, field.attname, obj.pk) # If the relationship's pk/to_field was changed, clear the # cached relationship. if getattr(obj, field.target_field.attname) != getattr(self, field.attname): field.delete_cached_value(self) def delete(self, using=None, keep_parents=False): if self.pk is None: raise ValueError( "%s object can't be deleted because its %s attribute is set " "to None." % (self._meta.object_name, self._meta.pk.attname) ) using = using or router.db_for_write(self.__class__, instance=self) collector = Collector(using=using) collector.collect([self], keep_parents=keep_parents) return collector.delete() delete.alters_data = True def _get_FIELD_display(self, field): value = getattr(self, field.attname) choices_dict = dict(make_hashable(field.flatchoices)) # force_str() to coerce lazy strings. return force_str(choices_dict.get(make_hashable(value), value), strings_only=True) def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs): if not self.pk: raise ValueError("get_next/get_previous cannot be used on unsaved objects.") op = 'gt' if is_next else 'lt' order = '' if is_next else '-' param = getattr(self, field.attname) q = Q((field.name, param), (f'pk__{op}', self.pk), _connector=Q.AND) q = Q(q, (f'{field.name}__{op}', param), _connector=Q.OR) qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by( '%s%s' % (order, field.name), '%spk' % order ) try: return qs[0] except IndexError: raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name) def _get_next_or_previous_in_order(self, is_next): cachename = "__%s_order_cache" % is_next if not hasattr(self, cachename): op = 'gt' if is_next else 'lt' order = '_order' if is_next else '-_order' order_field = self._meta.order_with_respect_to filter_args = order_field.get_filter_kwargs_for_object(self) obj = self.__class__._default_manager.filter(**filter_args).filter(**{ '_order__%s' % op: self.__class__._default_manager.values('_order').filter(**{ self._meta.pk.name: self.pk }) }).order_by(order)[:1].get() setattr(self, cachename, obj) return getattr(self, cachename) def prepare_database_save(self, field): if self.pk is None: raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self) return getattr(self, field.remote_field.get_related_field().attname) def clean(self): """ Hook for doing any extra model-wide validation after clean() has been called on every field by self.clean_fields. Any ValidationError raised by this method will not be associated with a particular field; it will have a special-case association with the field defined by NON_FIELD_ERRORS. 
""" pass def validate_unique(self, exclude=None): """ Check unique constraints on the model and raise ValidationError if any failed. """ unique_checks, date_checks = self._get_unique_checks(exclude=exclude) errors = self._perform_unique_checks(unique_checks) date_errors = self._perform_date_checks(date_checks) for k, v in date_errors.items(): errors.setdefault(k, []).extend(v) if errors: raise ValidationError(errors) def _get_unique_checks(self, exclude=None): """ Return a list of checks to perform. Since validate_unique() could be called from a ModelForm, some fields may have been excluded; we can't perform a unique check on a model that is missing fields involved in that check. Fields that did not validate should also be excluded, but they need to be passed in via the exclude argument. """ if exclude is None: exclude = [] unique_checks = [] unique_togethers = [(self.__class__, self._meta.unique_together)] constraints = [(self.__class__, self._meta.total_unique_constraints)] for parent_class in self._meta.get_parent_list(): if parent_class._meta.unique_together: unique_togethers.append((parent_class, parent_class._meta.unique_together)) if parent_class._meta.total_unique_constraints: constraints.append( (parent_class, parent_class._meta.total_unique_constraints) ) for model_class, unique_together in unique_togethers: for check in unique_together: if not any(name in exclude for name in check): # Add the check if the field isn't excluded. unique_checks.append((model_class, tuple(check))) for model_class, model_constraints in constraints: for constraint in model_constraints: if not any(name in exclude for name in constraint.fields): unique_checks.append((model_class, constraint.fields)) # These are checks for the unique_for_<date/year/month>. date_checks = [] # Gather a list of checks for fields declared as unique and add them to # the list of checks. fields_with_class = [(self.__class__, self._meta.local_fields)] for parent_class in self._meta.get_parent_list(): fields_with_class.append((parent_class, parent_class._meta.local_fields)) for model_class, fields in fields_with_class: for f in fields: name = f.name if name in exclude: continue if f.unique: unique_checks.append((model_class, (name,))) if f.unique_for_date and f.unique_for_date not in exclude: date_checks.append((model_class, 'date', name, f.unique_for_date)) if f.unique_for_year and f.unique_for_year not in exclude: date_checks.append((model_class, 'year', name, f.unique_for_year)) if f.unique_for_month and f.unique_for_month not in exclude: date_checks.append((model_class, 'month', name, f.unique_for_month)) return unique_checks, date_checks def _perform_unique_checks(self, unique_checks): errors = {} for model_class, unique_check in unique_checks: # Try to look up an existing object with the same values as this # object's values for all the unique field. lookup_kwargs = {} for field_name in unique_check: f = self._meta.get_field(field_name) lookup_value = getattr(self, f.attname) # TODO: Handle multiple backends with different feature flags. 
if (lookup_value is None or (lookup_value == '' and connection.features.interprets_empty_strings_as_nulls)): # no value, skip the lookup continue if f.primary_key and not self._state.adding: # no need to check for unique primary key when editing continue lookup_kwargs[str(field_name)] = lookup_value # some fields were skipped, no reason to do the check if len(unique_check) != len(lookup_kwargs): continue qs = model_class._default_manager.filter(**lookup_kwargs) # Exclude the current object from the query if we are editing an # instance (as opposed to creating a new one) # Note that we need to use the pk as defined by model_class, not # self.pk. These can be different fields because model inheritance # allows single model to have effectively multiple primary keys. # Refs #17615. model_class_pk = self._get_pk_val(model_class._meta) if not self._state.adding and model_class_pk is not None: qs = qs.exclude(pk=model_class_pk) if qs.exists(): if len(unique_check) == 1: key = unique_check[0] else: key = NON_FIELD_ERRORS errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check)) return errors def _perform_date_checks(self, date_checks): errors = {} for model_class, lookup_type, field, unique_for in date_checks: lookup_kwargs = {} # there's a ticket to add a date lookup, we can remove this special # case if that makes it's way in date = getattr(self, unique_for) if date is None: continue if lookup_type == 'date': lookup_kwargs['%s__day' % unique_for] = date.day lookup_kwargs['%s__month' % unique_for] = date.month lookup_kwargs['%s__year' % unique_for] = date.year else: lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type) lookup_kwargs[field] = getattr(self, field) qs = model_class._default_manager.filter(**lookup_kwargs) # Exclude the current object from the query if we are editing an # instance (as opposed to creating a new one) if not self._state.adding and self.pk is not None: qs = qs.exclude(pk=self.pk) if qs.exists(): errors.setdefault(field, []).append( self.date_error_message(lookup_type, field, unique_for) ) return errors def date_error_message(self, lookup_type, field_name, unique_for): opts = self._meta field = opts.get_field(field_name) return ValidationError( message=field.error_messages['unique_for_date'], code='unique_for_date', params={ 'model': self, 'model_name': capfirst(opts.verbose_name), 'lookup_type': lookup_type, 'field': field_name, 'field_label': capfirst(field.verbose_name), 'date_field': unique_for, 'date_field_label': capfirst(opts.get_field(unique_for).verbose_name), } ) def unique_error_message(self, model_class, unique_check): opts = model_class._meta params = { 'model': self, 'model_class': model_class, 'model_name': capfirst(opts.verbose_name), 'unique_check': unique_check, } # A unique field if len(unique_check) == 1: field = opts.get_field(unique_check[0]) params['field_label'] = capfirst(field.verbose_name) return ValidationError( message=field.error_messages['unique'], code='unique', params=params, ) # unique_together else: field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check] params['field_labels'] = get_text_list(field_labels, _('and')) return ValidationError( message=_("%(model_name)s with this %(field_labels)s already exists."), code='unique_together', params=params, ) def full_clean(self, exclude=None, validate_unique=True): """ Call clean_fields(), clean(), and validate_unique() on the model. Raise a ValidationError for any errors that occur. 
""" errors = {} if exclude is None: exclude = [] else: exclude = list(exclude) try: self.clean_fields(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) # Form.clean() is run even if other validation fails, so do the # same with Model.clean() for consistency. try: self.clean() except ValidationError as e: errors = e.update_error_dict(errors) # Run unique checks, but only for fields that passed validation. if validate_unique: for name in errors: if name != NON_FIELD_ERRORS and name not in exclude: exclude.append(name) try: self.validate_unique(exclude=exclude) except ValidationError as e: errors = e.update_error_dict(errors) if errors: raise ValidationError(errors) def clean_fields(self, exclude=None): """ Clean all fields and raise a ValidationError containing a dict of all validation errors if any occur. """ if exclude is None: exclude = [] errors = {} for f in self._meta.fields: if f.name in exclude: continue # Skip validation for empty fields with blank=True. The developer # is responsible for making sure they have a valid value. raw_value = getattr(self, f.attname) if f.blank and raw_value in f.empty_values: continue try: setattr(self, f.attname, f.clean(raw_value, self)) except ValidationError as e: errors[f.name] = e.error_list if errors: raise ValidationError(errors) @classmethod def check(cls, **kwargs): errors = [*cls._check_swappable(), *cls._check_model(), *cls._check_managers(**kwargs)] if not cls._meta.swapped: databases = kwargs.get('databases') or [] errors += [ *cls._check_fields(**kwargs), *cls._check_m2m_through_same_relationship(), *cls._check_long_column_names(databases), ] clash_errors = ( *cls._check_id_field(), *cls._check_field_name_clashes(), *cls._check_model_name_db_lookup_clashes(), *cls._check_property_name_related_field_accessor_clashes(), *cls._check_single_primary_key(), ) errors.extend(clash_errors) # If there are field name clashes, hide consequent column name # clashes. if not clash_errors: errors.extend(cls._check_column_name_clashes()) errors += [ *cls._check_index_together(), *cls._check_unique_together(), *cls._check_indexes(databases), *cls._check_ordering(), *cls._check_constraints(databases), *cls._check_default_pk(), ] return errors @classmethod def _check_default_pk(cls): if ( not cls._meta.abstract and cls._meta.pk.auto_created and # Inherited PKs are checked in parents models. not ( isinstance(cls._meta.pk, OneToOneField) and cls._meta.pk.remote_field.parent_link ) and not settings.is_overridden('DEFAULT_AUTO_FIELD') and cls._meta.app_config and not cls._meta.app_config._is_default_auto_field_overridden ): return [ checks.Warning( f"Auto-created primary key used when not defining a " f"primary key type, by default " f"'{settings.DEFAULT_AUTO_FIELD}'.", hint=( f"Configure the DEFAULT_AUTO_FIELD setting or the " f"{cls._meta.app_config.__class__.__qualname__}." f"default_auto_field attribute to point to a subclass " f"of AutoField, e.g. 'django.db.models.BigAutoField'." ), obj=cls, id='models.W042', ), ] return [] @classmethod def _check_swappable(cls): """Check if the swapped model exists.""" errors = [] if cls._meta.swapped: try: apps.get_model(cls._meta.swapped) except ValueError: errors.append( checks.Error( "'%s' is not of the form 'app_label.app_name'." % cls._meta.swappable, id='models.E001', ) ) except LookupError: app_label, model_name = cls._meta.swapped.split('.') errors.append( checks.Error( "'%s' references '%s.%s', which has not been " "installed, or is abstract." 
% ( cls._meta.swappable, app_label, model_name ), id='models.E002', ) ) return errors @classmethod def _check_model(cls): errors = [] if cls._meta.proxy: if cls._meta.local_fields or cls._meta.local_many_to_many: errors.append( checks.Error( "Proxy model '%s' contains model fields." % cls.__name__, id='models.E017', ) ) return errors @classmethod def _check_managers(cls, **kwargs): """Perform all manager checks.""" errors = [] for manager in cls._meta.managers: errors.extend(manager.check(**kwargs)) return errors @classmethod def _check_fields(cls, **kwargs): """Perform all field checks.""" errors = [] for field in cls._meta.local_fields: errors.extend(field.check(**kwargs)) for field in cls._meta.local_many_to_many: errors.extend(field.check(from_model=cls, **kwargs)) return errors @classmethod def _check_m2m_through_same_relationship(cls): """ Check if no relationship model is used by more than one m2m field. """ errors = [] seen_intermediary_signatures = [] fields = cls._meta.local_many_to_many # Skip when the target model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase)) # Skip when the relationship model wasn't found. fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase)) for f in fields: signature = (f.remote_field.model, cls, f.remote_field.through, f.remote_field.through_fields) if signature in seen_intermediary_signatures: errors.append( checks.Error( "The model has two identical many-to-many relations " "through the intermediate model '%s'." % f.remote_field.through._meta.label, obj=cls, id='models.E003', ) ) else: seen_intermediary_signatures.append(signature) return errors @classmethod def _check_id_field(cls): """Check if `id` field is a primary key.""" fields = [f for f in cls._meta.local_fields if f.name == 'id' and f != cls._meta.pk] # fields is empty or consists of the invalid "id" field if fields and not fields[0].primary_key and cls._meta.pk.name == 'id': return [ checks.Error( "'id' can only be used as a field name if the field also " "sets 'primary_key=True'.", obj=cls, id='models.E004', ) ] else: return [] @classmethod def _check_field_name_clashes(cls): """Forbid field shadowing in multi-table inheritance.""" errors = [] used_fields = {} # name or attname -> field # Check that multi-inheritance doesn't cause field name shadowing. for parent in cls._meta.get_parent_list(): for f in parent._meta.local_fields: clash = used_fields.get(f.name) or used_fields.get(f.attname) or None if clash: errors.append( checks.Error( "The field '%s' from parent model " "'%s' clashes with the field '%s' " "from parent model '%s'." % ( clash.name, clash.model._meta, f.name, f.model._meta ), obj=cls, id='models.E005', ) ) used_fields[f.name] = f used_fields[f.attname] = f # Check that fields defined in the model don't clash with fields from # parents, including auto-generated fields like multi-table inheritance # child accessors. for parent in cls._meta.get_parent_list(): for f in parent._meta.get_fields(): if f not in used_fields: used_fields[f.name] = f for f in cls._meta.local_fields: clash = used_fields.get(f.name) or used_fields.get(f.attname) or None # Note that we may detect clash between user-defined non-unique # field "id" and automatically added unique field "id", both # defined at the same model. This special case is considered in # _check_id_field and here we ignore it. 
id_conflict = f.name == "id" and clash and clash.name == "id" and clash.model == cls if clash and not id_conflict: errors.append( checks.Error( "The field '%s' clashes with the field '%s' " "from model '%s'." % ( f.name, clash.name, clash.model._meta ), obj=f, id='models.E006', ) ) used_fields[f.name] = f used_fields[f.attname] = f return errors @classmethod def _check_column_name_clashes(cls): # Store a list of column names which have already been used by other fields. used_column_names = [] errors = [] for f in cls._meta.local_fields: _, column_name = f.get_attname_column() # Ensure the column name is not already in use. if column_name and column_name in used_column_names: errors.append( checks.Error( "Field '%s' has column name '%s' that is used by " "another field." % (f.name, column_name), hint="Specify a 'db_column' for the field.", obj=cls, id='models.E007' ) ) else: used_column_names.append(column_name) return errors @classmethod def _check_model_name_db_lookup_clashes(cls): errors = [] model_name = cls.__name__ if model_name.startswith('_') or model_name.endswith('_'): errors.append( checks.Error( "The model name '%s' cannot start or end with an underscore " "as it collides with the query lookup syntax." % model_name, obj=cls, id='models.E023' ) ) elif LOOKUP_SEP in model_name: errors.append( checks.Error( "The model name '%s' cannot contain double underscores as " "it collides with the query lookup syntax." % model_name, obj=cls, id='models.E024' ) ) return errors @classmethod def _check_property_name_related_field_accessor_clashes(cls): errors = [] property_names = cls._meta._property_names related_field_accessors = ( f.get_attname() for f in cls._meta._get_fields(reverse=False) if f.is_relation and f.related_model is not None ) for accessor in related_field_accessors: if accessor in property_names: errors.append( checks.Error( "The property '%s' clashes with a related field " "accessor." 
% accessor, obj=cls, id='models.E025', ) ) return errors @classmethod def _check_single_primary_key(cls): errors = [] if sum(1 for f in cls._meta.local_fields if f.primary_key) > 1: errors.append( checks.Error( "The model cannot have more than one field with " "'primary_key=True'.", obj=cls, id='models.E026', ) ) return errors @classmethod def _check_index_together(cls): """Check the value of "index_together" option.""" if not isinstance(cls._meta.index_together, (tuple, list)): return [ checks.Error( "'index_together' must be a list or tuple.", obj=cls, id='models.E008', ) ] elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.index_together): return [ checks.Error( "All 'index_together' elements must be lists or tuples.", obj=cls, id='models.E009', ) ] else: errors = [] for fields in cls._meta.index_together: errors.extend(cls._check_local_fields(fields, "index_together")) return errors @classmethod def _check_unique_together(cls): """Check the value of "unique_together" option.""" if not isinstance(cls._meta.unique_together, (tuple, list)): return [ checks.Error( "'unique_together' must be a list or tuple.", obj=cls, id='models.E010', ) ] elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.unique_together): return [ checks.Error( "All 'unique_together' elements must be lists or tuples.", obj=cls, id='models.E011', ) ] else: errors = [] for fields in cls._meta.unique_together: errors.extend(cls._check_local_fields(fields, "unique_together")) return errors @classmethod def _check_indexes(cls, databases): """Check fields, names, and conditions of indexes.""" errors = [] references = set() for index in cls._meta.indexes: # Index name can't start with an underscore or a number, restricted # for cross-database compatibility with Oracle. if index.name[0] == '_' or index.name[0].isdigit(): errors.append( checks.Error( "The index name '%s' cannot start with an underscore " "or a number." % index.name, obj=cls, id='models.E033', ), ) if len(index.name) > index.max_name_length: errors.append( checks.Error( "The index name '%s' cannot be longer than %d " "characters." % (index.name, index.max_name_length), obj=cls, id='models.E034', ), ) if index.contains_expressions: for expression in index.expressions: references.update( ref[0] for ref in cls._get_expr_references(expression) ) for db in databases: if not router.allow_migrate_model(db, cls): continue connection = connections[db] if not ( connection.features.supports_partial_indexes or 'supports_partial_indexes' in cls._meta.required_db_features ) and any(index.condition is not None for index in cls._meta.indexes): errors.append( checks.Warning( '%s does not support indexes with conditions.' % connection.display_name, hint=( "Conditions will be ignored. Silence this warning " "if you don't care about it." ), obj=cls, id='models.W037', ) ) if not ( connection.features.supports_covering_indexes or 'supports_covering_indexes' in cls._meta.required_db_features ) and any(index.include for index in cls._meta.indexes): errors.append( checks.Warning( '%s does not support indexes with non-key columns.' % connection.display_name, hint=( "Non-key columns will be ignored. Silence this " "warning if you don't care about it." ), obj=cls, id='models.W040', ) ) if not ( connection.features.supports_expression_indexes or 'supports_expression_indexes' in cls._meta.required_db_features ) and any(index.contains_expressions for index in cls._meta.indexes): errors.append( checks.Warning( '%s does not support indexes on expressions.' 
% connection.display_name, hint=( "An index won't be created. Silence this warning " "if you don't care about it." ), obj=cls, id='models.W043', ) ) fields = [field for index in cls._meta.indexes for field, _ in index.fields_orders] fields += [include for index in cls._meta.indexes for include in index.include] fields += references errors.extend(cls._check_local_fields(fields, 'indexes')) return errors @classmethod def _check_local_fields(cls, fields, option): from django.db import models # In order to avoid hitting the relation tree prematurely, we use our # own fields_map instead of using get_field() forward_fields_map = {} for field in cls._meta._get_fields(reverse=False): forward_fields_map[field.name] = field if hasattr(field, 'attname'): forward_fields_map[field.attname] = field errors = [] for field_name in fields: try: field = forward_fields_map[field_name] except KeyError: errors.append( checks.Error( "'%s' refers to the nonexistent field '%s'." % ( option, field_name, ), obj=cls, id='models.E012', ) ) else: if isinstance(field.remote_field, models.ManyToManyRel): errors.append( checks.Error( "'%s' refers to a ManyToManyField '%s', but " "ManyToManyFields are not permitted in '%s'." % ( option, field_name, option, ), obj=cls, id='models.E013', ) ) elif field not in cls._meta.local_fields: errors.append( checks.Error( "'%s' refers to field '%s' which is not local to model '%s'." % (option, field_name, cls._meta.object_name), hint="This issue may be caused by multi-table inheritance.", obj=cls, id='models.E016', ) ) return errors @classmethod def _check_ordering(cls): """ Check "ordering" option -- is it a list of strings and do all fields exist? """ if cls._meta._ordering_clash: return [ checks.Error( "'ordering' and 'order_with_respect_to' cannot be used together.", obj=cls, id='models.E021', ), ] if cls._meta.order_with_respect_to or not cls._meta.ordering: return [] if not isinstance(cls._meta.ordering, (list, tuple)): return [ checks.Error( "'ordering' must be a tuple or list (even if you want to order by only one field).", obj=cls, id='models.E014', ) ] errors = [] fields = cls._meta.ordering # Skip expressions and '?' fields. fields = (f for f in fields if isinstance(f, str) and f != '?') # Convert "-field" to "field". fields = ((f[1:] if f.startswith('-') else f) for f in fields) # Separate related fields and non-related fields. _fields = [] related_fields = [] for f in fields: if LOOKUP_SEP in f: related_fields.append(f) else: _fields.append(f) fields = _fields # Check related fields. for field in related_fields: _cls = cls fld = None for part in field.split(LOOKUP_SEP): try: # pk is an alias that won't be found by opts.get_field. if part == 'pk': fld = _cls._meta.pk else: fld = _cls._meta.get_field(part) if fld.is_relation: _cls = fld.get_path_info()[-1].to_opts.model else: _cls = None except (FieldDoesNotExist, AttributeError): if fld is None or ( fld.get_transform(part) is None and fld.get_lookup(part) is None ): errors.append( checks.Error( "'ordering' refers to the nonexistent field, " "related field, or lookup '%s'." % field, obj=cls, id='models.E015', ) ) # Skip ordering on pk. This is always a valid order_by field # but is an alias and therefore won't be found by opts.get_field. fields = {f for f in fields if f != 'pk'} # Check for invalid or nonexistent fields in ordering. invalid_fields = [] # Any field name that is not present in field_names does not exist. # Also, ordering by m2m fields is not allowed. 
opts = cls._meta valid_fields = set(chain.from_iterable( (f.name, f.attname) if not (f.auto_created and not f.concrete) else (f.field.related_query_name(),) for f in chain(opts.fields, opts.related_objects) )) invalid_fields.extend(fields - valid_fields) for invalid_field in invalid_fields: errors.append( checks.Error( "'ordering' refers to the nonexistent field, related " "field, or lookup '%s'." % invalid_field, obj=cls, id='models.E015', ) ) return errors @classmethod def _check_long_column_names(cls, databases): """ Check that any auto-generated column names are shorter than the limits for each database in which the model will be created. """ if not databases: return [] errors = [] allowed_len = None db_alias = None # Find the minimum max allowed length among all specified db_aliases. for db in databases: # skip databases where the model won't be created if not router.allow_migrate_model(db, cls): continue connection = connections[db] max_name_length = connection.ops.max_name_length() if max_name_length is None or connection.features.truncates_names: continue else: if allowed_len is None: allowed_len = max_name_length db_alias = db elif max_name_length < allowed_len: allowed_len = max_name_length db_alias = db if allowed_len is None: return errors for f in cls._meta.local_fields: _, column_name = f.get_attname_column() # Check if auto-generated name for the field is too long # for the database. if f.db_column is None and column_name is not None and len(column_name) > allowed_len: errors.append( checks.Error( 'Autogenerated column name too long for field "%s". ' 'Maximum length is "%s" for database "%s".' % (column_name, allowed_len, db_alias), hint="Set the column name manually using 'db_column'.", obj=cls, id='models.E018', ) ) for f in cls._meta.local_many_to_many: # Skip nonexistent models. if isinstance(f.remote_field.through, str): continue # Check if auto-generated name for the M2M field is too long # for the database. for m2m in f.remote_field.through._meta.local_fields: _, rel_name = m2m.get_attname_column() if m2m.db_column is None and rel_name is not None and len(rel_name) > allowed_len: errors.append( checks.Error( 'Autogenerated column name too long for M2M field ' '"%s". Maximum length is "%s" for database "%s".' % (rel_name, allowed_len, db_alias), hint=( "Use 'through' to create a separate model for " "M2M and then set column_name using 'db_column'." ), obj=cls, id='models.E019', ) ) return errors @classmethod def _get_expr_references(cls, expr): if isinstance(expr, Q): for child in expr.children: if isinstance(child, tuple): lookup, value = child yield tuple(lookup.split(LOOKUP_SEP)) yield from cls._get_expr_references(value) else: yield from cls._get_expr_references(child) elif isinstance(expr, F): yield tuple(expr.name.split(LOOKUP_SEP)) elif hasattr(expr, 'get_source_expressions'): for src_expr in expr.get_source_expressions(): yield from cls._get_expr_references(src_expr) @classmethod def _check_constraints(cls, databases): errors = [] for db in databases: if not router.allow_migrate_model(db, cls): continue connection = connections[db] if not ( connection.features.supports_table_check_constraints or 'supports_table_check_constraints' in cls._meta.required_db_features ) and any( isinstance(constraint, CheckConstraint) for constraint in cls._meta.constraints ): errors.append( checks.Warning( '%s does not support check constraints.' % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." 
), obj=cls, id='models.W027', ) ) if not ( connection.features.supports_partial_indexes or 'supports_partial_indexes' in cls._meta.required_db_features ) and any( isinstance(constraint, UniqueConstraint) and constraint.condition is not None for constraint in cls._meta.constraints ): errors.append( checks.Warning( '%s does not support unique constraints with ' 'conditions.' % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id='models.W036', ) ) if not ( connection.features.supports_deferrable_unique_constraints or 'supports_deferrable_unique_constraints' in cls._meta.required_db_features ) and any( isinstance(constraint, UniqueConstraint) and constraint.deferrable is not None for constraint in cls._meta.constraints ): errors.append( checks.Warning( '%s does not support deferrable unique constraints.' % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id='models.W038', ) ) if not ( connection.features.supports_covering_indexes or 'supports_covering_indexes' in cls._meta.required_db_features ) and any( isinstance(constraint, UniqueConstraint) and constraint.include for constraint in cls._meta.constraints ): errors.append( checks.Warning( '%s does not support unique constraints with non-key ' 'columns.' % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id='models.W039', ) ) if not ( connection.features.supports_expression_indexes or 'supports_expression_indexes' in cls._meta.required_db_features ) and any( isinstance(constraint, UniqueConstraint) and constraint.contains_expressions for constraint in cls._meta.constraints ): errors.append( checks.Warning( '%s does not support unique constraints on ' 'expressions.' % connection.display_name, hint=( "A constraint won't be created. Silence this " "warning if you don't care about it." ), obj=cls, id='models.W044', ) ) fields = set(chain.from_iterable( (*constraint.fields, *constraint.include) for constraint in cls._meta.constraints if isinstance(constraint, UniqueConstraint) )) references = set() for constraint in cls._meta.constraints: if isinstance(constraint, UniqueConstraint): if ( connection.features.supports_partial_indexes or 'supports_partial_indexes' not in cls._meta.required_db_features ) and isinstance(constraint.condition, Q): references.update(cls._get_expr_references(constraint.condition)) if ( connection.features.supports_expression_indexes or 'supports_expression_indexes' not in cls._meta.required_db_features ) and constraint.contains_expressions: for expression in constraint.expressions: references.update(cls._get_expr_references(expression)) elif isinstance(constraint, CheckConstraint): if ( connection.features.supports_table_check_constraints or 'supports_table_check_constraints' not in cls._meta.required_db_features ) and isinstance(constraint.check, Q): references.update(cls._get_expr_references(constraint.check)) for field_name, *lookups in references: # pk is an alias that won't be found by opts.get_field. if field_name != 'pk': fields.add(field_name) if not lookups: # If it has no lookups it cannot result in a JOIN. continue try: if field_name == 'pk': field = cls._meta.pk else: field = cls._meta.get_field(field_name) if not field.is_relation or field.many_to_many or field.one_to_many: continue except FieldDoesNotExist: continue # JOIN must happen at the first lookup. 
first_lookup = lookups[0] if ( hasattr(field, 'get_transform') and hasattr(field, 'get_lookup') and field.get_transform(first_lookup) is None and field.get_lookup(first_lookup) is None ): errors.append( checks.Error( "'constraints' refers to the joined field '%s'." % LOOKUP_SEP.join([field_name] + lookups), obj=cls, id='models.E041', ) ) errors.extend(cls._check_local_fields(fields, 'constraints')) return errors ############################################ # HELPER FUNCTIONS (CURRIED MODEL METHODS) # ############################################ # ORDERING METHODS ######################### def method_set_order(self, ordered_obj, id_list, using=None): if using is None: using = DEFAULT_DB_ALIAS order_wrt = ordered_obj._meta.order_with_respect_to filter_args = order_wrt.get_forward_related_filter(self) ordered_obj.objects.db_manager(using).filter(**filter_args).bulk_update([ ordered_obj(pk=pk, _order=order) for order, pk in enumerate(id_list) ], ['_order']) def method_get_order(self, ordered_obj): order_wrt = ordered_obj._meta.order_with_respect_to filter_args = order_wrt.get_forward_related_filter(self) pk_name = ordered_obj._meta.pk.name return ordered_obj.objects.filter(**filter_args).values_list(pk_name, flat=True) def make_foreign_order_accessors(model, related_model): setattr( related_model, 'get_%s_order' % model.__name__.lower(), partialmethod(method_get_order, model) ) setattr( related_model, 'set_%s_order' % model.__name__.lower(), partialmethod(method_set_order, model) ) ######## # MISC # ######## def model_unpickle(model_id): """Used to unpickle Model subclasses with deferred fields.""" if isinstance(model_id, tuple): model = apps.get_model(*model_id) else: # Backwards compat - the model was cached directly in earlier versions. model = model_id return model.__new__(model) model_unpickle.__safe_for_unpickle__ = True
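
# A minimal usage sketch (illustrative only; the model names are hypothetical)
# of the ordering accessors that make_foreign_order_accessors() installs for
# models declaring Meta.order_with_respect_to:
#
#     class Answer(models.Model):
#         question = models.ForeignKey(Question, models.CASCADE)
#
#         class Meta:
#             order_with_respect_to = 'question'
#
#     # Django adds question.get_answer_order() and
#     # question.set_answer_order(), partialmethod-bound versions of
#     # method_get_order() and method_set_order() above:
#     question.set_answer_order([3, 1, 2])
#     question.get_answer_order()  # -> <QuerySet [3, 1, 2]>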
import functools from collections import namedtuple def make_model_tuple(model): """ Take a model or a string of the form "app_label.ModelName" and return a corresponding ("app_label", "modelname") tuple. If a tuple is passed in, assume it's a valid model tuple already and return it unchanged. """ try: if isinstance(model, tuple): model_tuple = model elif isinstance(model, str): app_label, model_name = model.split(".") model_tuple = app_label, model_name.lower() else: model_tuple = model._meta.app_label, model._meta.model_name assert len(model_tuple) == 2 return model_tuple except (ValueError, AssertionError): raise ValueError( "Invalid model reference '%s'. String model references " "must be of the form 'app_label.ModelName'." % model ) def resolve_callables(mapping): """ Generate key/value pairs for the given mapping where the values are evaluated if they're callable. """ for k, v in mapping.items(): yield k, v() if callable(v) else v def unpickle_named_row(names, values): return create_namedtuple_class(*names)(*values) @functools.lru_cache() def create_namedtuple_class(*names): # Cache type() with @lru_cache() since it's too slow to be called for every # QuerySet evaluation. def __reduce__(self): return unpickle_named_row, (names, tuple(self)) return type( 'Row', (namedtuple('Row', names),), {'__reduce__': __reduce__, '__slots__': ()}, )
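
# A doctest-style sketch (illustrative only, not part of the module) of the
# helpers above:
#
#     >>> make_model_tuple('auth.User')
#     ('auth', 'user')
#     >>> dict(resolve_callables({'a': 1, 'b': lambda: 2}))
#     {'a': 1, 'b': 2}
#
# create_namedtuple_class() memoizes the generated Row type so QuerySet
# evaluation doesn't rebuild it per call, and __reduce__ routes pickling
# through unpickle_named_row():
#
#     >>> Row = create_namedtuple_class('id', 'name')
#     >>> Row(1, 'spam')
#     Row(id=1, name='spam')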
""" Various data structures used in query construction. Factored out from django.db.models.query to avoid making the main module very large and/or so that they can be used by other modules without getting into circular import difficulties. """ import copy import functools import inspect from collections import namedtuple from django.core.exceptions import FieldError from django.db.models.constants import LOOKUP_SEP from django.utils import tree # PathInfo is used when converting lookups (fk__somecol). The contents # describe the relation in Model terms (model Options and Fields for both # sides of the relation. The join_field is the field backing the relation. PathInfo = namedtuple('PathInfo', 'from_opts to_opts target_fields join_field m2m direct filtered_relation') def subclasses(cls): yield cls for subclass in cls.__subclasses__(): yield from subclasses(subclass) class Q(tree.Node): """ Encapsulate filters as objects that can then be combined logically (using `&` and `|`). """ # Connection types AND = 'AND' OR = 'OR' default = AND conditional = True def __init__(self, *args, _connector=None, _negated=False, **kwargs): super().__init__(children=[*args, *sorted(kwargs.items())], connector=_connector, negated=_negated) def _combine(self, other, conn): if not(isinstance(other, Q) or getattr(other, 'conditional', False) is True): raise TypeError(other) if not self: return other.copy() if hasattr(other, 'copy') else copy.copy(other) elif isinstance(other, Q) and not other: _, args, kwargs = self.deconstruct() return type(self)(*args, **kwargs) obj = type(self)() obj.connector = conn obj.add(self, conn) obj.add(other, conn) return obj def __or__(self, other): return self._combine(other, self.OR) def __and__(self, other): return self._combine(other, self.AND) def __invert__(self): obj = type(self)() obj.add(self, self.AND) obj.negate() return obj def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): # We must promote any new joins to left outer joins so that when Q is # used as an expression, rows aren't filtered due to joins. clause, joins = query._add_q( self, reuse, allow_joins=allow_joins, split_subq=False, check_filterable=False, ) query.promote_joins(joins) return clause def deconstruct(self): path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__) if path.startswith('django.db.models.query_utils'): path = path.replace('django.db.models.query_utils', 'django.db.models') args = tuple(self.children) kwargs = {} if self.connector != self.default: kwargs['_connector'] = self.connector if self.negated: kwargs['_negated'] = True return path, args, kwargs class DeferredAttribute: """ A wrapper for a deferred-loading field. When the value is read from this object the first time, the query is executed. """ def __init__(self, field): self.field = field def __get__(self, instance, cls=None): """ Retrieve and caches the value from the datastore on the first lookup. Return the cached value. """ if instance is None: return self data = instance.__dict__ field_name = self.field.attname if field_name not in data: # Let's see if the field is part of the parent chain. If so we # might be able to reuse the already loaded value. Refs #18343. val = self._check_parent_chain(instance) if val is None: instance.refresh_from_db(fields=[field_name]) else: data[field_name] = val return data[field_name] def _check_parent_chain(self, instance): """ Check if the field value can be fetched from a parent field already loaded in the instance. 
This can be done if the to-be fetched field is a primary key field. """ opts = instance._meta link_field = opts.get_ancestor_link(self.field.model) if self.field.primary_key and self.field != link_field: return getattr(instance, link_field.attname) return None class RegisterLookupMixin: @classmethod def _get_lookup(cls, lookup_name): return cls.get_lookups().get(lookup_name, None) @classmethod @functools.lru_cache(maxsize=None) def get_lookups(cls): class_lookups = [parent.__dict__.get('class_lookups', {}) for parent in inspect.getmro(cls)] return cls.merge_dicts(class_lookups) def get_lookup(self, lookup_name): from django.db.models.lookups import Lookup found = self._get_lookup(lookup_name) if found is None and hasattr(self, 'output_field'): return self.output_field.get_lookup(lookup_name) if found is not None and not issubclass(found, Lookup): return None return found def get_transform(self, lookup_name): from django.db.models.lookups import Transform found = self._get_lookup(lookup_name) if found is None and hasattr(self, 'output_field'): return self.output_field.get_transform(lookup_name) if found is not None and not issubclass(found, Transform): return None return found @staticmethod def merge_dicts(dicts): """ Merge dicts in reverse to preference the order of the original list. e.g., merge_dicts([a, b]) will preference the keys in 'a' over those in 'b'. """ merged = {} for d in reversed(dicts): merged.update(d) return merged @classmethod def _clear_cached_lookups(cls): for subclass in subclasses(cls): subclass.get_lookups.cache_clear() @classmethod def register_lookup(cls, lookup, lookup_name=None): if lookup_name is None: lookup_name = lookup.lookup_name if 'class_lookups' not in cls.__dict__: cls.class_lookups = {} cls.class_lookups[lookup_name] = lookup cls._clear_cached_lookups() return lookup @classmethod def _unregister_lookup(cls, lookup, lookup_name=None): """ Remove given lookup from cls lookups. For use in tests only as it's not thread-safe. """ if lookup_name is None: lookup_name = lookup.lookup_name del cls.class_lookups[lookup_name] def select_related_descend(field, restricted, requested, load_fields, reverse=False): """ Return True if this field should be used to descend deeper for select_related() purposes. Used by both the query construction code (sql.query.fill_related_selections()) and the model instance creation code (query.get_klass_info()). Arguments: * field - the field to be checked * restricted - a boolean field, indicating if the field list has been manually restricted using a requested clause) * requested - The select_related() dictionary. * load_fields - the set of fields to be loaded on this model * reverse - boolean, True if we are checking a reverse select related """ if not field.remote_field: return False if field.remote_field.parent_link and not reverse: return False if restricted: if reverse and field.related_query_name() not in requested: return False if not reverse and field.name not in requested: return False if not restricted and field.null: return False if load_fields: if field.attname not in load_fields: if restricted and field.name in requested: msg = ( 'Field %s.%s cannot be both deferred and traversed using ' 'select_related at the same time.' ) % (field.model._meta.object_name, field.name) raise FieldError(msg) return True def refs_expression(lookup_parts, annotations): """ Check if the lookup_parts contains references to the given annotations set. 
Because the LOOKUP_SEP is contained in the default annotation names, check each prefix of the lookup_parts for a match. """ for n in range(1, len(lookup_parts) + 1): level_n_lookup = LOOKUP_SEP.join(lookup_parts[0:n]) if level_n_lookup in annotations and annotations[level_n_lookup]: return annotations[level_n_lookup], lookup_parts[n:] return False, () def check_rel_lookup_compatibility(model, target_opts, field): """ Check that self.model is compatible with target_opts. Compatibility is OK if: 1) model and opts match (where proxy inheritance is removed) 2) model is parent of opts' model or the other way around """ def check(opts): return ( model._meta.concrete_model == opts.concrete_model or opts.concrete_model in model._meta.get_parent_list() or model in opts.get_parent_list() ) # If the field is a primary key, then doing a query against the field's # model is ok, too. Consider the case: # class Restaurant(models.Model): # place = OneToOneField(Place, primary_key=True): # Restaurant.objects.filter(pk__in=Restaurant.objects.all()). # If we didn't have the primary key check, then pk__in (== place__in) would # give Place's opts as the target opts, but Restaurant isn't compatible # with that. This logic applies only to primary keys, as when doing __in=qs, # we are going to turn this into __in=qs.values('pk') later on. return ( check(target_opts) or (getattr(field, 'primary_key', False) and check(field.model._meta)) ) class FilteredRelation: """Specify custom filtering in the ON clause of SQL joins.""" def __init__(self, relation_name, *, condition=Q()): if not relation_name: raise ValueError('relation_name cannot be empty.') self.relation_name = relation_name self.alias = None if not isinstance(condition, Q): raise ValueError('condition argument must be a Q() instance.') self.condition = condition self.path = [] def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return ( self.relation_name == other.relation_name and self.alias == other.alias and self.condition == other.condition ) def clone(self): clone = FilteredRelation(self.relation_name, condition=self.condition) clone.alias = self.alias clone.path = self.path[:] return clone def resolve_expression(self, *args, **kwargs): """ QuerySet.annotate() only accepts expression-like arguments (with a resolve_expression() method). """ raise NotImplementedError('FilteredRelation.resolve_expression() is unused.') def as_sql(self, compiler, connection): # Resolve the condition in Join.filtered_relation. query = compiler.query where = query.build_filtered_relation_q(self.condition, reuse=set(self.path)) return compiler.compile(where)
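
# An illustrative sketch (not part of the module) of the Q API defined above.
# Combining uses _combine(); deconstruct() is what migrations serialize:
#
#     >>> q = Q(price__gt=10) | Q(name='spam')
#     >>> q.connector, q.negated
#     ('OR', False)
#     >>> Q(name='spam').deconstruct()
#     ('django.db.models.Q', (('name', 'spam'),), {})
#
# FilteredRelation customizes the ON clause of the JOIN; the model and field
# names below are hypothetical:
#
#     Restaurant.objects.annotate(
#         cheap_pizzas=FilteredRelation(
#             'pizzas', condition=Q(pizzas__price__lt=10),
#         ),
#     ).filter(cheap_pizzas__name__startswith='Margherita')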
import copy import datetime import functools import inspect from decimal import Decimal from uuid import UUID from django.core.exceptions import EmptyResultSet, FieldError from django.db import DatabaseError, NotSupportedError, connection from django.db.models import fields from django.db.models.constants import LOOKUP_SEP from django.db.models.query_utils import Q from django.utils.deconstruct import deconstructible from django.utils.functional import cached_property from django.utils.hashable import make_hashable class SQLiteNumericMixin: """ Some expressions with output_field=DecimalField() must be cast to numeric to be properly filtered. """ def as_sqlite(self, compiler, connection, **extra_context): sql, params = self.as_sql(compiler, connection, **extra_context) try: if self.output_field.get_internal_type() == 'DecimalField': sql = 'CAST(%s AS NUMERIC)' % sql except FieldError: pass return sql, params class Combinable: """ Provide the ability to combine one or two objects with some connector. For example F('foo') + F('bar'). """ # Arithmetic connectors ADD = '+' SUB = '-' MUL = '*' DIV = '/' POW = '^' # The following is a quoted % operator - it is quoted because it can be # used in strings that also have parameter substitution. MOD = '%%' # Bitwise operators - note that these are generated by .bitand() # and .bitor(), the '&' and '|' are reserved for boolean operator # usage. BITAND = '&' BITOR = '|' BITLEFTSHIFT = '<<' BITRIGHTSHIFT = '>>' BITXOR = '#' def _combine(self, other, connector, reversed): if not hasattr(other, 'resolve_expression'): # everything must be resolvable to an expression other = Value(other) if reversed: return CombinedExpression(other, connector, self) return CombinedExpression(self, connector, other) ############# # OPERATORS # ############# def __neg__(self): return self._combine(-1, self.MUL, False) def __add__(self, other): return self._combine(other, self.ADD, False) def __sub__(self, other): return self._combine(other, self.SUB, False) def __mul__(self, other): return self._combine(other, self.MUL, False) def __truediv__(self, other): return self._combine(other, self.DIV, False) def __mod__(self, other): return self._combine(other, self.MOD, False) def __pow__(self, other): return self._combine(other, self.POW, False) def __and__(self, other): if getattr(self, 'conditional', False) and getattr(other, 'conditional', False): return Q(self) & Q(other) raise NotImplementedError( "Use .bitand() and .bitor() for bitwise logical operations." ) def bitand(self, other): return self._combine(other, self.BITAND, False) def bitleftshift(self, other): return self._combine(other, self.BITLEFTSHIFT, False) def bitrightshift(self, other): return self._combine(other, self.BITRIGHTSHIFT, False) def bitxor(self, other): return self._combine(other, self.BITXOR, False) def __or__(self, other): if getattr(self, 'conditional', False) and getattr(other, 'conditional', False): return Q(self) | Q(other) raise NotImplementedError( "Use .bitand() and .bitor() for bitwise logical operations." 
        )

    def bitor(self, other):
        return self._combine(other, self.BITOR, False)

    def __radd__(self, other):
        return self._combine(other, self.ADD, True)

    def __rsub__(self, other):
        return self._combine(other, self.SUB, True)

    def __rmul__(self, other):
        return self._combine(other, self.MUL, True)

    def __rtruediv__(self, other):
        return self._combine(other, self.DIV, True)

    def __rmod__(self, other):
        return self._combine(other, self.MOD, True)

    def __rpow__(self, other):
        return self._combine(other, self.POW, True)

    def __rand__(self, other):
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )

    def __ror__(self, other):
        raise NotImplementedError(
            "Use .bitand() and .bitor() for bitwise logical operations."
        )


class BaseExpression:
    """Base class for all query expressions."""

    # aggregate specific fields
    is_summary = False
    _output_field_resolved_to_none = False
    empty_aggregate_value = NotImplemented
    # Can the expression be used in a WHERE clause?
    filterable = True
    # Can the expression be used as a source expression in Window?
    window_compatible = False

    def __init__(self, output_field=None):
        if output_field is not None:
            self.output_field = output_field

    def __getstate__(self):
        state = self.__dict__.copy()
        state.pop('convert_value', None)
        return state

    def get_db_converters(self, connection):
        return (
            []
            if self.convert_value is self._convert_value_noop
            else [self.convert_value]
        ) + self.output_field.get_db_converters(connection)

    def get_source_expressions(self):
        return []

    def set_source_expressions(self, exprs):
        assert not exprs

    def _parse_expressions(self, *expressions):
        return [
            arg if hasattr(arg, 'resolve_expression') else (
                F(arg) if isinstance(arg, str) else Value(arg)
            ) for arg in expressions
        ]

    def as_sql(self, compiler, connection):
        """
        Responsible for returning a (sql, [params]) tuple to be included
        in the current query.

        Different backends can provide their own implementation, by
        providing an `as_{vendor}` method and patching the Expression:

        ```
        def override_as_sql(self, compiler, connection):
            # custom logic
            return super().as_sql(compiler, connection)
        setattr(Expression, 'as_' + connection.vendor, override_as_sql)
        ```

        Arguments:
         * compiler: the query compiler responsible for generating the query.
           Must have a compile method, returning a (sql, [params]) tuple.
           Calling compiler(value) will return a quoted `value`.

         * connection: the database connection used for the current query.

        Return: (sql, params)
          Where `sql` is a string containing ordered sql parameters to be
          replaced with the elements of the list `params`.
        """
        raise NotImplementedError("Subclasses must implement as_sql()")

    @cached_property
    def contains_aggregate(self):
        return any(expr and expr.contains_aggregate for expr in self.get_source_expressions())

    @cached_property
    def contains_over_clause(self):
        return any(expr and expr.contains_over_clause for expr in self.get_source_expressions())

    @cached_property
    def contains_column_references(self):
        return any(expr and expr.contains_column_references for expr in self.get_source_expressions())

    def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
        """
        Provide the chance to do any preprocessing or validation before being
        added to the query.

        Arguments:
         * query: the backend query implementation
         * allow_joins: boolean allowing or denying use of joins
           in this query
         * reuse: a set of reusable joins for multijoins
         * summarize: a terminal aggregate clause
         * for_save: whether this expression is about to be used in a
           save or update

        Return: an Expression to be added to the query.
        """
        c = self.copy()
        c.is_summary = summarize
        c.set_source_expressions([
            expr.resolve_expression(query, allow_joins, reuse, summarize)
            if expr else None
            for expr in c.get_source_expressions()
        ])
        return c

    @property
    def conditional(self):
        return isinstance(self.output_field, fields.BooleanField)

    @property
    def field(self):
        return self.output_field

    @cached_property
    def output_field(self):
        """Return the output type of this expression."""
        output_field = self._resolve_output_field()
        if output_field is None:
            self._output_field_resolved_to_none = True
            raise FieldError('Cannot resolve expression type, unknown output_field')
        return output_field

    @cached_property
    def _output_field_or_none(self):
        """
        Return the output field of this expression, or None if
        _resolve_output_field() didn't return an output type.
        """
        try:
            return self.output_field
        except FieldError:
            if not self._output_field_resolved_to_none:
                raise

    def _resolve_output_field(self):
        """
        Attempt to infer the output type of the expression.

        If the output fields of all source fields match, then simply infer the
        same type here.

        This isn't always correct, but it makes sense most of the time.
        Consider the difference between `2 + 2` and `2 / 3`. Inferring the
        type here is a convenience for the common case. The user should
        supply their own output_field with more complex computations.

        If a source's output field resolves to None, exclude it from this
        check. If all sources are None, then an error is raised higher up the
        stack in the output_field property.
        """
        sources_iter = (source for source in self.get_source_fields() if source is not None)
        for output_field in sources_iter:
            for source in sources_iter:
                if not isinstance(output_field, source.__class__):
                    raise FieldError(
                        'Expression contains mixed types: %s, %s. You must '
                        'set output_field.' % (
                            output_field.__class__.__name__,
                            source.__class__.__name__,
                        )
                    )
            return output_field

    @staticmethod
    def _convert_value_noop(value, expression, connection):
        return value

    @cached_property
    def convert_value(self):
        """
        Expressions provide their own converters because users have the option
        of manually specifying the output_field which may be a different type
        from the one the database returns.
""" field = self.output_field internal_type = field.get_internal_type() if internal_type == 'FloatField': return lambda value, expression, connection: None if value is None else float(value) elif internal_type.endswith('IntegerField'): return lambda value, expression, connection: None if value is None else int(value) elif internal_type == 'DecimalField': return lambda value, expression, connection: None if value is None else Decimal(value) return self._convert_value_noop def get_lookup(self, lookup): return self.output_field.get_lookup(lookup) def get_transform(self, name): return self.output_field.get_transform(name) def relabeled_clone(self, change_map): clone = self.copy() clone.set_source_expressions([ e.relabeled_clone(change_map) if e is not None else None for e in self.get_source_expressions() ]) return clone def copy(self): return copy.copy(self) def get_group_by_cols(self, alias=None): if not self.contains_aggregate: return [self] cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols def get_source_fields(self): """Return the underlying field types used by this aggregate.""" return [e._output_field_or_none for e in self.get_source_expressions()] def asc(self, **kwargs): return OrderBy(self, **kwargs) def desc(self, **kwargs): return OrderBy(self, descending=True, **kwargs) def reverse_ordering(self): return self def flatten(self): """ Recursively yield this expression and all subexpressions, in depth-first order. """ yield self for expr in self.get_source_expressions(): if expr: if hasattr(expr, 'flatten'): yield from expr.flatten() else: yield expr def select_format(self, compiler, sql, params): """ Custom format for select clauses. For example, EXISTS expressions need to be wrapped in CASE WHEN on Oracle. 
""" if hasattr(self.output_field, 'select_format'): return self.output_field.select_format(compiler, sql, params) return sql, params @deconstructible class Expression(BaseExpression, Combinable): """An expression that can be combined with other expressions.""" @cached_property def identity(self): constructor_signature = inspect.signature(self.__init__) args, kwargs = self._constructor_args signature = constructor_signature.bind_partial(*args, **kwargs) signature.apply_defaults() arguments = signature.arguments.items() identity = [self.__class__] for arg, value in arguments: if isinstance(value, fields.Field): if value.name and value.model: value = (value.model._meta.label, value.name) else: value = type(value) else: value = make_hashable(value) identity.append((arg, value)) return tuple(identity) def __eq__(self, other): if not isinstance(other, Expression): return NotImplemented return other.identity == self.identity def __hash__(self): return hash(self.identity) _connector_combinators = { connector: [ (fields.IntegerField, fields.IntegerField, fields.IntegerField), (fields.IntegerField, fields.DecimalField, fields.DecimalField), (fields.DecimalField, fields.IntegerField, fields.DecimalField), (fields.IntegerField, fields.FloatField, fields.FloatField), (fields.FloatField, fields.IntegerField, fields.FloatField), ] for connector in (Combinable.ADD, Combinable.SUB, Combinable.MUL, Combinable.DIV) } @functools.lru_cache(maxsize=128) def _resolve_combined_type(connector, lhs_type, rhs_type): combinators = _connector_combinators.get(connector, ()) for combinator_lhs_type, combinator_rhs_type, combined_type in combinators: if issubclass(lhs_type, combinator_lhs_type) and issubclass(rhs_type, combinator_rhs_type): return combined_type class CombinedExpression(SQLiteNumericMixin, Expression): def __init__(self, lhs, connector, rhs, output_field=None): super().__init__(output_field=output_field) self.connector = connector self.lhs = lhs self.rhs = rhs def __repr__(self): return "<{}: {}>".format(self.__class__.__name__, self) def __str__(self): return "{} {} {}".format(self.lhs, self.connector, self.rhs) def get_source_expressions(self): return [self.lhs, self.rhs] def set_source_expressions(self, exprs): self.lhs, self.rhs = exprs def _resolve_output_field(self): try: return super()._resolve_output_field() except FieldError: combined_type = _resolve_combined_type( self.connector, type(self.lhs.output_field), type(self.rhs.output_field), ) if combined_type is None: raise return combined_type() def as_sql(self, compiler, connection): expressions = [] expression_params = [] sql, params = compiler.compile(self.lhs) expressions.append(sql) expression_params.extend(params) sql, params = compiler.compile(self.rhs) expressions.append(sql) expression_params.extend(params) # order of precedence expression_wrapper = '(%s)' sql = connection.ops.combine_expression(self.connector, expressions) return expression_wrapper % sql, expression_params def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): lhs = self.lhs.resolve_expression(query, allow_joins, reuse, summarize, for_save) rhs = self.rhs.resolve_expression(query, allow_joins, reuse, summarize, for_save) if not isinstance(self, (DurationExpression, TemporalSubtraction)): try: lhs_type = lhs.output_field.get_internal_type() except (AttributeError, FieldError): lhs_type = None try: rhs_type = rhs.output_field.get_internal_type() except (AttributeError, FieldError): rhs_type = None if 'DurationField' in 
{lhs_type, rhs_type} and lhs_type != rhs_type: return DurationExpression(self.lhs, self.connector, self.rhs).resolve_expression( query, allow_joins, reuse, summarize, for_save, ) datetime_fields = {'DateField', 'DateTimeField', 'TimeField'} if self.connector == self.SUB and lhs_type in datetime_fields and lhs_type == rhs_type: return TemporalSubtraction(self.lhs, self.rhs).resolve_expression( query, allow_joins, reuse, summarize, for_save, ) c = self.copy() c.is_summary = summarize c.lhs = lhs c.rhs = rhs return c class DurationExpression(CombinedExpression): def compile(self, side, compiler, connection): try: output = side.output_field except FieldError: pass else: if output.get_internal_type() == 'DurationField': sql, params = compiler.compile(side) return connection.ops.format_for_duration_arithmetic(sql), params return compiler.compile(side) def as_sql(self, compiler, connection): if connection.features.has_native_duration_field: return super().as_sql(compiler, connection) connection.ops.check_expression_support(self) expressions = [] expression_params = [] sql, params = self.compile(self.lhs, compiler, connection) expressions.append(sql) expression_params.extend(params) sql, params = self.compile(self.rhs, compiler, connection) expressions.append(sql) expression_params.extend(params) # order of precedence expression_wrapper = '(%s)' sql = connection.ops.combine_duration_expression(self.connector, expressions) return expression_wrapper % sql, expression_params def as_sqlite(self, compiler, connection, **extra_context): sql, params = self.as_sql(compiler, connection, **extra_context) if self.connector in {Combinable.MUL, Combinable.DIV}: try: lhs_type = self.lhs.output_field.get_internal_type() rhs_type = self.rhs.output_field.get_internal_type() except (AttributeError, FieldError): pass else: allowed_fields = { 'DecimalField', 'DurationField', 'FloatField', 'IntegerField', } if lhs_type not in allowed_fields or rhs_type not in allowed_fields: raise DatabaseError( f'Invalid arguments for operator {self.connector}.' ) return sql, params class TemporalSubtraction(CombinedExpression): output_field = fields.DurationField() def __init__(self, lhs, rhs): super().__init__(lhs, self.SUB, rhs) def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) lhs = compiler.compile(self.lhs) rhs = compiler.compile(self.rhs) return connection.ops.subtract_temporals(self.lhs.output_field.get_internal_type(), lhs, rhs) @deconstructible class F(Combinable): """An object capable of resolving references to existing query objects.""" def __init__(self, name): """ Arguments: * name: the name of the field this expression references """ self.name = name def __repr__(self): return "{}({})".format(self.__class__.__name__, self.name) def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): return query.resolve_ref(self.name, allow_joins, reuse, summarize) def asc(self, **kwargs): return OrderBy(self, **kwargs) def desc(self, **kwargs): return OrderBy(self, descending=True, **kwargs) def __eq__(self, other): return self.__class__ == other.__class__ and self.name == other.name def __hash__(self): return hash(self.name) class ResolvedOuterRef(F): """ An object that contains a reference to an outer query. In this case, the reference to the outer query has been resolved because the inner query has been used as a subquery. 
""" contains_aggregate = False def as_sql(self, *args, **kwargs): raise ValueError( 'This queryset contains a reference to an outer query and may ' 'only be used in a subquery.' ) def resolve_expression(self, *args, **kwargs): col = super().resolve_expression(*args, **kwargs) # FIXME: Rename possibly_multivalued to multivalued and fix detection # for non-multivalued JOINs (e.g. foreign key fields). This should take # into account only many-to-many and one-to-many relationships. col.possibly_multivalued = LOOKUP_SEP in self.name return col def relabeled_clone(self, relabels): return self def get_group_by_cols(self, alias=None): return [] class OuterRef(F): contains_aggregate = False def resolve_expression(self, *args, **kwargs): if isinstance(self.name, self.__class__): return self.name return ResolvedOuterRef(self.name) def relabeled_clone(self, relabels): return self class Func(SQLiteNumericMixin, Expression): """An SQL function call.""" function = None template = '%(function)s(%(expressions)s)' arg_joiner = ', ' arity = None # The number of arguments the function accepts. def __init__(self, *expressions, output_field=None, **extra): if self.arity is not None and len(expressions) != self.arity: raise TypeError( "'%s' takes exactly %s %s (%s given)" % ( self.__class__.__name__, self.arity, "argument" if self.arity == 1 else "arguments", len(expressions), ) ) super().__init__(output_field=output_field) self.source_expressions = self._parse_expressions(*expressions) self.extra = extra def __repr__(self): args = self.arg_joiner.join(str(arg) for arg in self.source_expressions) extra = {**self.extra, **self._get_repr_options()} if extra: extra = ', '.join(str(key) + '=' + str(val) for key, val in sorted(extra.items())) return "{}({}, {})".format(self.__class__.__name__, args, extra) return "{}({})".format(self.__class__.__name__, args) def _get_repr_options(self): """Return a dict of extra __init__() options to include in the repr.""" return {} def get_source_expressions(self): return self.source_expressions def set_source_expressions(self, exprs): self.source_expressions = exprs def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = self.copy() c.is_summary = summarize for pos, arg in enumerate(c.source_expressions): c.source_expressions[pos] = arg.resolve_expression(query, allow_joins, reuse, summarize, for_save) return c def as_sql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context): connection.ops.check_expression_support(self) sql_parts = [] params = [] for arg in self.source_expressions: arg_sql, arg_params = compiler.compile(arg) sql_parts.append(arg_sql) params.extend(arg_params) data = {**self.extra, **extra_context} # Use the first supplied value in this order: the parameter to this # method, a value supplied in __init__()'s **extra (the value in # `data`), or the value defined on the class. 
if function is not None: data['function'] = function else: data.setdefault('function', self.function) template = template or data.get('template', self.template) arg_joiner = arg_joiner or data.get('arg_joiner', self.arg_joiner) data['expressions'] = data['field'] = arg_joiner.join(sql_parts) return template % data, params def copy(self): copy = super().copy() copy.source_expressions = self.source_expressions[:] copy.extra = self.extra.copy() return copy class Value(SQLiteNumericMixin, Expression): """Represent a wrapped value as a node within an expression.""" # Provide a default value for `for_save` in order to allow unresolved # instances to be compiled until a decision is taken in #25425. for_save = False def __init__(self, value, output_field=None): """ Arguments: * value: the value this expression represents. The value will be added into the sql parameter list and properly quoted. * output_field: an instance of the model field type that this expression will return, such as IntegerField() or CharField(). """ super().__init__(output_field=output_field) self.value = value def __repr__(self): return f'{self.__class__.__name__}({self.value!r})' def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) val = self.value output_field = self._output_field_or_none if output_field is not None: if self.for_save: val = output_field.get_db_prep_save(val, connection=connection) else: val = output_field.get_db_prep_value(val, connection=connection) if hasattr(output_field, 'get_placeholder'): return output_field.get_placeholder(val, compiler, connection), [val] if val is None: # cx_Oracle does not always convert None to the appropriate # NULL type (like in case expressions using numbers), so we # use a literal SQL NULL return 'NULL', [] return '%s', [val] def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save) c.for_save = for_save return c def get_group_by_cols(self, alias=None): return [] def _resolve_output_field(self): if isinstance(self.value, str): return fields.CharField() if isinstance(self.value, bool): return fields.BooleanField() if isinstance(self.value, int): return fields.IntegerField() if isinstance(self.value, float): return fields.FloatField() if isinstance(self.value, datetime.datetime): return fields.DateTimeField() if isinstance(self.value, datetime.date): return fields.DateField() if isinstance(self.value, datetime.time): return fields.TimeField() if isinstance(self.value, datetime.timedelta): return fields.DurationField() if isinstance(self.value, Decimal): return fields.DecimalField() if isinstance(self.value, bytes): return fields.BinaryField() if isinstance(self.value, UUID): return fields.UUIDField() @property def empty_aggregate_value(self): return self.value class RawSQL(Expression): def __init__(self, sql, params, output_field=None): if output_field is None: output_field = fields.Field() self.sql, self.params = sql, params super().__init__(output_field=output_field) def __repr__(self): return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params) def as_sql(self, compiler, connection): return '(%s)' % self.sql, self.params def get_group_by_cols(self, alias=None): return [self] def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): # Resolve parents fields used in raw SQL. 
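        # With multi-table inheritance, a column owned by a parent model may
        # appear in the raw SQL; resolve_ref() forces the JOIN to that parent
        # table so the column is actually available to the query.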
for parent in query.model._meta.get_parent_list(): for parent_field in parent._meta.local_fields: _, column_name = parent_field.get_attname_column() if column_name.lower() in self.sql.lower(): query.resolve_ref(parent_field.name, allow_joins, reuse, summarize) break return super().resolve_expression(query, allow_joins, reuse, summarize, for_save) class Star(Expression): def __repr__(self): return "'*'" def as_sql(self, compiler, connection): return '*', [] class Col(Expression): contains_column_references = True possibly_multivalued = False def __init__(self, alias, target, output_field=None): if output_field is None: output_field = target super().__init__(output_field=output_field) self.alias, self.target = alias, target def __repr__(self): alias, target = self.alias, self.target identifiers = (alias, str(target)) if alias else (str(target),) return '{}({})'.format(self.__class__.__name__, ', '.join(identifiers)) def as_sql(self, compiler, connection): alias, column = self.alias, self.target.column identifiers = (alias, column) if alias else (column,) sql = '.'.join(map(compiler.quote_name_unless_alias, identifiers)) return sql, [] def relabeled_clone(self, relabels): if self.alias is None: return self return self.__class__(relabels.get(self.alias, self.alias), self.target, self.output_field) def get_group_by_cols(self, alias=None): return [self] def get_db_converters(self, connection): if self.target == self.output_field: return self.output_field.get_db_converters(connection) return (self.output_field.get_db_converters(connection) + self.target.get_db_converters(connection)) class Ref(Expression): """ Reference to column alias of the query. For example, Ref('sum_cost') in qs.annotate(sum_cost=Sum('cost')) query. """ def __init__(self, refs, source): super().__init__() self.refs, self.source = refs, source def __repr__(self): return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source) def get_source_expressions(self): return [self.source] def set_source_expressions(self, exprs): self.source, = exprs def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): # The sub-expression `source` has already been resolved, as this is # just a reference to the name of `source`. return self def relabeled_clone(self, relabels): return self def as_sql(self, compiler, connection): return connection.ops.quote_name(self.refs), [] def get_group_by_cols(self, alias=None): return [self] class ExpressionList(Func): """ An expression containing multiple expressions. Can be used to provide a list of expressions as an argument to another expression, like an ordering clause. """ template = '%(expressions)s' def __init__(self, *expressions, **extra): if not expressions: raise ValueError('%s requires at least one expression.' % self.__class__.__name__) super().__init__(*expressions, **extra) def __str__(self): return self.arg_joiner.join(str(arg) for arg in self.source_expressions) def as_sqlite(self, compiler, connection, **extra_context): # Casting to numeric is unnecessary. return self.as_sql(compiler, connection, **extra_context) class ExpressionWrapper(Expression): """ An expression that can wrap another expression so that it can provide extra context to the inner expression, such as the output_field. 
""" def __init__(self, expression, output_field): super().__init__(output_field=output_field) self.expression = expression def set_source_expressions(self, exprs): self.expression = exprs[0] def get_source_expressions(self): return [self.expression] def get_group_by_cols(self, alias=None): if isinstance(self.expression, Expression): expression = self.expression.copy() expression.output_field = self.output_field return expression.get_group_by_cols(alias=alias) # For non-expressions e.g. an SQL WHERE clause, the entire # `expression` must be included in the GROUP BY clause. return super().get_group_by_cols() def as_sql(self, compiler, connection): return compiler.compile(self.expression) def __repr__(self): return "{}({})".format(self.__class__.__name__, self.expression) class When(Expression): template = 'WHEN %(condition)s THEN %(result)s' # This isn't a complete conditional expression, must be used in Case(). conditional = False def __init__(self, condition=None, then=None, **lookups): if lookups: if condition is None: condition, lookups = Q(**lookups), None elif getattr(condition, 'conditional', False): condition, lookups = Q(condition, **lookups), None if condition is None or not getattr(condition, 'conditional', False) or lookups: raise TypeError( 'When() supports a Q object, a boolean expression, or lookups ' 'as a condition.' ) if isinstance(condition, Q) and not condition: raise ValueError("An empty Q() can't be used as a When() condition.") super().__init__(output_field=None) self.condition = condition self.result = self._parse_expressions(then)[0] def __str__(self): return "WHEN %r THEN %r" % (self.condition, self.result) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_source_expressions(self): return [self.condition, self.result] def set_source_expressions(self, exprs): self.condition, self.result = exprs def get_source_fields(self): # We're only interested in the fields of the result expressions. return [self.result._output_field_or_none] def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = self.copy() c.is_summary = summarize if hasattr(c.condition, 'resolve_expression'): c.condition = c.condition.resolve_expression(query, allow_joins, reuse, summarize, False) c.result = c.result.resolve_expression(query, allow_joins, reuse, summarize, for_save) return c def as_sql(self, compiler, connection, template=None, **extra_context): connection.ops.check_expression_support(self) template_params = extra_context sql_params = [] condition_sql, condition_params = compiler.compile(self.condition) template_params['condition'] = condition_sql sql_params.extend(condition_params) result_sql, result_params = compiler.compile(self.result) template_params['result'] = result_sql sql_params.extend(result_params) template = template or self.template return template % template_params, sql_params def get_group_by_cols(self, alias=None): # This is not a complete expression and cannot be used in GROUP BY. 
cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols class Case(Expression): """ An SQL searched CASE expression: CASE WHEN n > 0 THEN 'positive' WHEN n < 0 THEN 'negative' ELSE 'zero' END """ template = 'CASE %(cases)s ELSE %(default)s END' case_joiner = ' ' def __init__(self, *cases, default=None, output_field=None, **extra): if not all(isinstance(case, When) for case in cases): raise TypeError("Positional arguments must all be When objects.") super().__init__(output_field) self.cases = list(cases) self.default = self._parse_expressions(default)[0] self.extra = extra def __str__(self): return "CASE %s, ELSE %r" % (', '.join(str(c) for c in self.cases), self.default) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) def get_source_expressions(self): return self.cases + [self.default] def set_source_expressions(self, exprs): *self.cases, self.default = exprs def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = self.copy() c.is_summary = summarize for pos, case in enumerate(c.cases): c.cases[pos] = case.resolve_expression(query, allow_joins, reuse, summarize, for_save) c.default = c.default.resolve_expression(query, allow_joins, reuse, summarize, for_save) return c def copy(self): c = super().copy() c.cases = c.cases[:] return c def as_sql(self, compiler, connection, template=None, case_joiner=None, **extra_context): connection.ops.check_expression_support(self) if not self.cases: return compiler.compile(self.default) template_params = {**self.extra, **extra_context} case_parts = [] sql_params = [] for case in self.cases: try: case_sql, case_params = compiler.compile(case) except EmptyResultSet: continue case_parts.append(case_sql) sql_params.extend(case_params) default_sql, default_params = compiler.compile(self.default) if not case_parts: return default_sql, default_params case_joiner = case_joiner or self.case_joiner template_params['cases'] = case_joiner.join(case_parts) template_params['default'] = default_sql sql_params.extend(default_params) template = template or template_params.get('template', self.template) sql = template % template_params if self._output_field_or_none is not None: sql = connection.ops.unification_cast_sql(self.output_field) % sql return sql, sql_params def get_group_by_cols(self, alias=None): if not self.cases: return self.default.get_group_by_cols(alias) return super().get_group_by_cols(alias) class Subquery(BaseExpression, Combinable): """ An explicit subquery. It may contain OuterRef() references to the outer query which will be resolved when it is applied to that query. """ template = '(%(subquery)s)' contains_aggregate = False def __init__(self, queryset, output_field=None, **extra): # Allow the usage of both QuerySet and sql.Query objects. 
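        # getattr() unwraps a QuerySet to its underlying sql.Query; an object
        # without a `query` attribute (i.e. a Query passed directly) is kept
        # as-is.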
self.query = getattr(queryset, 'query', queryset) self.extra = extra super().__init__(output_field) def get_source_expressions(self): return [self.query] def set_source_expressions(self, exprs): self.query = exprs[0] def _resolve_output_field(self): return self.query.output_field def copy(self): clone = super().copy() clone.query = clone.query.clone() return clone @property def external_aliases(self): return self.query.external_aliases def get_external_cols(self): return self.query.get_external_cols() def as_sql(self, compiler, connection, template=None, query=None, **extra_context): connection.ops.check_expression_support(self) template_params = {**self.extra, **extra_context} query = query or self.query subquery_sql, sql_params = query.as_sql(compiler, connection) template_params['subquery'] = subquery_sql[1:-1] template = template or template_params.get('template', self.template) sql = template % template_params return sql, sql_params def get_group_by_cols(self, alias=None): if alias: return [Ref(alias, self)] external_cols = self.get_external_cols() if any(col.possibly_multivalued for col in external_cols): return [self] return external_cols class Exists(Subquery): template = 'EXISTS(%(subquery)s)' output_field = fields.BooleanField() def __init__(self, queryset, negated=False, **kwargs): self.negated = negated super().__init__(queryset, **kwargs) def __invert__(self): clone = self.copy() clone.negated = not self.negated return clone def as_sql(self, compiler, connection, template=None, **extra_context): query = self.query.exists(using=connection.alias) sql, params = super().as_sql( compiler, connection, template=template, query=query, **extra_context, ) if self.negated: sql = 'NOT {}'.format(sql) return sql, params def select_format(self, compiler, sql, params): # Wrap EXISTS() with a CASE WHEN expression if a database backend # (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP # BY list. 
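        # e.g. annotate(has_books=Exists(qs)) would otherwise place a bare
        # EXISTS(...) in the SELECT list, which such backends reject.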
if not compiler.connection.features.supports_boolean_expr_in_select_clause: sql = 'CASE WHEN {} THEN 1 ELSE 0 END'.format(sql) return sql, params class OrderBy(Expression): template = '%(expression)s %(ordering)s' conditional = False def __init__(self, expression, descending=False, nulls_first=False, nulls_last=False): if nulls_first and nulls_last: raise ValueError('nulls_first and nulls_last are mutually exclusive') self.nulls_first = nulls_first self.nulls_last = nulls_last self.descending = descending if not hasattr(expression, 'resolve_expression'): raise ValueError('expression must be an expression type') self.expression = expression def __repr__(self): return "{}({}, descending={})".format( self.__class__.__name__, self.expression, self.descending) def set_source_expressions(self, exprs): self.expression = exprs[0] def get_source_expressions(self): return [self.expression] def as_sql(self, compiler, connection, template=None, **extra_context): template = template or self.template if connection.features.supports_order_by_nulls_modifier: if self.nulls_last: template = '%s NULLS LAST' % template elif self.nulls_first: template = '%s NULLS FIRST' % template else: if self.nulls_last and not ( self.descending and connection.features.order_by_nulls_first ): template = '%%(expression)s IS NULL, %s' % template elif self.nulls_first and not ( not self.descending and connection.features.order_by_nulls_first ): template = '%%(expression)s IS NOT NULL, %s' % template connection.ops.check_expression_support(self) expression_sql, params = compiler.compile(self.expression) placeholders = { 'expression': expression_sql, 'ordering': 'DESC' if self.descending else 'ASC', **extra_context, } params *= template.count('%(expression)s') return (template % placeholders).rstrip(), params def as_oracle(self, compiler, connection): # Oracle doesn't allow ORDER BY EXISTS() or filters unless it's wrapped # in a CASE WHEN. if connection.ops.conditional_expression_supported_in_where_clause(self.expression): copy = self.copy() copy.expression = Case( When(self.expression, then=True), default=False, ) return copy.as_sql(compiler, connection) return self.as_sql(compiler, connection) def get_group_by_cols(self, alias=None): cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols def reverse_ordering(self): self.descending = not self.descending if self.nulls_first or self.nulls_last: self.nulls_first = not self.nulls_first self.nulls_last = not self.nulls_last return self def asc(self): self.descending = False def desc(self): self.descending = True class Window(SQLiteNumericMixin, Expression): template = '%(expression)s OVER (%(window)s)' # Although the main expression may either be an aggregate or an # expression with an aggregate function, the GROUP BY that will # be introduced in the query as a result is not desired. contains_aggregate = False contains_over_clause = True filterable = False def __init__(self, expression, partition_by=None, order_by=None, frame=None, output_field=None): self.partition_by = partition_by self.order_by = order_by self.frame = frame if not getattr(expression, 'window_compatible', False): raise ValueError( "Expression '%s' isn't compatible with OVER clauses." 
% expression.__class__.__name__ ) if self.partition_by is not None: if not isinstance(self.partition_by, (tuple, list)): self.partition_by = (self.partition_by,) self.partition_by = ExpressionList(*self.partition_by) if self.order_by is not None: if isinstance(self.order_by, (list, tuple)): self.order_by = ExpressionList(*self.order_by) elif not isinstance(self.order_by, BaseExpression): raise ValueError( 'order_by must be either an Expression or a sequence of ' 'expressions.' ) super().__init__(output_field=output_field) self.source_expression = self._parse_expressions(expression)[0] def _resolve_output_field(self): return self.source_expression.output_field def get_source_expressions(self): return [self.source_expression, self.partition_by, self.order_by, self.frame] def set_source_expressions(self, exprs): self.source_expression, self.partition_by, self.order_by, self.frame = exprs def as_sql(self, compiler, connection, template=None): connection.ops.check_expression_support(self) if not connection.features.supports_over_clause: raise NotSupportedError('This backend does not support window expressions.') expr_sql, params = compiler.compile(self.source_expression) window_sql, window_params = [], [] if self.partition_by is not None: sql_expr, sql_params = self.partition_by.as_sql( compiler=compiler, connection=connection, template='PARTITION BY %(expressions)s', ) window_sql.extend(sql_expr) window_params.extend(sql_params) if self.order_by is not None: window_sql.append(' ORDER BY ') order_sql, order_params = compiler.compile(self.order_by) window_sql.extend(order_sql) window_params.extend(order_params) if self.frame: frame_sql, frame_params = compiler.compile(self.frame) window_sql.append(' ' + frame_sql) window_params.extend(frame_params) params.extend(window_params) template = template or self.template return template % { 'expression': expr_sql, 'window': ''.join(window_sql).strip() }, params def as_sqlite(self, compiler, connection): if isinstance(self.output_field, fields.DecimalField): # Casting to numeric must be outside of the window expression. copy = self.copy() source_expressions = copy.get_source_expressions() source_expressions[0].output_field = fields.FloatField() copy.set_source_expressions(source_expressions) return super(Window, copy).as_sqlite(compiler, connection) return self.as_sql(compiler, connection) def __str__(self): return '{} OVER ({}{}{})'.format( str(self.source_expression), 'PARTITION BY ' + str(self.partition_by) if self.partition_by else '', 'ORDER BY ' + str(self.order_by) if self.order_by else '', str(self.frame or ''), ) def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self) def get_group_by_cols(self, alias=None): return [] class WindowFrame(Expression): """ Model the frame clause in window expressions. There are two types of frame clauses which are subclasses, however, all processing and validation (by no means intended to be complete) is done here. Thus, providing an end for a frame is optional (the default is UNBOUNDED FOLLOWING, which is the last row in the frame). 
""" template = '%(frame_type)s BETWEEN %(start)s AND %(end)s' def __init__(self, start=None, end=None): self.start = Value(start) self.end = Value(end) def set_source_expressions(self, exprs): self.start, self.end = exprs def get_source_expressions(self): return [self.start, self.end] def as_sql(self, compiler, connection): connection.ops.check_expression_support(self) start, end = self.window_frame_start_end(connection, self.start.value, self.end.value) return self.template % { 'frame_type': self.frame_type, 'start': start, 'end': end, }, [] def __repr__(self): return '<%s: %s>' % (self.__class__.__name__, self) def get_group_by_cols(self, alias=None): return [] def __str__(self): if self.start.value is not None and self.start.value < 0: start = '%d %s' % (abs(self.start.value), connection.ops.PRECEDING) elif self.start.value is not None and self.start.value == 0: start = connection.ops.CURRENT_ROW else: start = connection.ops.UNBOUNDED_PRECEDING if self.end.value is not None and self.end.value > 0: end = '%d %s' % (self.end.value, connection.ops.FOLLOWING) elif self.end.value is not None and self.end.value == 0: end = connection.ops.CURRENT_ROW else: end = connection.ops.UNBOUNDED_FOLLOWING return self.template % { 'frame_type': self.frame_type, 'start': start, 'end': end, } def window_frame_start_end(self, connection, start, end): raise NotImplementedError('Subclasses must implement window_frame_start_end().') class RowRange(WindowFrame): frame_type = 'ROWS' def window_frame_start_end(self, connection, start, end): return connection.ops.window_frame_rows_start_end(start, end) class ValueRange(WindowFrame): frame_type = 'RANGE' def window_frame_start_end(self, connection, start, end): return connection.ops.window_frame_range_start_end(start, end)
from collections import Counter, defaultdict from functools import partial from itertools import chain from operator import attrgetter from django.db import IntegrityError, connections, transaction from django.db.models import query_utils, signals, sql class ProtectedError(IntegrityError): def __init__(self, msg, protected_objects): self.protected_objects = protected_objects super().__init__(msg, protected_objects) class RestrictedError(IntegrityError): def __init__(self, msg, restricted_objects): self.restricted_objects = restricted_objects super().__init__(msg, restricted_objects) def CASCADE(collector, field, sub_objs, using): collector.collect( sub_objs, source=field.remote_field.model, source_attr=field.name, nullable=field.null, fail_on_restricted=False, ) if field.null and not connections[using].features.can_defer_constraint_checks: collector.add_field_update(field, None, sub_objs) def PROTECT(collector, field, sub_objs, using): raise ProtectedError( "Cannot delete some instances of model '%s' because they are " "referenced through a protected foreign key: '%s.%s'" % ( field.remote_field.model.__name__, sub_objs[0].__class__.__name__, field.name ), sub_objs ) def RESTRICT(collector, field, sub_objs, using): collector.add_restricted_objects(field, sub_objs) collector.add_dependency(field.remote_field.model, field.model) def SET(value): if callable(value): def set_on_delete(collector, field, sub_objs, using): collector.add_field_update(field, value(), sub_objs) else: def set_on_delete(collector, field, sub_objs, using): collector.add_field_update(field, value, sub_objs) set_on_delete.deconstruct = lambda: ('django.db.models.SET', (value,), {}) return set_on_delete def SET_NULL(collector, field, sub_objs, using): collector.add_field_update(field, None, sub_objs) def SET_DEFAULT(collector, field, sub_objs, using): collector.add_field_update(field, field.get_default(), sub_objs) def DO_NOTHING(collector, field, sub_objs, using): pass def get_candidate_relations_to_delete(opts): # The candidate relations are the ones that come from N-1 and 1-1 relations. # N-N (i.e., many-to-many) relations aren't candidates for deletion. return ( f for f in opts.get_fields(include_hidden=True) if f.auto_created and not f.concrete and (f.one_to_one or f.one_to_many) ) class Collector: def __init__(self, using): self.using = using # Initially, {model: {instances}}, later values become lists. self.data = defaultdict(set) # {model: {(field, value): {instances}}} self.field_updates = defaultdict(partial(defaultdict, set)) # {model: {field: {instances}}} self.restricted_objects = defaultdict(partial(defaultdict, set)) # fast_deletes is a list of queryset-likes that can be deleted without # fetching the objects into memory. self.fast_deletes = [] # Tracks deletion-order dependency for databases without transactions # or ability to defer constraint checks. Only concrete model classes # should be included, as the dependencies exist only between actual # database tables; proxy models are represented here by their concrete # parent. self.dependencies = defaultdict(set) # {model: {models}} def add(self, objs, source=None, nullable=False, reverse_dependency=False): """ Add 'objs' to the collection of objects to be deleted. If the call is the result of a cascade, 'source' should be the model that caused it, and 'nullable' should be set to True if the relation can be null. Return a list of all objects that were not already collected. 
""" if not objs: return [] new_objs = [] model = objs[0].__class__ instances = self.data[model] for obj in objs: if obj not in instances: new_objs.append(obj) instances.update(new_objs) # Nullable relationships can be ignored -- they are nulled out before # deleting, and therefore do not affect the order in which objects have # to be deleted. if source is not None and not nullable: self.add_dependency(source, model, reverse_dependency=reverse_dependency) return new_objs def add_dependency(self, model, dependency, reverse_dependency=False): if reverse_dependency: model, dependency = dependency, model self.dependencies[model._meta.concrete_model].add(dependency._meta.concrete_model) self.data.setdefault(dependency, self.data.default_factory()) def add_field_update(self, field, value, objs): """ Schedule a field update. 'objs' must be a homogeneous iterable collection of model instances (e.g. a QuerySet). """ if not objs: return model = objs[0].__class__ self.field_updates[model][field, value].update(objs) def add_restricted_objects(self, field, objs): if objs: model = objs[0].__class__ self.restricted_objects[model][field].update(objs) def clear_restricted_objects_from_set(self, model, objs): if model in self.restricted_objects: self.restricted_objects[model] = { field: items - objs for field, items in self.restricted_objects[model].items() } def clear_restricted_objects_from_queryset(self, model, qs): if model in self.restricted_objects: objs = set(qs.filter(pk__in=[ obj.pk for objs in self.restricted_objects[model].values() for obj in objs ])) self.clear_restricted_objects_from_set(model, objs) def _has_signal_listeners(self, model): return ( signals.pre_delete.has_listeners(model) or signals.post_delete.has_listeners(model) ) def can_fast_delete(self, objs, from_field=None): """ Determine if the objects in the given queryset-like or single object can be fast-deleted. This can be done if there are no cascades, no parents and no signal listeners for the object class. The 'from_field' tells where we are coming from - we need this to determine if the objects are in fact to be deleted. Allow also skipping parent -> child -> parent chain preventing fast delete of the child. """ if from_field and from_field.remote_field.on_delete is not CASCADE: return False if hasattr(objs, '_meta'): model = objs._meta.model elif hasattr(objs, 'model') and hasattr(objs, '_raw_delete'): model = objs.model else: return False if self._has_signal_listeners(model): return False # The use of from_field comes from the need to avoid cascade back to # parent when parent delete is cascading to child. opts = model._meta return ( all(link == from_field for link in opts.concrete_model._meta.parents.values()) and # Foreign keys pointing to this model. all( related.field.remote_field.on_delete is DO_NOTHING for related in get_candidate_relations_to_delete(opts) ) and ( # Something like generic foreign key. not any(hasattr(field, 'bulk_related_objects') for field in opts.private_fields) ) ) def get_del_batches(self, objs, fields): """ Return the objs in suitably sized batches for the used connection. 
""" field_names = [field.name for field in fields] conn_batch_size = max( connections[self.using].ops.bulk_batch_size(field_names, objs), 1) if len(objs) > conn_batch_size: return [objs[i:i + conn_batch_size] for i in range(0, len(objs), conn_batch_size)] else: return [objs] def collect(self, objs, source=None, nullable=False, collect_related=True, source_attr=None, reverse_dependency=False, keep_parents=False, fail_on_restricted=True): """ Add 'objs' to the collection of objects to be deleted as well as all parent instances. 'objs' must be a homogeneous iterable collection of model instances (e.g. a QuerySet). If 'collect_related' is True, related objects will be handled by their respective on_delete handler. If the call is the result of a cascade, 'source' should be the model that caused it and 'nullable' should be set to True, if the relation can be null. If 'reverse_dependency' is True, 'source' will be deleted before the current model, rather than after. (Needed for cascading to parent models, the one case in which the cascade follows the forwards direction of an FK rather than the reverse direction.) If 'keep_parents' is True, data of parent model's will be not deleted. If 'fail_on_restricted' is False, error won't be raised even if it's prohibited to delete such objects due to RESTRICT, that defers restricted object checking in recursive calls where the top-level call may need to collect more objects to determine whether restricted ones can be deleted. """ if self.can_fast_delete(objs): self.fast_deletes.append(objs) return new_objs = self.add(objs, source, nullable, reverse_dependency=reverse_dependency) if not new_objs: return model = new_objs[0].__class__ if not keep_parents: # Recursively collect concrete model's parent models, but not their # related objects. These will be found by meta.get_fields() concrete_model = model._meta.concrete_model for ptr in concrete_model._meta.parents.values(): if ptr: parent_objs = [getattr(obj, ptr.name) for obj in new_objs] self.collect(parent_objs, source=model, source_attr=ptr.remote_field.related_name, collect_related=False, reverse_dependency=True, fail_on_restricted=False) if not collect_related: return if keep_parents: parents = set(model._meta.get_parent_list()) model_fast_deletes = defaultdict(list) protected_objects = defaultdict(list) for related in get_candidate_relations_to_delete(model._meta): # Preserve parent reverse relationships if keep_parents=True. if keep_parents and related.model in parents: continue field = related.field if field.remote_field.on_delete == DO_NOTHING: continue related_model = related.related_model if self.can_fast_delete(related_model, from_field=field): model_fast_deletes[related_model].append(field) continue batches = self.get_del_batches(new_objs, [field]) for batch in batches: sub_objs = self.related_objects(related_model, [field], batch) # Non-referenced fields can be deferred if no signal receivers # are connected for the related model as they'll never be # exposed to the user. Skip field deferring when some # relationships are select_related as interactions between both # features are hard to get right. This should only happen in # the rare cases where .related_objects is overridden anyway. 
if not (sub_objs.query.select_related or self._has_signal_listeners(related_model)): referenced_fields = set(chain.from_iterable( (rf.attname for rf in rel.field.foreign_related_fields) for rel in get_candidate_relations_to_delete(related_model._meta) )) sub_objs = sub_objs.only(*tuple(referenced_fields)) if sub_objs: try: field.remote_field.on_delete(self, field, sub_objs, self.using) except ProtectedError as error: key = "'%s.%s'" % (field.model.__name__, field.name) protected_objects[key] += error.protected_objects if protected_objects: raise ProtectedError( 'Cannot delete some instances of model %r because they are ' 'referenced through protected foreign keys: %s.' % ( model.__name__, ', '.join(protected_objects), ), set(chain.from_iterable(protected_objects.values())), ) for related_model, related_fields in model_fast_deletes.items(): batches = self.get_del_batches(new_objs, related_fields) for batch in batches: sub_objs = self.related_objects(related_model, related_fields, batch) self.fast_deletes.append(sub_objs) for field in model._meta.private_fields: if hasattr(field, 'bulk_related_objects'): # It's something like generic foreign key. sub_objs = field.bulk_related_objects(new_objs, self.using) self.collect(sub_objs, source=model, nullable=True, fail_on_restricted=False) if fail_on_restricted: # Raise an error if collected restricted objects (RESTRICT) aren't # candidates for deletion also collected via CASCADE. for related_model, instances in self.data.items(): self.clear_restricted_objects_from_set(related_model, instances) for qs in self.fast_deletes: self.clear_restricted_objects_from_queryset(qs.model, qs) if self.restricted_objects.values(): restricted_objects = defaultdict(list) for related_model, fields in self.restricted_objects.items(): for field, objs in fields.items(): if objs: key = "'%s.%s'" % (related_model.__name__, field.name) restricted_objects[key] += objs if restricted_objects: raise RestrictedError( 'Cannot delete some instances of model %r because ' 'they are referenced through restricted foreign keys: ' '%s.' % ( model.__name__, ', '.join(restricted_objects), ), set(chain.from_iterable(restricted_objects.values())), ) def related_objects(self, related_model, related_fields, objs): """ Get a QuerySet of the related model to objs via related fields. """ predicate = query_utils.Q( *( (f'{related_field.name}__in', objs) for related_field in related_fields ), _connector=query_utils.Q.OR, ) return related_model._base_manager.using(self.using).filter(predicate) def instances_with_model(self): for model, instances in self.data.items(): for obj in instances: yield model, obj def sort(self): sorted_models = [] concrete_models = set() models = list(self.data) while len(sorted_models) < len(models): found = False for model in models: if model in sorted_models: continue dependencies = self.dependencies.get(model._meta.concrete_model) if not (dependencies and dependencies.difference(concrete_models)): sorted_models.append(model) concrete_models.add(model._meta.concrete_model) found = True if not found: return self.data = {model: self.data[model] for model in sorted_models} def delete(self): # sort instance collections for model, instances in self.data.items(): self.data[model] = sorted(instances, key=attrgetter("pk")) # if possible, bring the models in an order suitable for databases that # don't support transactions or cannot defer constraint checks until the # end of a transaction. 
self.sort() # number of objects deleted for each model label deleted_counter = Counter() # Optimize for the case with a single obj and no dependencies if len(self.data) == 1 and len(instances) == 1: instance = list(instances)[0] if self.can_fast_delete(instance): with transaction.mark_for_rollback_on_error(self.using): count = sql.DeleteQuery(model).delete_batch([instance.pk], self.using) setattr(instance, model._meta.pk.attname, None) return count, {model._meta.label: count} with transaction.atomic(using=self.using, savepoint=False): # send pre_delete signals for model, obj in self.instances_with_model(): if not model._meta.auto_created: signals.pre_delete.send( sender=model, instance=obj, using=self.using ) # fast deletes for qs in self.fast_deletes: count = qs._raw_delete(using=self.using) if count: deleted_counter[qs.model._meta.label] += count # update fields for model, instances_for_fieldvalues in self.field_updates.items(): for (field, value), instances in instances_for_fieldvalues.items(): query = sql.UpdateQuery(model) query.update_batch([obj.pk for obj in instances], {field.name: value}, self.using) # reverse instance collections for instances in self.data.values(): instances.reverse() # delete instances for model, instances in self.data.items(): query = sql.DeleteQuery(model) pk_list = [obj.pk for obj in instances] count = query.delete_batch(pk_list, self.using) if count: deleted_counter[model._meta.label] += count if not model._meta.auto_created: for obj in instances: signals.post_delete.send( sender=model, instance=obj, using=self.using ) # update collected instances for instances_for_fieldvalues in self.field_updates.values(): for (field, value), instances in instances_for_fieldvalues.items(): for obj in instances: setattr(obj, field.attname, value) for model, instances in self.data.items(): for instance in instances: setattr(instance, model._meta.pk.attname, None) return sum(deleted_counter.values()), dict(deleted_counter)
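

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the file above): Model.delete() and
# QuerySet.delete() drive the Collector roughly like this. The instance and
# database alias are assumptions for this example.
def delete_with_collector_example(instance, using='default'):
    """Collect 'instance' plus everything cascading from it, then delete."""
    collector = Collector(using=using)
    # collect() walks reverse FKs, applying each field's on_delete handler
    # (CASCADE, PROTECT, RESTRICT, SET_NULL, ...) defined above.
    collector.collect([instance], keep_parents=False)
    # delete() returns (total_rows_deleted, {model_label: count}).
    return collector.delete()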
# ---- file boundary: the next blob appears to be django/db/models/lookups.py ----
import itertools import math from django.core.exceptions import EmptyResultSet from django.db.models.expressions import Case, Expression, Func, Value, When from django.db.models.fields import ( BooleanField, CharField, DateTimeField, Field, IntegerField, UUIDField, ) from django.db.models.query_utils import RegisterLookupMixin from django.utils.datastructures import OrderedSet from django.utils.functional import cached_property from django.utils.hashable import make_hashable class Lookup(Expression): lookup_name = None prepare_rhs = True can_use_none_as_rhs = False def __init__(self, lhs, rhs): self.lhs, self.rhs = lhs, rhs self.rhs = self.get_prep_lookup() self.lhs = self.get_prep_lhs() if hasattr(self.lhs, 'get_bilateral_transforms'): bilateral_transforms = self.lhs.get_bilateral_transforms() else: bilateral_transforms = [] if bilateral_transforms: # Warn the user as soon as possible if they are trying to apply # a bilateral transformation on a nested QuerySet: that won't work. from django.db.models.sql.query import ( # avoid circular import Query, ) if isinstance(rhs, Query): raise NotImplementedError("Bilateral transformations on nested querysets are not implemented.") self.bilateral_transforms = bilateral_transforms def apply_bilateral_transforms(self, value): for transform in self.bilateral_transforms: value = transform(value) return value def __repr__(self): return f'{self.__class__.__name__}({self.lhs!r}, {self.rhs!r})' def batch_process_rhs(self, compiler, connection, rhs=None): if rhs is None: rhs = self.rhs if self.bilateral_transforms: sqls, sqls_params = [], [] for p in rhs: value = Value(p, output_field=self.lhs.output_field) value = self.apply_bilateral_transforms(value) value = value.resolve_expression(compiler.query) sql, sql_params = compiler.compile(value) sqls.append(sql) sqls_params.extend(sql_params) else: _, params = self.get_db_prep_lookup(rhs, connection) sqls, sqls_params = ['%s'] * len(params), params return sqls, sqls_params def get_source_expressions(self): if self.rhs_is_direct_value(): return [self.lhs] return [self.lhs, self.rhs] def set_source_expressions(self, new_exprs): if len(new_exprs) == 1: self.lhs = new_exprs[0] else: self.lhs, self.rhs = new_exprs def get_prep_lookup(self): if not self.prepare_rhs or hasattr(self.rhs, 'resolve_expression'): return self.rhs if hasattr(self.lhs, 'output_field'): if hasattr(self.lhs.output_field, 'get_prep_value'): return self.lhs.output_field.get_prep_value(self.rhs) elif self.rhs_is_direct_value(): return Value(self.rhs) return self.rhs def get_prep_lhs(self): if hasattr(self.lhs, 'resolve_expression'): return self.lhs return Value(self.lhs) def get_db_prep_lookup(self, value, connection): return ('%s', [value]) def process_lhs(self, compiler, connection, lhs=None): lhs = lhs or self.lhs if hasattr(lhs, 'resolve_expression'): lhs = lhs.resolve_expression(compiler.query) sql, params = compiler.compile(lhs) if isinstance(lhs, Lookup): # Wrapped in parentheses to respect operator precedence. 
sql = f'({sql})' return sql, params def process_rhs(self, compiler, connection): value = self.rhs if self.bilateral_transforms: if self.rhs_is_direct_value(): # Do not call get_db_prep_lookup here as the value will be # transformed before being used for lookup value = Value(value, output_field=self.lhs.output_field) value = self.apply_bilateral_transforms(value) value = value.resolve_expression(compiler.query) if hasattr(value, 'as_sql'): sql, params = compiler.compile(value) # Ensure expression is wrapped in parentheses to respect operator # precedence but avoid double wrapping as it can be misinterpreted # on some backends (e.g. subqueries on SQLite). if sql and sql[0] != '(': sql = '(%s)' % sql return sql, params else: return self.get_db_prep_lookup(value, connection) def rhs_is_direct_value(self): return not hasattr(self.rhs, 'as_sql') def get_group_by_cols(self, alias=None): cols = [] for source in self.get_source_expressions(): cols.extend(source.get_group_by_cols()) return cols def as_oracle(self, compiler, connection): # Oracle doesn't allow EXISTS() and filters to be compared to another # expression unless they're wrapped in a CASE WHEN. wrapped = False exprs = [] for expr in (self.lhs, self.rhs): if connection.ops.conditional_expression_supported_in_where_clause(expr): expr = Case(When(expr, then=True), default=False) wrapped = True exprs.append(expr) lookup = type(self)(*exprs) if wrapped else self return lookup.as_sql(compiler, connection) @cached_property def output_field(self): return BooleanField() @property def identity(self): return self.__class__, self.lhs, self.rhs def __eq__(self, other): if not isinstance(other, Lookup): return NotImplemented return self.identity == other.identity def __hash__(self): return hash(make_hashable(self.identity)) def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): c = self.copy() c.is_summary = summarize c.lhs = self.lhs.resolve_expression(query, allow_joins, reuse, summarize, for_save) c.rhs = self.rhs.resolve_expression(query, allow_joins, reuse, summarize, for_save) return c def select_format(self, compiler, sql, params): # Wrap filters with a CASE WHEN expression if a database backend # (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP # BY list. if not compiler.connection.features.supports_boolean_expr_in_select_clause: sql = f'CASE WHEN {sql} THEN 1 ELSE 0 END' return sql, params class Transform(RegisterLookupMixin, Func): """ RegisterLookupMixin() is first so that get_lookup() and get_transform() first examine self and then check output_field. 
""" bilateral = False arity = 1 @property def lhs(self): return self.get_source_expressions()[0] def get_bilateral_transforms(self): if hasattr(self.lhs, 'get_bilateral_transforms'): bilateral_transforms = self.lhs.get_bilateral_transforms() else: bilateral_transforms = [] if self.bilateral: bilateral_transforms.append(self.__class__) return bilateral_transforms class BuiltinLookup(Lookup): def process_lhs(self, compiler, connection, lhs=None): lhs_sql, params = super().process_lhs(compiler, connection, lhs) field_internal_type = self.lhs.output_field.get_internal_type() db_type = self.lhs.output_field.db_type(connection=connection) lhs_sql = connection.ops.field_cast_sql( db_type, field_internal_type) % lhs_sql lhs_sql = connection.ops.lookup_cast(self.lookup_name, field_internal_type) % lhs_sql return lhs_sql, list(params) def as_sql(self, compiler, connection): lhs_sql, params = self.process_lhs(compiler, connection) rhs_sql, rhs_params = self.process_rhs(compiler, connection) params.extend(rhs_params) rhs_sql = self.get_rhs_op(connection, rhs_sql) return '%s %s' % (lhs_sql, rhs_sql), params def get_rhs_op(self, connection, rhs): return connection.operators[self.lookup_name] % rhs class FieldGetDbPrepValueMixin: """ Some lookups require Field.get_db_prep_value() to be called on their inputs. """ get_db_prep_lookup_value_is_iterable = False def get_db_prep_lookup(self, value, connection): # For relational fields, use the 'target_field' attribute of the # output_field. field = getattr(self.lhs.output_field, 'target_field', None) get_db_prep_value = getattr(field, 'get_db_prep_value', None) or self.lhs.output_field.get_db_prep_value return ( '%s', [get_db_prep_value(v, connection, prepared=True) for v in value] if self.get_db_prep_lookup_value_is_iterable else [get_db_prep_value(value, connection, prepared=True)] ) class FieldGetDbPrepValueIterableMixin(FieldGetDbPrepValueMixin): """ Some lookups require Field.get_db_prep_value() to be called on each value in an iterable. """ get_db_prep_lookup_value_is_iterable = True def get_prep_lookup(self): if hasattr(self.rhs, 'resolve_expression'): return self.rhs prepared_values = [] for rhs_value in self.rhs: if hasattr(rhs_value, 'resolve_expression'): # An expression will be handled by the database but can coexist # alongside real values. pass elif self.prepare_rhs and hasattr(self.lhs.output_field, 'get_prep_value'): rhs_value = self.lhs.output_field.get_prep_value(rhs_value) prepared_values.append(rhs_value) return prepared_values def process_rhs(self, compiler, connection): if self.rhs_is_direct_value(): # rhs should be an iterable of values. Use batch_process_rhs() # to prepare/transform those values. return self.batch_process_rhs(compiler, connection) else: return super().process_rhs(compiler, connection) def resolve_expression_parameter(self, compiler, connection, sql, param): params = [param] if hasattr(param, 'resolve_expression'): param = param.resolve_expression(compiler.query) if hasattr(param, 'as_sql'): sql, params = compiler.compile(param) return sql, params def batch_process_rhs(self, compiler, connection, rhs=None): pre_processed = super().batch_process_rhs(compiler, connection, rhs) # The params list may contain expressions which compile to a # sql/param pair. Zip them to get sql and param pairs that refer to the # same argument and attempt to replace them with the result of # compiling the param step. 
sql, params = zip(*( self.resolve_expression_parameter(compiler, connection, sql, param) for sql, param in zip(*pre_processed) )) params = itertools.chain.from_iterable(params) return sql, tuple(params) class PostgresOperatorLookup(FieldGetDbPrepValueMixin, Lookup): """Lookup defined by operators on PostgreSQL.""" postgres_operator = None def as_postgresql(self, compiler, connection): lhs, lhs_params = self.process_lhs(compiler, connection) rhs, rhs_params = self.process_rhs(compiler, connection) params = tuple(lhs_params) + tuple(rhs_params) return '%s %s %s' % (lhs, self.postgres_operator, rhs), params @Field.register_lookup class Exact(FieldGetDbPrepValueMixin, BuiltinLookup): lookup_name = 'exact' def process_rhs(self, compiler, connection): from django.db.models.sql.query import Query if isinstance(self.rhs, Query): if self.rhs.has_limit_one(): if not self.rhs.has_select_fields: self.rhs.clear_select_clause() self.rhs.add_fields(['pk']) else: raise ValueError( 'The QuerySet value for an exact lookup must be limited to ' 'one result using slicing.' ) return super().process_rhs(compiler, connection) def as_sql(self, compiler, connection): # Avoid comparison against direct rhs if lhs is a boolean value. That # turns "boolfield__exact=True" into "WHERE boolean_field" instead of # "WHERE boolean_field = True" when allowed. if ( isinstance(self.rhs, bool) and getattr(self.lhs, 'conditional', False) and connection.ops.conditional_expression_supported_in_where_clause(self.lhs) ): lhs_sql, params = self.process_lhs(compiler, connection) template = '%s' if self.rhs else 'NOT %s' return template % lhs_sql, params return super().as_sql(compiler, connection) @Field.register_lookup class IExact(BuiltinLookup): lookup_name = 'iexact' prepare_rhs = False def process_rhs(self, qn, connection): rhs, params = super().process_rhs(qn, connection) if params: params[0] = connection.ops.prep_for_iexact_query(params[0]) return rhs, params @Field.register_lookup class GreaterThan(FieldGetDbPrepValueMixin, BuiltinLookup): lookup_name = 'gt' @Field.register_lookup class GreaterThanOrEqual(FieldGetDbPrepValueMixin, BuiltinLookup): lookup_name = 'gte' @Field.register_lookup class LessThan(FieldGetDbPrepValueMixin, BuiltinLookup): lookup_name = 'lt' @Field.register_lookup class LessThanOrEqual(FieldGetDbPrepValueMixin, BuiltinLookup): lookup_name = 'lte' class IntegerFieldFloatRounding: """ Allow floats to work as query values for IntegerField. Without this, the decimal portion of the float would always be discarded. """ def get_prep_lookup(self): if isinstance(self.rhs, float): self.rhs = math.ceil(self.rhs) return super().get_prep_lookup() @IntegerField.register_lookup class IntegerGreaterThanOrEqual(IntegerFieldFloatRounding, GreaterThanOrEqual): pass @IntegerField.register_lookup class IntegerLessThan(IntegerFieldFloatRounding, LessThan): pass @Field.register_lookup class In(FieldGetDbPrepValueIterableMixin, BuiltinLookup): lookup_name = 'in' def process_rhs(self, compiler, connection): db_rhs = getattr(self.rhs, '_db', None) if db_rhs is not None and db_rhs != connection.alias: raise ValueError( "Subqueries aren't allowed across different databases. Force " "the inner query to be evaluated using `list(inner_query)`." ) if self.rhs_is_direct_value(): # Remove None from the list as NULL is never equal to anything. 
try: rhs = OrderedSet(self.rhs) rhs.discard(None) except TypeError: # Unhashable items in self.rhs rhs = [r for r in self.rhs if r is not None] if not rhs: raise EmptyResultSet # rhs should be an iterable; use batch_process_rhs() to # prepare/transform those values. sqls, sqls_params = self.batch_process_rhs(compiler, connection, rhs) placeholder = '(' + ', '.join(sqls) + ')' return (placeholder, sqls_params) else: from django.db.models.sql.query import ( # avoid circular import Query, ) if isinstance(self.rhs, Query): query = self.rhs query.clear_ordering(clear_default=True) if not query.has_select_fields: query.clear_select_clause() query.add_fields(['pk']) return super().process_rhs(compiler, connection) def get_group_by_cols(self, alias=None): cols = self.lhs.get_group_by_cols() if hasattr(self.rhs, 'get_group_by_cols'): if not getattr(self.rhs, 'has_select_fields', True): self.rhs.clear_select_clause() self.rhs.add_fields(['pk']) cols.extend(self.rhs.get_group_by_cols()) return cols def get_rhs_op(self, connection, rhs): return 'IN %s' % rhs def as_sql(self, compiler, connection): max_in_list_size = connection.ops.max_in_list_size() if self.rhs_is_direct_value() and max_in_list_size and len(self.rhs) > max_in_list_size: return self.split_parameter_list_as_sql(compiler, connection) return super().as_sql(compiler, connection) def split_parameter_list_as_sql(self, compiler, connection): # This is a special case for databases which limit the number of # elements which can appear in an 'IN' clause. max_in_list_size = connection.ops.max_in_list_size() lhs, lhs_params = self.process_lhs(compiler, connection) rhs, rhs_params = self.batch_process_rhs(compiler, connection) in_clause_elements = ['('] params = [] for offset in range(0, len(rhs_params), max_in_list_size): if offset > 0: in_clause_elements.append(' OR ') in_clause_elements.append('%s IN (' % lhs) params.extend(lhs_params) sqls = rhs[offset: offset + max_in_list_size] sqls_params = rhs_params[offset: offset + max_in_list_size] param_group = ', '.join(sqls) in_clause_elements.append(param_group) in_clause_elements.append(')') params.extend(sqls_params) in_clause_elements.append(')') return ''.join(in_clause_elements), params class PatternLookup(BuiltinLookup): param_pattern = '%%%s%%' prepare_rhs = False def get_rhs_op(self, connection, rhs): # Assume we are in startswith. We need to produce SQL like: # col LIKE %s, ['thevalue%'] # For python values we can (and should) do that directly in Python, # but if the value is for example reference to other column, then # we need to add the % pattern match to the lookup by something like # col LIKE othercol || '%%' # So, for Python values we don't need any special pattern, but for # SQL reference values or SQL transformations we need the correct # pattern added. 
if hasattr(self.rhs, 'as_sql') or self.bilateral_transforms: pattern = connection.pattern_ops[self.lookup_name].format(connection.pattern_esc) return pattern.format(rhs) else: return super().get_rhs_op(connection, rhs) def process_rhs(self, qn, connection): rhs, params = super().process_rhs(qn, connection) if self.rhs_is_direct_value() and params and not self.bilateral_transforms: params[0] = self.param_pattern % connection.ops.prep_for_like_query(params[0]) return rhs, params @Field.register_lookup class Contains(PatternLookup): lookup_name = 'contains' @Field.register_lookup class IContains(Contains): lookup_name = 'icontains' @Field.register_lookup class StartsWith(PatternLookup): lookup_name = 'startswith' param_pattern = '%s%%' @Field.register_lookup class IStartsWith(StartsWith): lookup_name = 'istartswith' @Field.register_lookup class EndsWith(PatternLookup): lookup_name = 'endswith' param_pattern = '%%%s' @Field.register_lookup class IEndsWith(EndsWith): lookup_name = 'iendswith' @Field.register_lookup class Range(FieldGetDbPrepValueIterableMixin, BuiltinLookup): lookup_name = 'range' def get_rhs_op(self, connection, rhs): return "BETWEEN %s AND %s" % (rhs[0], rhs[1]) @Field.register_lookup class IsNull(BuiltinLookup): lookup_name = 'isnull' prepare_rhs = False def as_sql(self, compiler, connection): if not isinstance(self.rhs, bool): raise ValueError( 'The QuerySet value for an isnull lookup must be True or ' 'False.' ) sql, params = compiler.compile(self.lhs) if self.rhs: return "%s IS NULL" % sql, params else: return "%s IS NOT NULL" % sql, params @Field.register_lookup class Regex(BuiltinLookup): lookup_name = 'regex' prepare_rhs = False def as_sql(self, compiler, connection): if self.lookup_name in connection.operators: return super().as_sql(compiler, connection) else: lhs, lhs_params = self.process_lhs(compiler, connection) rhs, rhs_params = self.process_rhs(compiler, connection) sql_template = connection.ops.regex_lookup(self.lookup_name) return sql_template % (lhs, rhs), lhs_params + rhs_params @Field.register_lookup class IRegex(Regex): lookup_name = 'iregex' class YearLookup(Lookup): def year_lookup_bounds(self, connection, year): from django.db.models.functions import ExtractIsoYear iso_year = isinstance(self.lhs, ExtractIsoYear) output_field = self.lhs.lhs.output_field if isinstance(output_field, DateTimeField): bounds = connection.ops.year_lookup_bounds_for_datetime_field( year, iso_year=iso_year, ) else: bounds = connection.ops.year_lookup_bounds_for_date_field( year, iso_year=iso_year, ) return bounds def as_sql(self, compiler, connection): # Avoid the extract operation if the rhs is a direct value to allow # indexes to be used. if self.rhs_is_direct_value(): # Skip the extract part by directly using the originating field, # that is self.lhs.lhs. 
lhs_sql, params = self.process_lhs(compiler, connection, self.lhs.lhs) rhs_sql, _ = self.process_rhs(compiler, connection) rhs_sql = self.get_direct_rhs_sql(connection, rhs_sql) start, finish = self.year_lookup_bounds(connection, self.rhs) params.extend(self.get_bound_params(start, finish)) return '%s %s' % (lhs_sql, rhs_sql), params return super().as_sql(compiler, connection) def get_direct_rhs_sql(self, connection, rhs): return connection.operators[self.lookup_name] % rhs def get_bound_params(self, start, finish): raise NotImplementedError( 'subclasses of YearLookup must provide a get_bound_params() method' ) class YearExact(YearLookup, Exact): def get_direct_rhs_sql(self, connection, rhs): return 'BETWEEN %s AND %s' def get_bound_params(self, start, finish): return (start, finish) class YearGt(YearLookup, GreaterThan): def get_bound_params(self, start, finish): return (finish,) class YearGte(YearLookup, GreaterThanOrEqual): def get_bound_params(self, start, finish): return (start,) class YearLt(YearLookup, LessThan): def get_bound_params(self, start, finish): return (start,) class YearLte(YearLookup, LessThanOrEqual): def get_bound_params(self, start, finish): return (finish,) class UUIDTextMixin: """ Strip hyphens from a value when filtering a UUIDField on backends without a native datatype for UUID. """ def process_rhs(self, qn, connection): if not connection.features.has_native_uuid_field: from django.db.models.functions import Replace if self.rhs_is_direct_value(): self.rhs = Value(self.rhs) self.rhs = Replace(self.rhs, Value('-'), Value(''), output_field=CharField()) rhs, params = super().process_rhs(qn, connection) return rhs, params @UUIDField.register_lookup class UUIDIExact(UUIDTextMixin, IExact): pass @UUIDField.register_lookup class UUIDContains(UUIDTextMixin, Contains): pass @UUIDField.register_lookup class UUIDIContains(UUIDTextMixin, IContains): pass @UUIDField.register_lookup class UUIDStartsWith(UUIDTextMixin, StartsWith): pass @UUIDField.register_lookup class UUIDIStartsWith(UUIDTextMixin, IStartsWith): pass @UUIDField.register_lookup class UUIDEndsWith(UUIDTextMixin, EndsWith): pass @UUIDField.register_lookup class UUIDIEndsWith(UUIDTextMixin, IEndsWith): pass
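

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the file above): a minimal custom lookup
# built on the machinery in this module, following the pattern of the
# built-ins. 'NotEqual' is a hypothetical example, not a Django built-in.
@Field.register_lookup
class NotEqual(Lookup):
    """Support field__ne=value, compiling to "lhs <> %s"."""
    lookup_name = 'ne'

    def as_sql(self, compiler, connection):
        lhs, lhs_params = self.process_lhs(compiler, connection)
        rhs, rhs_params = self.process_rhs(compiler, connection)
        return '%s <> %s' % (lhs, rhs), list(lhs_params) + list(rhs_params)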
# ---- file boundary: the next blob appears to be django/db/models/constraints.py ----
from enum import Enum from django.db.models.expressions import ExpressionList, F from django.db.models.indexes import IndexExpression from django.db.models.query_utils import Q from django.db.models.sql.query import Query __all__ = ['CheckConstraint', 'Deferrable', 'UniqueConstraint'] class BaseConstraint: def __init__(self, name): self.name = name @property def contains_expressions(self): return False def constraint_sql(self, model, schema_editor): raise NotImplementedError('This method must be implemented by a subclass.') def create_sql(self, model, schema_editor): raise NotImplementedError('This method must be implemented by a subclass.') def remove_sql(self, model, schema_editor): raise NotImplementedError('This method must be implemented by a subclass.') def deconstruct(self): path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__) path = path.replace('django.db.models.constraints', 'django.db.models') return (path, (), {'name': self.name}) def clone(self): _, args, kwargs = self.deconstruct() return self.__class__(*args, **kwargs) class CheckConstraint(BaseConstraint): def __init__(self, *, check, name): self.check = check if not getattr(check, 'conditional', False): raise TypeError( 'CheckConstraint.check must be a Q instance or boolean ' 'expression.' ) super().__init__(name) def _get_check_sql(self, model, schema_editor): query = Query(model=model, alias_cols=False) where = query.build_where(self.check) compiler = query.get_compiler(connection=schema_editor.connection) sql, params = where.as_sql(compiler, schema_editor.connection) return sql % tuple(schema_editor.quote_value(p) for p in params) def constraint_sql(self, model, schema_editor): check = self._get_check_sql(model, schema_editor) return schema_editor._check_sql(self.name, check) def create_sql(self, model, schema_editor): check = self._get_check_sql(model, schema_editor) return schema_editor._create_check_sql(model, self.name, check) def remove_sql(self, model, schema_editor): return schema_editor._delete_check_sql(model, self.name) def __repr__(self): return '<%s: check=%s name=%s>' % ( self.__class__.__qualname__, self.check, repr(self.name), ) def __eq__(self, other): if isinstance(other, CheckConstraint): return self.name == other.name and self.check == other.check return super().__eq__(other) def deconstruct(self): path, args, kwargs = super().deconstruct() kwargs['check'] = self.check return path, args, kwargs class Deferrable(Enum): DEFERRED = 'deferred' IMMEDIATE = 'immediate' # A similar format was proposed for Python 3.10. def __repr__(self): return f'{self.__class__.__qualname__}.{self._name_}' class UniqueConstraint(BaseConstraint): def __init__( self, *expressions, fields=(), name=None, condition=None, deferrable=None, include=None, opclasses=(), ): if not name: raise ValueError('A unique constraint must be named.') if not expressions and not fields: raise ValueError( 'At least one field or expression is required to define a ' 'unique constraint.' ) if expressions and fields: raise ValueError( 'UniqueConstraint.fields and expressions are mutually exclusive.' ) if not isinstance(condition, (type(None), Q)): raise ValueError('UniqueConstraint.condition must be a Q instance.') if condition and deferrable: raise ValueError( 'UniqueConstraint with conditions cannot be deferred.' ) if include and deferrable: raise ValueError( 'UniqueConstraint with include fields cannot be deferred.' ) if opclasses and deferrable: raise ValueError( 'UniqueConstraint with opclasses cannot be deferred.' 
) if expressions and deferrable: raise ValueError( 'UniqueConstraint with expressions cannot be deferred.' ) if expressions and opclasses: raise ValueError( 'UniqueConstraint.opclasses cannot be used with expressions. ' 'Use django.contrib.postgres.indexes.OpClass() instead.' ) if not isinstance(deferrable, (type(None), Deferrable)): raise ValueError( 'UniqueConstraint.deferrable must be a Deferrable instance.' ) if not isinstance(include, (type(None), list, tuple)): raise ValueError('UniqueConstraint.include must be a list or tuple.') if not isinstance(opclasses, (list, tuple)): raise ValueError('UniqueConstraint.opclasses must be a list or tuple.') if opclasses and len(fields) != len(opclasses): raise ValueError( 'UniqueConstraint.fields and UniqueConstraint.opclasses must ' 'have the same number of elements.' ) self.fields = tuple(fields) self.condition = condition self.deferrable = deferrable self.include = tuple(include) if include else () self.opclasses = opclasses self.expressions = tuple( F(expression) if isinstance(expression, str) else expression for expression in expressions ) super().__init__(name) @property def contains_expressions(self): return bool(self.expressions) def _get_condition_sql(self, model, schema_editor): if self.condition is None: return None query = Query(model=model, alias_cols=False) where = query.build_where(self.condition) compiler = query.get_compiler(connection=schema_editor.connection) sql, params = where.as_sql(compiler, schema_editor.connection) return sql % tuple(schema_editor.quote_value(p) for p in params) def _get_index_expressions(self, model, schema_editor): if not self.expressions: return None index_expressions = [] for expression in self.expressions: index_expression = IndexExpression(expression) index_expression.set_wrapper_classes(schema_editor.connection) index_expressions.append(index_expression) return ExpressionList(*index_expressions).resolve_expression( Query(model, alias_cols=False), ) def constraint_sql(self, model, schema_editor): fields = [model._meta.get_field(field_name) for field_name in self.fields] include = [model._meta.get_field(field_name).column for field_name in self.include] condition = self._get_condition_sql(model, schema_editor) expressions = self._get_index_expressions(model, schema_editor) return schema_editor._unique_sql( model, fields, self.name, condition=condition, deferrable=self.deferrable, include=include, opclasses=self.opclasses, expressions=expressions, ) def create_sql(self, model, schema_editor): fields = [model._meta.get_field(field_name) for field_name in self.fields] include = [model._meta.get_field(field_name).column for field_name in self.include] condition = self._get_condition_sql(model, schema_editor) expressions = self._get_index_expressions(model, schema_editor) return schema_editor._create_unique_sql( model, fields, self.name, condition=condition, deferrable=self.deferrable, include=include, opclasses=self.opclasses, expressions=expressions, ) def remove_sql(self, model, schema_editor): condition = self._get_condition_sql(model, schema_editor) include = [model._meta.get_field(field_name).column for field_name in self.include] expressions = self._get_index_expressions(model, schema_editor) return schema_editor._delete_unique_sql( model, self.name, condition=condition, deferrable=self.deferrable, include=include, opclasses=self.opclasses, expressions=expressions, ) def __repr__(self): return '<%s:%s%s%s%s%s%s%s>' % ( self.__class__.__qualname__, '' if not self.fields else ' fields=%s' % 
repr(self.fields), '' if not self.expressions else ' expressions=%s' % repr(self.expressions), ' name=%s' % repr(self.name), '' if self.condition is None else ' condition=%s' % self.condition, '' if self.deferrable is None else ' deferrable=%r' % self.deferrable, '' if not self.include else ' include=%s' % repr(self.include), '' if not self.opclasses else ' opclasses=%s' % repr(self.opclasses), ) def __eq__(self, other): if isinstance(other, UniqueConstraint): return ( self.name == other.name and self.fields == other.fields and self.condition == other.condition and self.deferrable == other.deferrable and self.include == other.include and self.opclasses == other.opclasses and self.expressions == other.expressions ) return super().__eq__(other) def deconstruct(self): path, args, kwargs = super().deconstruct() if self.fields: kwargs['fields'] = self.fields if self.condition: kwargs['condition'] = self.condition if self.deferrable: kwargs['deferrable'] = self.deferrable if self.include: kwargs['include'] = self.include if self.opclasses: kwargs['opclasses'] = self.opclasses return path, self.expressions, kwargs
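

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the file above): how these constraints
# are typically declared on a model. The "Booking" model and its fields are
# hypothetical; shown as comments since a model class needs an installed app.
#
#   class Booking(models.Model):
#       room = models.ForeignKey('Room', on_delete=models.CASCADE)
#       starts_at = models.DateTimeField()
#       ends_at = models.DateTimeField()
#
#       class Meta:
#           constraints = [
#               # Reject rows where the interval is inverted.
#               models.CheckConstraint(
#                   check=models.Q(ends_at__gt=models.F('starts_at')),
#                   name='booking_ends_after_start',
#               ),
#               # One booking per room per start time.
#               models.UniqueConstraint(
#                   fields=['room', 'starts_at'],
#                   name='unique_room_start',
#               ),
#           ]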
# ---- file boundary: the next blob appears to be django/db/backends/utils.py ----
import datetime import decimal import functools import hashlib import logging import time from contextlib import contextmanager from django.db import NotSupportedError logger = logging.getLogger('django.db.backends') class CursorWrapper: def __init__(self, cursor, db): self.cursor = cursor self.db = db WRAP_ERROR_ATTRS = frozenset(['fetchone', 'fetchmany', 'fetchall', 'nextset']) def __getattr__(self, attr): cursor_attr = getattr(self.cursor, attr) if attr in CursorWrapper.WRAP_ERROR_ATTRS: return self.db.wrap_database_errors(cursor_attr) else: return cursor_attr def __iter__(self): with self.db.wrap_database_errors: yield from self.cursor def __enter__(self): return self def __exit__(self, type, value, traceback): # Close instead of passing through to avoid backend-specific behavior # (#17671). Catch errors liberally because errors in cleanup code # aren't useful. try: self.close() except self.db.Database.Error: pass # The following methods cannot be implemented in __getattr__, because the # code must run when the method is invoked, not just when it is accessed. def callproc(self, procname, params=None, kparams=None): # Keyword parameters for callproc aren't supported in PEP 249, but the # database driver may support them (e.g. cx_Oracle). if kparams is not None and not self.db.features.supports_callproc_kwargs: raise NotSupportedError( 'Keyword parameters for callproc are not supported on this ' 'database backend.' ) self.db.validate_no_broken_transaction() with self.db.wrap_database_errors: if params is None and kparams is None: return self.cursor.callproc(procname) elif kparams is None: return self.cursor.callproc(procname, params) else: params = params or () return self.cursor.callproc(procname, params, kparams) def execute(self, sql, params=None): return self._execute_with_wrappers(sql, params, many=False, executor=self._execute) def executemany(self, sql, param_list): return self._execute_with_wrappers(sql, param_list, many=True, executor=self._executemany) def _execute_with_wrappers(self, sql, params, many, executor): context = {'connection': self.db, 'cursor': self} for wrapper in reversed(self.db.execute_wrappers): executor = functools.partial(wrapper, executor) return executor(sql, params, many, context) def _execute(self, sql, params, *ignored_wrapper_args): self.db.validate_no_broken_transaction() with self.db.wrap_database_errors: if params is None: # params default might be backend specific. return self.cursor.execute(sql) else: return self.cursor.execute(sql, params) def _executemany(self, sql, param_list, *ignored_wrapper_args): self.db.validate_no_broken_transaction() with self.db.wrap_database_errors: return self.cursor.executemany(sql, param_list) class CursorDebugWrapper(CursorWrapper): # XXX callproc isn't instrumented at this time. def execute(self, sql, params=None): with self.debug_sql(sql, params, use_last_executed_query=True): return super().execute(sql, params) def executemany(self, sql, param_list): with self.debug_sql(sql, param_list, many=True): return super().executemany(sql, param_list) @contextmanager def debug_sql(self, sql=None, params=None, use_last_executed_query=False, many=False): start = time.monotonic() try: yield finally: stop = time.monotonic() duration = stop - start if use_last_executed_query: sql = self.db.ops.last_executed_query(self.cursor, sql, params) try: times = len(params) if many else '' except TypeError: # params could be an iterator. times = '?' 
self.db.queries_log.append({ 'sql': '%s times: %s' % (times, sql) if many else sql, 'time': '%.3f' % duration, }) logger.debug( '(%.3f) %s; args=%s; alias=%s', duration, sql, params, self.db.alias, extra={'duration': duration, 'sql': sql, 'params': params, 'alias': self.db.alias}, ) ############################################### # Converters from database (string) to Python # ############################################### def typecast_date(s): return datetime.date(*map(int, s.split('-'))) if s else None # return None if s is null def typecast_time(s): # does NOT store time zone information if not s: return None hour, minutes, seconds = s.split(':') if '.' in seconds: # check whether seconds have a fractional part seconds, microseconds = seconds.split('.') else: microseconds = '0' return datetime.time(int(hour), int(minutes), int(seconds), int((microseconds + '000000')[:6])) def typecast_timestamp(s): # does NOT store time zone information # "2005-07-29 15:48:00.590358-05" # "2005-07-29 09:56:00-05" if not s: return None if ' ' not in s: return typecast_date(s) d, t = s.split() # Remove timezone information. if '-' in t: t, _ = t.split('-', 1) elif '+' in t: t, _ = t.split('+', 1) dates = d.split('-') times = t.split(':') seconds = times[2] if '.' in seconds: # check whether seconds have a fractional part seconds, microseconds = seconds.split('.') else: microseconds = '0' return datetime.datetime( int(dates[0]), int(dates[1]), int(dates[2]), int(times[0]), int(times[1]), int(seconds), int((microseconds + '000000')[:6]) ) ############################################### # Converters from Python to database (string) # ############################################### def split_identifier(identifier): """ Split an SQL identifier into a two element tuple of (namespace, name). The identifier could be a table, column, or sequence name might be prefixed by a namespace. """ try: namespace, name = identifier.split('"."') except ValueError: namespace, name = '', identifier return namespace.strip('"'), name.strip('"') def truncate_name(identifier, length=None, hash_len=4): """ Shorten an SQL identifier to a repeatable mangled version with the given length. If a quote stripped name contains a namespace, e.g. USERNAME"."TABLE, truncate the table portion only. """ namespace, name = split_identifier(identifier) if length is None or len(name) <= length: return identifier digest = names_digest(name, length=hash_len) return '%s%s%s' % ('%s"."' % namespace if namespace else '', name[:length - hash_len], digest) def names_digest(*args, length): """ Generate a 32-bit digest of a set of arguments that can be used to shorten identifying names. """ h = hashlib.md5() for arg in args: h.update(arg.encode()) return h.hexdigest()[:length] def format_number(value, max_digits, decimal_places): """ Format a number into a string with the requisite number of digits and decimal places. """ if value is None: return None context = decimal.getcontext().copy() if max_digits is not None: context.prec = max_digits if decimal_places is not None: value = value.quantize(decimal.Decimal(1).scaleb(-decimal_places), context=context) else: context.traps[decimal.Rounded] = 1 value = context.create_decimal(value) return "{:f}".format(value) def strip_quotes(table_name): """ Strip quotes off of quoted table names to make them safe for use in index names, sequence names, etc. For example '"USER"."TABLE"' (an Oracle naming scheme) becomes 'USER"."TABLE'. 
""" has_quotes = table_name.startswith('"') and table_name.endswith('"') return table_name[1:-1] if has_quotes else table_name
# ---- file boundary: the next blob appears to be django/db/migrations/operations/models.py ----
from django.db import models from django.db.migrations.operations.base import Operation from django.db.migrations.state import ModelState from django.db.migrations.utils import field_references, resolve_relation from django.db.models.options import normalize_together from django.utils.functional import cached_property from .fields import ( AddField, AlterField, FieldOperation, RemoveField, RenameField, ) def _check_for_duplicates(arg_name, objs): used_vals = set() for val in objs: if val in used_vals: raise ValueError( "Found duplicate value %s in CreateModel %s argument." % (val, arg_name) ) used_vals.add(val) class ModelOperation(Operation): def __init__(self, name): self.name = name @cached_property def name_lower(self): return self.name.lower() def references_model(self, name, app_label): return name.lower() == self.name_lower def reduce(self, operation, app_label): return ( super().reduce(operation, app_label) or not operation.references_model(self.name, app_label) ) class CreateModel(ModelOperation): """Create a model's table.""" serialization_expand_args = ['fields', 'options', 'managers'] def __init__(self, name, fields, options=None, bases=None, managers=None): self.fields = fields self.options = options or {} self.bases = bases or (models.Model,) self.managers = managers or [] super().__init__(name) # Sanity-check that there are no duplicated field names, bases, or # manager names _check_for_duplicates('fields', (name for name, _ in self.fields)) _check_for_duplicates('bases', ( base._meta.label_lower if hasattr(base, '_meta') else base.lower() if isinstance(base, str) else base for base in self.bases )) _check_for_duplicates('managers', (name for name, _ in self.managers)) def deconstruct(self): kwargs = { 'name': self.name, 'fields': self.fields, } if self.options: kwargs['options'] = self.options if self.bases and self.bases != (models.Model,): kwargs['bases'] = self.bases if self.managers and self.managers != [('objects', models.Manager())]: kwargs['managers'] = self.managers return ( self.__class__.__qualname__, [], kwargs ) def state_forwards(self, app_label, state): state.add_model(ModelState( app_label, self.name, list(self.fields), dict(self.options), tuple(self.bases), list(self.managers), )) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.create_model(model) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = from_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.delete_model(model) def describe(self): return "Create %smodel %s" % ("proxy " if self.options.get("proxy", False) else "", self.name) @property def migration_name_fragment(self): return self.name_lower def references_model(self, name, app_label): name_lower = name.lower() if name_lower == self.name_lower: return True # Check we didn't inherit from the model reference_model_tuple = (app_label, name_lower) for base in self.bases: if (base is not models.Model and isinstance(base, (models.base.ModelBase, str)) and resolve_relation(base, app_label) == reference_model_tuple): return True # Check we have no FKs/M2Ms with it for _name, field in self.fields: if field_references((app_label, self.name_lower), field, reference_model_tuple): return True return False def reduce(self, operation, app_label): if (isinstance(operation, DeleteModel) and 
self.name_lower == operation.name_lower and not self.options.get("proxy", False)): return [] elif isinstance(operation, RenameModel) and self.name_lower == operation.old_name_lower: return [ CreateModel( operation.new_name, fields=self.fields, options=self.options, bases=self.bases, managers=self.managers, ), ] elif isinstance(operation, AlterModelOptions) and self.name_lower == operation.name_lower: options = {**self.options, **operation.options} for key in operation.ALTER_OPTION_KEYS: if key not in operation.options: options.pop(key, None) return [ CreateModel( self.name, fields=self.fields, options=options, bases=self.bases, managers=self.managers, ), ] elif isinstance(operation, AlterTogetherOptionOperation) and self.name_lower == operation.name_lower: return [ CreateModel( self.name, fields=self.fields, options={**self.options, **{operation.option_name: operation.option_value}}, bases=self.bases, managers=self.managers, ), ] elif isinstance(operation, AlterOrderWithRespectTo) and self.name_lower == operation.name_lower: return [ CreateModel( self.name, fields=self.fields, options={**self.options, 'order_with_respect_to': operation.order_with_respect_to}, bases=self.bases, managers=self.managers, ), ] elif isinstance(operation, FieldOperation) and self.name_lower == operation.model_name_lower: if isinstance(operation, AddField): return [ CreateModel( self.name, fields=self.fields + [(operation.name, operation.field)], options=self.options, bases=self.bases, managers=self.managers, ), ] elif isinstance(operation, AlterField): return [ CreateModel( self.name, fields=[ (n, operation.field if n == operation.name else v) for n, v in self.fields ], options=self.options, bases=self.bases, managers=self.managers, ), ] elif isinstance(operation, RemoveField): options = self.options.copy() for option_name in ('unique_together', 'index_together'): option = options.pop(option_name, None) if option: option = set(filter(bool, ( tuple(f for f in fields if f != operation.name_lower) for fields in option ))) if option: options[option_name] = option order_with_respect_to = options.get('order_with_respect_to') if order_with_respect_to == operation.name_lower: del options['order_with_respect_to'] return [ CreateModel( self.name, fields=[ (n, v) for n, v in self.fields if n.lower() != operation.name_lower ], options=options, bases=self.bases, managers=self.managers, ), ] elif isinstance(operation, RenameField): options = self.options.copy() for option_name in ('unique_together', 'index_together'): option = options.get(option_name) if option: options[option_name] = { tuple(operation.new_name if f == operation.old_name else f for f in fields) for fields in option } order_with_respect_to = options.get('order_with_respect_to') if order_with_respect_to == operation.old_name: options['order_with_respect_to'] = operation.new_name return [ CreateModel( self.name, fields=[ (operation.new_name if n == operation.old_name else n, v) for n, v in self.fields ], options=options, bases=self.bases, managers=self.managers, ), ] return super().reduce(operation, app_label) class DeleteModel(ModelOperation): """Drop a model's table.""" def deconstruct(self): kwargs = { 'name': self.name, } return ( self.__class__.__qualname__, [], kwargs ) def state_forwards(self, app_label, state): state.remove_model(app_label, self.name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = from_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): 
schema_editor.delete_model(model) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.create_model(model) def references_model(self, name, app_label): # The deleted model could be referencing the specified model through # related fields. return True def describe(self): return "Delete model %s" % self.name @property def migration_name_fragment(self): return 'delete_%s' % self.name_lower class RenameModel(ModelOperation): """Rename a model.""" def __init__(self, old_name, new_name): self.old_name = old_name self.new_name = new_name super().__init__(old_name) @cached_property def old_name_lower(self): return self.old_name.lower() @cached_property def new_name_lower(self): return self.new_name.lower() def deconstruct(self): kwargs = { 'old_name': self.old_name, 'new_name': self.new_name, } return ( self.__class__.__qualname__, [], kwargs ) def state_forwards(self, app_label, state): state.rename_model(app_label, self.old_name, self.new_name) def database_forwards(self, app_label, schema_editor, from_state, to_state): new_model = to_state.apps.get_model(app_label, self.new_name) if self.allow_migrate_model(schema_editor.connection.alias, new_model): old_model = from_state.apps.get_model(app_label, self.old_name) # Move the main table schema_editor.alter_db_table( new_model, old_model._meta.db_table, new_model._meta.db_table, ) # Alter the fields pointing to us for related_object in old_model._meta.related_objects: if related_object.related_model == old_model: model = new_model related_key = (app_label, self.new_name_lower) else: model = related_object.related_model related_key = ( related_object.related_model._meta.app_label, related_object.related_model._meta.model_name, ) to_field = to_state.apps.get_model( *related_key )._meta.get_field(related_object.field.name) schema_editor.alter_field( model, related_object.field, to_field, ) # Rename M2M fields whose name is based on this model's name. fields = zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many) for (old_field, new_field) in fields: # Skip self-referential fields as these are renamed above. if new_field.model == new_field.related_model or not new_field.remote_field.through._meta.auto_created: continue # Rename the M2M table that's based on this model's name. old_m2m_model = old_field.remote_field.through new_m2m_model = new_field.remote_field.through schema_editor.alter_db_table( new_m2m_model, old_m2m_model._meta.db_table, new_m2m_model._meta.db_table, ) # Rename the column in the M2M table that's based on this # model's name. 
schema_editor.alter_field( new_m2m_model, old_m2m_model._meta.get_field(old_model._meta.model_name), new_m2m_model._meta.get_field(new_model._meta.model_name), ) def database_backwards(self, app_label, schema_editor, from_state, to_state): self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower self.new_name, self.old_name = self.old_name, self.new_name self.database_forwards(app_label, schema_editor, from_state, to_state) self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower self.new_name, self.old_name = self.old_name, self.new_name def references_model(self, name, app_label): return ( name.lower() == self.old_name_lower or name.lower() == self.new_name_lower ) def describe(self): return "Rename model %s to %s" % (self.old_name, self.new_name) @property def migration_name_fragment(self): return 'rename_%s_%s' % (self.old_name_lower, self.new_name_lower) def reduce(self, operation, app_label): if (isinstance(operation, RenameModel) and self.new_name_lower == operation.old_name_lower): return [ RenameModel( self.old_name, operation.new_name, ), ] # Skip `ModelOperation.reduce` as we want to run `references_model` # against self.new_name. return ( super(ModelOperation, self).reduce(operation, app_label) or not operation.references_model(self.new_name, app_label) ) class ModelOptionOperation(ModelOperation): def reduce(self, operation, app_label): if isinstance(operation, (self.__class__, DeleteModel)) and self.name_lower == operation.name_lower: return [operation] return super().reduce(operation, app_label) class AlterModelTable(ModelOptionOperation): """Rename a model's table.""" def __init__(self, name, table): self.table = table super().__init__(name) def deconstruct(self): kwargs = { 'name': self.name, 'table': self.table, } return ( self.__class__.__qualname__, [], kwargs ) def state_forwards(self, app_label, state): state.alter_model_options(app_label, self.name_lower, {'db_table': self.table}) def database_forwards(self, app_label, schema_editor, from_state, to_state): new_model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, new_model): old_model = from_state.apps.get_model(app_label, self.name) schema_editor.alter_db_table( new_model, old_model._meta.db_table, new_model._meta.db_table, ) # Rename M2M fields whose name is based on this model's db_table for (old_field, new_field) in zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many): if new_field.remote_field.through._meta.auto_created: schema_editor.alter_db_table( new_field.remote_field.through, old_field.remote_field.through._meta.db_table, new_field.remote_field.through._meta.db_table, ) def database_backwards(self, app_label, schema_editor, from_state, to_state): return self.database_forwards(app_label, schema_editor, from_state, to_state) def describe(self): return "Rename table for %s to %s" % ( self.name, self.table if self.table is not None else "(default)" ) @property def migration_name_fragment(self): return 'alter_%s_table' % self.name_lower class AlterTogetherOptionOperation(ModelOptionOperation): option_name = None def __init__(self, name, option_value): if option_value: option_value = set(normalize_together(option_value)) setattr(self, self.option_name, option_value) super().__init__(name) @cached_property def option_value(self): return getattr(self, self.option_name) def deconstruct(self): kwargs = { 'name': self.name, self.option_name: self.option_value, } return ( 
self.__class__.__qualname__, [], kwargs ) def state_forwards(self, app_label, state): state.alter_model_options( app_label, self.name_lower, {self.option_name: self.option_value}, ) def database_forwards(self, app_label, schema_editor, from_state, to_state): new_model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, new_model): old_model = from_state.apps.get_model(app_label, self.name) alter_together = getattr(schema_editor, 'alter_%s' % self.option_name) alter_together( new_model, getattr(old_model._meta, self.option_name, set()), getattr(new_model._meta, self.option_name, set()), ) def database_backwards(self, app_label, schema_editor, from_state, to_state): return self.database_forwards(app_label, schema_editor, from_state, to_state) def references_field(self, model_name, name, app_label): return ( self.references_model(model_name, app_label) and ( not self.option_value or any((name in fields) for fields in self.option_value) ) ) def describe(self): return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.option_value or '')) @property def migration_name_fragment(self): return 'alter_%s_%s' % (self.name_lower, self.option_name) class AlterUniqueTogether(AlterTogetherOptionOperation): """ Change the value of unique_together to the target one. Input value of unique_together must be a set of tuples. """ option_name = 'unique_together' def __init__(self, name, unique_together): super().__init__(name, unique_together) class AlterIndexTogether(AlterTogetherOptionOperation): """ Change the value of index_together to the target one. Input value of index_together must be a set of tuples. """ option_name = "index_together" def __init__(self, name, index_together): super().__init__(name, index_together) class AlterOrderWithRespectTo(ModelOptionOperation): """Represent a change with the order_with_respect_to option.""" option_name = 'order_with_respect_to' def __init__(self, name, order_with_respect_to): self.order_with_respect_to = order_with_respect_to super().__init__(name) def deconstruct(self): kwargs = { 'name': self.name, 'order_with_respect_to': self.order_with_respect_to, } return ( self.__class__.__qualname__, [], kwargs ) def state_forwards(self, app_label, state): state.alter_model_options( app_label, self.name_lower, {self.option_name: self.order_with_respect_to}, ) def database_forwards(self, app_label, schema_editor, from_state, to_state): to_model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, to_model): from_model = from_state.apps.get_model(app_label, self.name) # Remove a field if we need to if from_model._meta.order_with_respect_to and not to_model._meta.order_with_respect_to: schema_editor.remove_field(from_model, from_model._meta.get_field("_order")) # Add a field if we need to (altering the column is untouched as # it's likely a rename) elif to_model._meta.order_with_respect_to and not from_model._meta.order_with_respect_to: field = to_model._meta.get_field("_order") if not field.has_default(): field.default = 0 schema_editor.add_field( from_model, field, ) def database_backwards(self, app_label, schema_editor, from_state, to_state): self.database_forwards(app_label, schema_editor, from_state, to_state) def references_field(self, model_name, name, app_label): return ( self.references_model(model_name, app_label) and ( self.order_with_respect_to is None or name == self.order_with_respect_to ) ) def describe(self): return "Set 
order_with_respect_to on %s to %s" % (self.name, self.order_with_respect_to) @property def migration_name_fragment(self): return 'alter_%s_order_with_respect_to' % self.name_lower class AlterModelOptions(ModelOptionOperation): """ Set new model options that don't directly affect the database schema (like verbose_name, permissions, ordering). Python code in migrations may still need them. """ # Model options we want to compare and preserve in an AlterModelOptions op ALTER_OPTION_KEYS = [ "base_manager_name", "default_manager_name", "default_related_name", "get_latest_by", "managed", "ordering", "permissions", "default_permissions", "select_on_save", "verbose_name", "verbose_name_plural", ] def __init__(self, name, options): self.options = options super().__init__(name) def deconstruct(self): kwargs = { 'name': self.name, 'options': self.options, } return ( self.__class__.__qualname__, [], kwargs ) def state_forwards(self, app_label, state): state.alter_model_options( app_label, self.name_lower, self.options, self.ALTER_OPTION_KEYS, ) def database_forwards(self, app_label, schema_editor, from_state, to_state): pass def database_backwards(self, app_label, schema_editor, from_state, to_state): pass def describe(self): return "Change Meta options on %s" % self.name @property def migration_name_fragment(self): return 'alter_%s_options' % self.name_lower class AlterModelManagers(ModelOptionOperation): """Alter the model's managers.""" serialization_expand_args = ['managers'] def __init__(self, name, managers): self.managers = managers super().__init__(name) def deconstruct(self): return ( self.__class__.__qualname__, [self.name, self.managers], {} ) def state_forwards(self, app_label, state): state.alter_model_managers(app_label, self.name_lower, self.managers) def database_forwards(self, app_label, schema_editor, from_state, to_state): pass def database_backwards(self, app_label, schema_editor, from_state, to_state): pass def describe(self): return "Change managers on %s" % self.name @property def migration_name_fragment(self): return 'alter_%s_managers' % self.name_lower class IndexOperation(Operation): option_name = 'indexes' @cached_property def model_name_lower(self): return self.model_name.lower() class AddIndex(IndexOperation): """Add an index on a model.""" def __init__(self, model_name, index): self.model_name = model_name if not index.name: raise ValueError( "Indexes passed to AddIndex operations require a name " "argument. %r doesn't have one." 
% index ) self.index = index def state_forwards(self, app_label, state): state.add_index(app_label, self.model_name_lower, self.index) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.add_index(model, self.index) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = from_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.remove_index(model, self.index) def deconstruct(self): kwargs = { 'model_name': self.model_name, 'index': self.index, } return ( self.__class__.__qualname__, [], kwargs, ) def describe(self): if self.index.expressions: return 'Create index %s on %s on model %s' % ( self.index.name, ', '.join([str(expression) for expression in self.index.expressions]), self.model_name, ) return 'Create index %s on field(s) %s of model %s' % ( self.index.name, ', '.join(self.index.fields), self.model_name, ) @property def migration_name_fragment(self): return '%s_%s' % (self.model_name_lower, self.index.name.lower()) class RemoveIndex(IndexOperation): """Remove an index from a model.""" def __init__(self, model_name, name): self.model_name = model_name self.name = name def state_forwards(self, app_label, state): state.remove_index(app_label, self.model_name_lower, self.name) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = from_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): from_model_state = from_state.models[app_label, self.model_name_lower] index = from_model_state.get_index_by_name(self.name) schema_editor.remove_index(model, index) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): to_model_state = to_state.models[app_label, self.model_name_lower] index = to_model_state.get_index_by_name(self.name) schema_editor.add_index(model, index) def deconstruct(self): kwargs = { 'model_name': self.model_name, 'name': self.name, } return ( self.__class__.__qualname__, [], kwargs, ) def describe(self): return 'Remove index %s from %s' % (self.name, self.model_name) @property def migration_name_fragment(self): return 'remove_%s_%s' % (self.model_name_lower, self.name.lower()) class AddConstraint(IndexOperation): option_name = 'constraints' def __init__(self, model_name, constraint): self.model_name = model_name self.constraint = constraint def state_forwards(self, app_label, state): state.add_constraint(app_label, self.model_name_lower, self.constraint) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.add_constraint(model, self.constraint) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.remove_constraint(model, self.constraint) def deconstruct(self): return self.__class__.__name__, [], { 'model_name': self.model_name, 'constraint': self.constraint, } def describe(self): return 'Create constraint %s on model %s' % 
(self.constraint.name, self.model_name) @property def migration_name_fragment(self): return '%s_%s' % (self.model_name_lower, self.constraint.name.lower()) class RemoveConstraint(IndexOperation): option_name = 'constraints' def __init__(self, model_name, name): self.model_name = model_name self.name = name def state_forwards(self, app_label, state): state.remove_constraint(app_label, self.model_name_lower, self.name) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): from_model_state = from_state.models[app_label, self.model_name_lower] constraint = from_model_state.get_constraint_by_name(self.name) schema_editor.remove_constraint(model, constraint) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): to_model_state = to_state.models[app_label, self.model_name_lower] constraint = to_model_state.get_constraint_by_name(self.name) schema_editor.add_constraint(model, constraint) def deconstruct(self): return self.__class__.__name__, [], { 'model_name': self.model_name, 'name': self.name, } def describe(self): return 'Remove constraint %s from model %s' % (self.name, self.model_name) @property def migration_name_fragment(self): return 'remove_%s_%s' % (self.model_name_lower, self.name.lower())
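# ---------------------------------------------------------------------------
# Usage sketch: a hypothetical illustration (not part of the module) of how
# the reduce() hooks above let the migration optimizer collapse consecutive
# model operations. The 'Author'/'Writer'/'library' names are invented; only
# the operation classes defined in this module are assumed.
# ---------------------------------------------------------------------------
def _example_model_operation_reduction():
    from django.db import models

    create = CreateModel(
        'Author',
        fields=[('id', models.AutoField(primary_key=True))],
    )
    # CreateModel followed by DeleteModel of the same (non-proxy) model
    # cancels out entirely.
    assert create.reduce(DeleteModel('Author'), app_label='library') == []
    # CreateModel followed by RenameModel re-emits a single CreateModel
    # under the new name.
    [combined] = create.reduce(RenameModel('Author', 'Writer'), app_label='library')
    assert combined.name == 'Writer'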
from django.db.migrations.utils import field_references from django.db.models import NOT_PROVIDED from django.utils.functional import cached_property from .base import Operation class FieldOperation(Operation): def __init__(self, model_name, name, field=None): self.model_name = model_name self.name = name self.field = field @cached_property def model_name_lower(self): return self.model_name.lower() @cached_property def name_lower(self): return self.name.lower() def is_same_model_operation(self, operation): return self.model_name_lower == operation.model_name_lower def is_same_field_operation(self, operation): return self.is_same_model_operation(operation) and self.name_lower == operation.name_lower def references_model(self, name, app_label): name_lower = name.lower() if name_lower == self.model_name_lower: return True if self.field: return bool(field_references( (app_label, self.model_name_lower), self.field, (app_label, name_lower) )) return False def references_field(self, model_name, name, app_label): model_name_lower = model_name.lower() # Check if this operation locally references the field. if model_name_lower == self.model_name_lower: if name == self.name: return True elif self.field and hasattr(self.field, 'from_fields') and name in self.field.from_fields: return True # Check if this operation remotely references the field. if self.field is None: return False return bool(field_references( (app_label, self.model_name_lower), self.field, (app_label, model_name_lower), name, )) def reduce(self, operation, app_label): return ( super().reduce(operation, app_label) or not operation.references_field(self.model_name, self.name, app_label) ) class AddField(FieldOperation): """Add a field to a model.""" def __init__(self, model_name, name, field, preserve_default=True): self.preserve_default = preserve_default super().__init__(model_name, name, field) def deconstruct(self): kwargs = { 'model_name': self.model_name, 'name': self.name, 'field': self.field, } if self.preserve_default is not True: kwargs['preserve_default'] = self.preserve_default return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): state.add_field( app_label, self.model_name_lower, self.name, self.field, self.preserve_default, ) def database_forwards(self, app_label, schema_editor, from_state, to_state): to_model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, to_model): from_model = from_state.apps.get_model(app_label, self.model_name) field = to_model._meta.get_field(self.name) if not self.preserve_default: field.default = self.field.default schema_editor.add_field( from_model, field, ) if not self.preserve_default: field.default = NOT_PROVIDED def database_backwards(self, app_label, schema_editor, from_state, to_state): from_model = from_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, from_model): schema_editor.remove_field(from_model, from_model._meta.get_field(self.name)) def describe(self): return "Add field %s to %s" % (self.name, self.model_name) @property def migration_name_fragment(self): return '%s_%s' % (self.model_name_lower, self.name_lower) def reduce(self, operation, app_label): if isinstance(operation, FieldOperation) and self.is_same_field_operation(operation): if isinstance(operation, AlterField): return [ AddField( model_name=self.model_name, name=operation.name, field=operation.field, ), ] elif isinstance(operation, RemoveField): return [] elif 
isinstance(operation, RenameField): return [ AddField( model_name=self.model_name, name=operation.new_name, field=self.field, ), ] return super().reduce(operation, app_label) class RemoveField(FieldOperation): """Remove a field from a model.""" def deconstruct(self): kwargs = { 'model_name': self.model_name, 'name': self.name, } return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): state.remove_field(app_label, self.model_name_lower, self.name) def database_forwards(self, app_label, schema_editor, from_state, to_state): from_model = from_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, from_model): schema_editor.remove_field(from_model, from_model._meta.get_field(self.name)) def database_backwards(self, app_label, schema_editor, from_state, to_state): to_model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, to_model): from_model = from_state.apps.get_model(app_label, self.model_name) schema_editor.add_field(from_model, to_model._meta.get_field(self.name)) def describe(self): return "Remove field %s from %s" % (self.name, self.model_name) @property def migration_name_fragment(self): return 'remove_%s_%s' % (self.model_name_lower, self.name_lower) def reduce(self, operation, app_label): from .models import DeleteModel if isinstance(operation, DeleteModel) and operation.name_lower == self.model_name_lower: return [operation] return super().reduce(operation, app_label) class AlterField(FieldOperation): """ Alter a field's database column (e.g. null, max_length) to the provided new field. """ def __init__(self, model_name, name, field, preserve_default=True): self.preserve_default = preserve_default super().__init__(model_name, name, field) def deconstruct(self): kwargs = { 'model_name': self.model_name, 'name': self.name, 'field': self.field, } if self.preserve_default is not True: kwargs['preserve_default'] = self.preserve_default return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): state.alter_field( app_label, self.model_name_lower, self.name, self.field, self.preserve_default, ) def database_forwards(self, app_label, schema_editor, from_state, to_state): to_model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, to_model): from_model = from_state.apps.get_model(app_label, self.model_name) from_field = from_model._meta.get_field(self.name) to_field = to_model._meta.get_field(self.name) if not self.preserve_default: to_field.default = self.field.default schema_editor.alter_field(from_model, from_field, to_field) if not self.preserve_default: to_field.default = NOT_PROVIDED def database_backwards(self, app_label, schema_editor, from_state, to_state): self.database_forwards(app_label, schema_editor, from_state, to_state) def describe(self): return "Alter field %s on %s" % (self.name, self.model_name) @property def migration_name_fragment(self): return 'alter_%s_%s' % (self.model_name_lower, self.name_lower) def reduce(self, operation, app_label): if isinstance(operation, RemoveField) and self.is_same_field_operation(operation): return [operation] elif isinstance(operation, RenameField) and self.is_same_field_operation(operation): return [ operation, AlterField( model_name=self.model_name, name=operation.new_name, field=self.field, ), ] return super().reduce(operation, app_label) class RenameField(FieldOperation): """Rename a field on 
the model. Might affect db_column too.""" def __init__(self, model_name, old_name, new_name): self.old_name = old_name self.new_name = new_name super().__init__(model_name, old_name) @cached_property def old_name_lower(self): return self.old_name.lower() @cached_property def new_name_lower(self): return self.new_name.lower() def deconstruct(self): kwargs = { 'model_name': self.model_name, 'old_name': self.old_name, 'new_name': self.new_name, } return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): state.rename_field(app_label, self.model_name_lower, self.old_name, self.new_name) def database_forwards(self, app_label, schema_editor, from_state, to_state): to_model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, to_model): from_model = from_state.apps.get_model(app_label, self.model_name) schema_editor.alter_field( from_model, from_model._meta.get_field(self.old_name), to_model._meta.get_field(self.new_name), ) def database_backwards(self, app_label, schema_editor, from_state, to_state): to_model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, to_model): from_model = from_state.apps.get_model(app_label, self.model_name) schema_editor.alter_field( from_model, from_model._meta.get_field(self.new_name), to_model._meta.get_field(self.old_name), ) def describe(self): return "Rename field %s on %s to %s" % (self.old_name, self.model_name, self.new_name) @property def migration_name_fragment(self): return 'rename_%s_%s_%s' % ( self.old_name_lower, self.model_name_lower, self.new_name_lower, ) def references_field(self, model_name, name, app_label): return self.references_model(model_name, app_label) and ( name.lower() == self.old_name_lower or name.lower() == self.new_name_lower ) def reduce(self, operation, app_label): if (isinstance(operation, RenameField) and self.is_same_model_operation(operation) and self.new_name_lower == operation.old_name_lower): return [ RenameField( self.model_name, self.old_name, operation.new_name, ), ] # Skip `FieldOperation.reduce` as we want to run `references_field` # against self.old_name and self.new_name. return ( super(FieldOperation, self).reduce(operation, app_label) or not ( operation.references_field(self.model_name, self.old_name, app_label) or operation.references_field(self.model_name, self.new_name, app_label) ) )
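# ---------------------------------------------------------------------------
# Usage sketch: a hypothetical illustration (not part of the module) showing
# that field operations reduce pairwise in the same way as the model
# operations. The 'Author'/'library' names are invented.
# ---------------------------------------------------------------------------
def _example_field_operation_reduction():
    from django.db import models

    add = AddField('Author', 'name', models.CharField(max_length=100))
    rename = RenameField('Author', old_name='name', new_name='full_name')
    # AddField followed by RenameField of the same field collapses into a
    # single AddField under the new name (see AddField.reduce() above).
    [combined] = add.reduce(rename, app_label='library')
    assert isinstance(combined, AddField)
    assert combined.name == 'full_name'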
from django.db.models.lookups import ( Exact, GreaterThan, GreaterThanOrEqual, In, IsNull, LessThan, LessThanOrEqual, ) class MultiColSource: contains_aggregate = False def __init__(self, alias, targets, sources, field): self.targets, self.sources, self.field, self.alias = targets, sources, field, alias self.output_field = self.field def __repr__(self): return "{}({}, {})".format( self.__class__.__name__, self.alias, self.field) def relabeled_clone(self, relabels): return self.__class__(relabels.get(self.alias, self.alias), self.targets, self.sources, self.field) def get_lookup(self, lookup): return self.output_field.get_lookup(lookup) def resolve_expression(self, *args, **kwargs): return self def get_normalized_value(value, lhs): from django.db.models import Model if isinstance(value, Model): value_list = [] sources = lhs.output_field.get_path_info()[-1].target_fields for source in sources: while not isinstance(value, source.model) and source.remote_field: source = source.remote_field.model._meta.get_field(source.remote_field.field_name) try: value_list.append(getattr(value, source.attname)) except AttributeError: # A case like Restaurant.objects.filter(place=restaurant_instance), # where place is a OneToOneField and the primary key of Restaurant. return (value.pk,) return tuple(value_list) if not isinstance(value, tuple): return (value,) return value class RelatedIn(In): def get_prep_lookup(self): if not isinstance(self.lhs, MultiColSource) and self.rhs_is_direct_value(): # If we get here, we are dealing with single-column relations. self.rhs = [get_normalized_value(val, self.lhs)[0] for val in self.rhs] # We need to run the related field's get_prep_value(). Consider case # ForeignKey to IntegerField given value 'abc'. The ForeignKey itself # doesn't have validation for non-integers, so we must run validation # using the target field. if hasattr(self.lhs.output_field, 'get_path_info'): # Run the target field's get_prep_value. We can safely assume there is # only one as we don't get to the direct value branch otherwise. target_field = self.lhs.output_field.get_path_info()[-1].target_fields[-1] self.rhs = [target_field.get_prep_value(v) for v in self.rhs] return super().get_prep_lookup() def as_sql(self, compiler, connection): if isinstance(self.lhs, MultiColSource): # For multicolumn lookups we need to build a multicolumn where clause. # This clause is either a SubqueryConstraint (for values that need to be compiled to # SQL) or an OR-combined list of (col1 = val1 AND col2 = val2 AND ...) clauses. 
from django.db.models.sql.where import ( AND, OR, SubqueryConstraint, WhereNode, ) root_constraint = WhereNode(connector=OR) if self.rhs_is_direct_value(): values = [get_normalized_value(value, self.lhs) for value in self.rhs] for value in values: value_constraint = WhereNode() for source, target, val in zip(self.lhs.sources, self.lhs.targets, value): lookup_class = target.get_lookup('exact') lookup = lookup_class(target.get_col(self.lhs.alias, source), val) value_constraint.add(lookup, AND) root_constraint.add(value_constraint, OR) else: root_constraint.add( SubqueryConstraint( self.lhs.alias, [target.column for target in self.lhs.targets], [source.name for source in self.lhs.sources], self.rhs), AND) return root_constraint.as_sql(compiler, connection) else: if (not getattr(self.rhs, 'has_select_fields', True) and not getattr(self.lhs.field.target_field, 'primary_key', False)): self.rhs.clear_select_clause() if (getattr(self.lhs.output_field, 'primary_key', False) and self.lhs.output_field.model == self.rhs.model): # A case like Restaurant.objects.filter(place__in=restaurant_qs), # where place is a OneToOneField and the primary key of # Restaurant. target_field = self.lhs.field.name else: target_field = self.lhs.field.target_field.name self.rhs.add_fields([target_field], True) return super().as_sql(compiler, connection) class RelatedLookupMixin: def get_prep_lookup(self): if not isinstance(self.lhs, MultiColSource) and not hasattr(self.rhs, 'resolve_expression'): # If we get here, we are dealing with single-column relations. self.rhs = get_normalized_value(self.rhs, self.lhs)[0] # We need to run the related field's get_prep_value(). Consider case # ForeignKey to IntegerField given value 'abc'. The ForeignKey itself # doesn't have validation for non-integers, so we must run validation # using the target field. if self.prepare_rhs and hasattr(self.lhs.output_field, 'get_path_info'): # Get the target field. We can safely assume there is only one # as we don't get to the direct value branch otherwise. target_field = self.lhs.output_field.get_path_info()[-1].target_fields[-1] self.rhs = target_field.get_prep_value(self.rhs) return super().get_prep_lookup() def as_sql(self, compiler, connection): if isinstance(self.lhs, MultiColSource): assert self.rhs_is_direct_value() self.rhs = get_normalized_value(self.rhs, self.lhs) from django.db.models.sql.where import AND, WhereNode root_constraint = WhereNode() for target, source, val in zip(self.lhs.targets, self.lhs.sources, self.rhs): lookup_class = target.get_lookup(self.lookup_name) root_constraint.add( lookup_class(target.get_col(self.lhs.alias, source), val), AND) return root_constraint.as_sql(compiler, connection) return super().as_sql(compiler, connection) class RelatedExact(RelatedLookupMixin, Exact): pass class RelatedLessThan(RelatedLookupMixin, LessThan): pass class RelatedGreaterThan(RelatedLookupMixin, GreaterThan): pass class RelatedGreaterThanOrEqual(RelatedLookupMixin, GreaterThanOrEqual): pass class RelatedLessThanOrEqual(RelatedLookupMixin, LessThanOrEqual): pass class RelatedIsNull(RelatedLookupMixin, IsNull): pass
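# ---------------------------------------------------------------------------
# Usage sketch: a hypothetical illustration (not part of the module) of the
# value normalization performed by get_normalized_value() above.
# ---------------------------------------------------------------------------
def _example_get_normalized_value():
    # Non-Model, non-tuple values are wrapped in a 1-tuple and tuples pass
    # through unchanged, so single- and multi-column lookups can treat
    # right-hand sides uniformly. The lhs argument is only consulted for
    # Model instances, so None suffices in this sketch.
    assert get_normalized_value(5, None) == (5,)
    assert get_normalized_value((1, 2), None) == (1, 2)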
import collections.abc import copy import datetime import decimal import operator import uuid import warnings from base64 import b64decode, b64encode from functools import partialmethod, total_ordering from django import forms from django.apps import apps from django.conf import settings from django.core import checks, exceptions, validators from django.db import connection, connections, router from django.db.models.constants import LOOKUP_SEP from django.db.models.query_utils import DeferredAttribute, RegisterLookupMixin from django.utils import timezone from django.utils.datastructures import DictWrapper from django.utils.dateparse import ( parse_date, parse_datetime, parse_duration, parse_time, ) from django.utils.duration import duration_microseconds, duration_string from django.utils.functional import Promise, cached_property from django.utils.ipv6 import clean_ipv6_address from django.utils.itercompat import is_iterable from django.utils.text import capfirst from django.utils.translation import gettext_lazy as _ __all__ = [ 'AutoField', 'BLANK_CHOICE_DASH', 'BigAutoField', 'BigIntegerField', 'BinaryField', 'BooleanField', 'CharField', 'CommaSeparatedIntegerField', 'DateField', 'DateTimeField', 'DecimalField', 'DurationField', 'EmailField', 'Empty', 'Field', 'FilePathField', 'FloatField', 'GenericIPAddressField', 'IPAddressField', 'IntegerField', 'NOT_PROVIDED', 'NullBooleanField', 'PositiveBigIntegerField', 'PositiveIntegerField', 'PositiveSmallIntegerField', 'SlugField', 'SmallAutoField', 'SmallIntegerField', 'TextField', 'TimeField', 'URLField', 'UUIDField', ] class Empty: pass class NOT_PROVIDED: pass # The values to use for "blank" in SelectFields. Will be appended to the start # of most "choices" lists. BLANK_CHOICE_DASH = [("", "---------")] def _load_field(app_label, model_name, field_name): return apps.get_model(app_label, model_name)._meta.get_field(field_name) # A guide to Field parameters: # # * name: The name of the field specified in the model. # * attname: The attribute to use on the model object. This is the same as # "name", except in the case of ForeignKeys, where "_id" is # appended. # * db_column: The db_column specified in the model (or None). # * column: The database column for this field. This is the same as # "attname", except if db_column is specified. # # Code that introspects values, or does other dynamic things, should use # attname. For example, this gets the primary key value of object "obj": # # getattr(obj, opts.pk.attname) def _empty(of_cls): new = Empty() new.__class__ = of_cls return new def return_None(): return None @total_ordering class Field(RegisterLookupMixin): """Base class for all field types""" # Designates whether empty strings fundamentally are allowed at the # database level. empty_strings_allowed = True empty_values = list(validators.EMPTY_VALUES) # These track each time a Field instance is created. Used to retain order. # The auto_creation_counter is used for fields that Django implicitly # creates, creation_counter is used for all user-specified fields. creation_counter = 0 auto_creation_counter = -1 default_validators = [] # Default set of validators default_error_messages = { 'invalid_choice': _('Value %(value)r is not a valid choice.'), 'null': _('This field cannot be null.'), 'blank': _('This field cannot be blank.'), 'unique': _('%(model_name)s with this %(field_label)s ' 'already exists.'), # Translators: The 'lookup_type' is one of 'date', 'year' or 'month'. 
# Eg: "Title must be unique for pub_date year" 'unique_for_date': _("%(field_label)s must be unique for " "%(date_field_label)s %(lookup_type)s."), } system_check_deprecated_details = None system_check_removed_details = None # Field flags hidden = False many_to_many = None many_to_one = None one_to_many = None one_to_one = None related_model = None descriptor_class = DeferredAttribute # Generic field type description, usually overridden by subclasses def _description(self): return _('Field of type: %(field_type)s') % { 'field_type': self.__class__.__name__ } description = property(_description) def __init__(self, verbose_name=None, name=None, primary_key=False, max_length=None, unique=False, blank=False, null=False, db_index=False, rel=None, default=NOT_PROVIDED, editable=True, serialize=True, unique_for_date=None, unique_for_month=None, unique_for_year=None, choices=None, help_text='', db_column=None, db_tablespace=None, auto_created=False, validators=(), error_messages=None): self.name = name self.verbose_name = verbose_name # May be set by set_attributes_from_name self._verbose_name = verbose_name # Store original for deconstruction self.primary_key = primary_key self.max_length, self._unique = max_length, unique self.blank, self.null = blank, null self.remote_field = rel self.is_relation = self.remote_field is not None self.default = default self.editable = editable self.serialize = serialize self.unique_for_date = unique_for_date self.unique_for_month = unique_for_month self.unique_for_year = unique_for_year if isinstance(choices, collections.abc.Iterator): choices = list(choices) self.choices = choices self.help_text = help_text self.db_index = db_index self.db_column = db_column self._db_tablespace = db_tablespace self.auto_created = auto_created # Adjust the appropriate creation counter, and save our local copy. if auto_created: self.creation_counter = Field.auto_creation_counter Field.auto_creation_counter -= 1 else: self.creation_counter = Field.creation_counter Field.creation_counter += 1 self._validators = list(validators) # Store for deconstruction later messages = {} for c in reversed(self.__class__.__mro__): messages.update(getattr(c, 'default_error_messages', {})) messages.update(error_messages or {}) self._error_messages = error_messages # Store for deconstruction later self.error_messages = messages def __str__(self): """ Return "app_label.model_label.field_name" for fields attached to models. """ if not hasattr(self, 'model'): return super().__str__() model = self.model return '%s.%s' % (model._meta.label, self.name) def __repr__(self): """Display the module, class, and name of the field.""" path = '%s.%s' % (self.__class__.__module__, self.__class__.__qualname__) name = getattr(self, 'name', None) if name is not None: return '<%s: %s>' % (path, name) return '<%s>' % path def check(self, **kwargs): return [ *self._check_field_name(), *self._check_choices(), *self._check_db_index(), *self._check_null_allowed_for_primary_keys(), *self._check_backend_specific_checks(**kwargs), *self._check_validators(), *self._check_deprecation_details(), ] def _check_field_name(self): """ Check if field name is valid, i.e. 1) does not end with an underscore, 2) does not contain "__" and 3) is not "pk". """ if self.name.endswith('_'): return [ checks.Error( 'Field names must not end with an underscore.', obj=self, id='fields.E001', ) ] elif LOOKUP_SEP in self.name: return [ checks.Error( 'Field names must not contain "%s".' 
% LOOKUP_SEP, obj=self, id='fields.E002', ) ] elif self.name == 'pk': return [ checks.Error( "'pk' is a reserved word that cannot be used as a field name.", obj=self, id='fields.E003', ) ] else: return [] @classmethod def _choices_is_value(cls, value): return isinstance(value, (str, Promise)) or not is_iterable(value) def _check_choices(self): if not self.choices: return [] if not is_iterable(self.choices) or isinstance(self.choices, str): return [ checks.Error( "'choices' must be an iterable (e.g., a list or tuple).", obj=self, id='fields.E004', ) ] choice_max_length = 0 # Expect [group_name, [value, display]] for choices_group in self.choices: try: group_name, group_choices = choices_group except (TypeError, ValueError): # Containing non-pairs break try: if not all( self._choices_is_value(value) and self._choices_is_value(human_name) for value, human_name in group_choices ): break if self.max_length is not None and group_choices: choice_max_length = max([ choice_max_length, *(len(value) for value, _ in group_choices if isinstance(value, str)), ]) except (TypeError, ValueError): # No groups, choices in the form [value, display] value, human_name = group_name, group_choices if not self._choices_is_value(value) or not self._choices_is_value(human_name): break if self.max_length is not None and isinstance(value, str): choice_max_length = max(choice_max_length, len(value)) # Special case: choices=['ab'] if isinstance(choices_group, str): break else: if self.max_length is not None and choice_max_length > self.max_length: return [ checks.Error( "'max_length' is too small to fit the longest value " "in 'choices' (%d characters)." % choice_max_length, obj=self, id='fields.E009', ), ] return [] return [ checks.Error( "'choices' must be an iterable containing " "(actual value, human readable name) tuples.", obj=self, id='fields.E005', ) ] def _check_db_index(self): if self.db_index not in (None, True, False): return [ checks.Error( "'db_index' must be None, True or False.", obj=self, id='fields.E006', ) ] else: return [] def _check_null_allowed_for_primary_keys(self): if (self.primary_key and self.null and not connection.features.interprets_empty_strings_as_nulls): # We cannot reliably check this for backends like Oracle which # consider NULL and '' to be equal (and thus set up # character-based fields a little differently). return [ checks.Error( 'Primary keys must not have null=True.', hint=('Set null=False on the field, or ' 'remove primary_key=True argument.'), obj=self, id='fields.E007', ) ] else: return [] def _check_backend_specific_checks(self, databases=None, **kwargs): if databases is None: return [] app_label = self.model._meta.app_label errors = [] for alias in databases: if router.allow_migrate(alias, app_label, model_name=self.model._meta.model_name): errors.extend(connections[alias].validation.check_field(self, **kwargs)) return errors def _check_validators(self): errors = [] for i, validator in enumerate(self.validators): if not callable(validator): errors.append( checks.Error( "All 'validators' must be callable.", hint=( "validators[{i}] ({repr}) isn't a function or " "instance of a validator class.".format( i=i, repr=repr(validator), ) ), obj=self, id='fields.E008', ) ) return errors def _check_deprecation_details(self): if self.system_check_removed_details is not None: return [ checks.Error( self.system_check_removed_details.get( 'msg', '%s has been removed except for support in historical ' 'migrations.' 
% self.__class__.__name__ ), hint=self.system_check_removed_details.get('hint'), obj=self, id=self.system_check_removed_details.get('id', 'fields.EXXX'), ) ] elif self.system_check_deprecated_details is not None: return [ checks.Warning( self.system_check_deprecated_details.get( 'msg', '%s has been deprecated.' % self.__class__.__name__ ), hint=self.system_check_deprecated_details.get('hint'), obj=self, id=self.system_check_deprecated_details.get('id', 'fields.WXXX'), ) ] return [] def get_col(self, alias, output_field=None): if ( alias == self.model._meta.db_table and (output_field is None or output_field == self) ): return self.cached_col from django.db.models.expressions import Col return Col(alias, self, output_field) @cached_property def cached_col(self): from django.db.models.expressions import Col return Col(self.model._meta.db_table, self) def select_format(self, compiler, sql, params): """ Custom format for select clauses. For example, GIS columns need to be selected as AsText(table.col) on MySQL as the table.col data can't be used by Django. """ return sql, params def deconstruct(self): """ Return enough information to recreate the field as a 4-tuple: * The name of the field on the model, if contribute_to_class() has been run. * The import path of the field, including the class, e.g. django.db.models.IntegerField. This should be the most portable version, so less specific may be better. * A list of positional arguments. * A dict of keyword arguments. Note that the positional or keyword arguments must contain values of the following types (including inner values of collection types): * None, bool, str, int, float, complex, set, frozenset, list, tuple, dict * UUID * datetime.datetime (naive), datetime.date * top-level classes, top-level functions - will be referenced by their full import path * Storage instances - these have their own deconstruct() method This is because the values here must be serialized into a text format (possibly new Python code, possibly JSON) and these are the only types with encoding handlers defined. There's no need to return the exact way the field was instantiated this time, just ensure that the resulting field is the same - prefer keyword arguments over positional ones, and omit parameters with their default values. 
""" # Short-form way of fetching all the default parameters keywords = {} possibles = { "verbose_name": None, "primary_key": False, "max_length": None, "unique": False, "blank": False, "null": False, "db_index": False, "default": NOT_PROVIDED, "editable": True, "serialize": True, "unique_for_date": None, "unique_for_month": None, "unique_for_year": None, "choices": None, "help_text": '', "db_column": None, "db_tablespace": None, "auto_created": False, "validators": [], "error_messages": None, } attr_overrides = { "unique": "_unique", "error_messages": "_error_messages", "validators": "_validators", "verbose_name": "_verbose_name", "db_tablespace": "_db_tablespace", } equals_comparison = {"choices", "validators"} for name, default in possibles.items(): value = getattr(self, attr_overrides.get(name, name)) # Unroll anything iterable for choices into a concrete list if name == "choices" and isinstance(value, collections.abc.Iterable): value = list(value) # Do correct kind of comparison if name in equals_comparison: if value != default: keywords[name] = value else: if value is not default: keywords[name] = value # Work out path - we shorten it for known Django core fields path = "%s.%s" % (self.__class__.__module__, self.__class__.__qualname__) if path.startswith("django.db.models.fields.related"): path = path.replace("django.db.models.fields.related", "django.db.models") elif path.startswith("django.db.models.fields.files"): path = path.replace("django.db.models.fields.files", "django.db.models") elif path.startswith('django.db.models.fields.json'): path = path.replace('django.db.models.fields.json', 'django.db.models') elif path.startswith("django.db.models.fields.proxy"): path = path.replace("django.db.models.fields.proxy", "django.db.models") elif path.startswith("django.db.models.fields"): path = path.replace("django.db.models.fields", "django.db.models") # Return basic info - other fields should override this. return (self.name, path, [], keywords) def clone(self): """ Uses deconstruct() to clone a new copy of this Field. Will not preserve any class attachments/attribute names. """ name, path, args, kwargs = self.deconstruct() return self.__class__(*args, **kwargs) def __eq__(self, other): # Needed for @total_ordering if isinstance(other, Field): return ( self.creation_counter == other.creation_counter and getattr(self, 'model', None) == getattr(other, 'model', None) ) return NotImplemented def __lt__(self, other): # This is needed because bisect does not take a comparison function. # Order by creation_counter first for backward compatibility. if isinstance(other, Field): if ( self.creation_counter != other.creation_counter or not hasattr(self, 'model') and not hasattr(other, 'model') ): return self.creation_counter < other.creation_counter elif hasattr(self, 'model') != hasattr(other, 'model'): return not hasattr(self, 'model') # Order no-model fields first else: # creation_counter's are equal, compare only models. return ( (self.model._meta.app_label, self.model._meta.model_name) < (other.model._meta.app_label, other.model._meta.model_name) ) return NotImplemented def __hash__(self): return hash(( self.creation_counter, self.model._meta.app_label if hasattr(self, 'model') else None, self.model._meta.model_name if hasattr(self, 'model') else None, )) def __deepcopy__(self, memodict): # We don't have to deepcopy very much here, since most things are not # intended to be altered after initial creation. 
obj = copy.copy(self) if self.remote_field: obj.remote_field = copy.copy(self.remote_field) if hasattr(self.remote_field, 'field') and self.remote_field.field is self: obj.remote_field.field = obj memodict[id(self)] = obj return obj def __copy__(self): # We need to avoid hitting __reduce__, so define this # slightly weird copy construct. obj = Empty() obj.__class__ = self.__class__ obj.__dict__ = self.__dict__.copy() return obj def __reduce__(self): """ Pickling should return the model._meta.fields instance of the field, not a new copy of that field. So, use the app registry to load the model and then the field back. """ if not hasattr(self, 'model'): # Fields are sometimes used without attaching them to models (for # example in aggregation). In this case give back a plain field # instance. The code below will create a new empty instance of # class self.__class__, then update its dict with self.__dict__ # values - so, this is very close to normal pickle. state = self.__dict__.copy() # The _get_default cached_property can't be pickled due to lambda # usage. state.pop('_get_default', None) return _empty, (self.__class__,), state return _load_field, (self.model._meta.app_label, self.model._meta.object_name, self.name) def get_pk_value_on_save(self, instance): """ Hook to generate new PK values on save. This method is called when saving instances with no primary key value set. If this method returns something else than None, then the returned value is used when saving the new instance. """ if self.default: return self.get_default() return None def to_python(self, value): """ Convert the input value into the expected Python data type, raising django.core.exceptions.ValidationError if the data can't be converted. Return the converted value. Subclasses should override this. """ return value @cached_property def validators(self): """ Some validators can't be created at field initialization time. This method provides a way to delay their creation until required. """ return [*self.default_validators, *self._validators] def run_validators(self, value): if value in self.empty_values: return errors = [] for v in self.validators: try: v(value) except exceptions.ValidationError as e: if hasattr(e, 'code') and e.code in self.error_messages: e.message = self.error_messages[e.code] errors.extend(e.error_list) if errors: raise exceptions.ValidationError(errors) def validate(self, value, model_instance): """ Validate value and raise ValidationError if necessary. Subclasses should override this to provide validation logic. """ if not self.editable: # Skip validation for non-editable fields. return if self.choices is not None and value not in self.empty_values: for option_key, option_value in self.choices: if isinstance(option_value, (list, tuple)): # This is an optgroup, so look inside the group for # options. for optgroup_key, optgroup_value in option_value: if value == optgroup_key: return elif value == option_key: return raise exceptions.ValidationError( self.error_messages['invalid_choice'], code='invalid_choice', params={'value': value}, ) if value is None and not self.null: raise exceptions.ValidationError(self.error_messages['null'], code='null') if not self.blank and value in self.empty_values: raise exceptions.ValidationError(self.error_messages['blank'], code='blank') def clean(self, value, model_instance): """ Convert the value's type and run validation. Validation errors from to_python() and validate() are propagated. Return the correct value if no error is raised. 
""" value = self.to_python(value) self.validate(value, model_instance) self.run_validators(value) return value def db_type_parameters(self, connection): return DictWrapper(self.__dict__, connection.ops.quote_name, 'qn_') def db_check(self, connection): """ Return the database column check constraint for this field, for the provided connection. Works the same way as db_type() for the case that get_internal_type() does not map to a preexisting model field. """ data = self.db_type_parameters(connection) try: return connection.data_type_check_constraints[self.get_internal_type()] % data except KeyError: return None def db_type(self, connection): """ Return the database column data type for this field, for the provided connection. """ # The default implementation of this method looks at the # backend-specific data_types dictionary, looking up the field by its # "internal type". # # A Field class can implement the get_internal_type() method to specify # which *preexisting* Django Field class it's most similar to -- i.e., # a custom field might be represented by a TEXT column type, which is # the same as the TextField Django field type, which means the custom # field's get_internal_type() returns 'TextField'. # # But the limitation of the get_internal_type() / data_types approach # is that it cannot handle database column types that aren't already # mapped to one of the built-in Django field types. In this case, you # can implement db_type() instead of get_internal_type() to specify # exactly which wacky database column type you want to use. data = self.db_type_parameters(connection) try: return connection.data_types[self.get_internal_type()] % data except KeyError: return None def rel_db_type(self, connection): """ Return the data type that a related field pointing to this field should use. For example, this method is called by ForeignKey and OneToOneField to determine its data type. """ return self.db_type(connection) def cast_db_type(self, connection): """Return the data type to use in the Cast() function.""" db_type = connection.ops.cast_data_types.get(self.get_internal_type()) if db_type: return db_type % self.db_type_parameters(connection) return self.db_type(connection) def db_parameters(self, connection): """ Extension of db_type(), providing a range of different return values (type, checks). This will look at db_type(), allowing custom model fields to override it. """ type_string = self.db_type(connection) check_string = self.db_check(connection) return { "type": type_string, "check": check_string, } def db_type_suffix(self, connection): return connection.data_types_suffix.get(self.get_internal_type()) def get_db_converters(self, connection): if hasattr(self, 'from_db_value'): return [self.from_db_value] return [] @property def unique(self): return self._unique or self.primary_key @property def db_tablespace(self): return self._db_tablespace or settings.DEFAULT_INDEX_TABLESPACE @property def db_returning(self): """ Private API intended only to be used by Django itself. Currently only the PostgreSQL backend supports returning multiple fields on a model. """ return False def set_attributes_from_name(self, name): self.name = self.name or name self.attname, self.column = self.get_attname_column() self.concrete = self.column is not None if self.verbose_name is None and self.name: self.verbose_name = self.name.replace('_', ' ') def contribute_to_class(self, cls, name, private_only=False): """ Register the field with the model class it belongs to. 
If private_only is True, create a separate instance of this field for every subclass of cls, even if cls is not an abstract model. """ self.set_attributes_from_name(name) self.model = cls cls._meta.add_field(self, private=private_only) if self.column: setattr(cls, self.attname, self.descriptor_class(self)) if self.choices is not None: # Don't override a get_FOO_display() method defined explicitly on # this class, but don't check methods derived from inheritance, to # allow overriding inherited choices. For more complex inheritance # structures users should override contribute_to_class(). if 'get_%s_display' % self.name not in cls.__dict__: setattr( cls, 'get_%s_display' % self.name, partialmethod(cls._get_FIELD_display, field=self), ) def get_filter_kwargs_for_object(self, obj): """ Return a dict that when passed as kwargs to self.model.filter(), would yield all instances having the same value for this field as obj has. """ return {self.name: getattr(obj, self.attname)} def get_attname(self): return self.name def get_attname_column(self): attname = self.get_attname() column = self.db_column or attname return attname, column def get_internal_type(self): return self.__class__.__name__ def pre_save(self, model_instance, add): """Return field's value just before saving.""" return getattr(model_instance, self.attname) def get_prep_value(self, value): """Perform preliminary non-db specific value checks and conversions.""" if isinstance(value, Promise): value = value._proxy____cast() return value def get_db_prep_value(self, value, connection, prepared=False): """ Return field's value prepared for interacting with the database backend. Used by the default implementations of get_db_prep_save(). """ if not prepared: value = self.get_prep_value(value) return value def get_db_prep_save(self, value, connection): """Return field's value prepared for saving into a database.""" return self.get_db_prep_value(value, connection=connection, prepared=False) def has_default(self): """Return a boolean of whether this field has a default value.""" return self.default is not NOT_PROVIDED def get_default(self): """Return the default value for this field.""" return self._get_default() @cached_property def _get_default(self): if self.has_default(): if callable(self.default): return self.default return lambda: self.default if not self.empty_strings_allowed or self.null and not connection.features.interprets_empty_strings_as_nulls: return return_None return str # return empty string def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None, ordering=()): """ Return choices with a default blank choices included, for use as <select> choices for this field. """ if self.choices is not None: choices = list(self.choices) if include_blank: blank_defined = any(choice in ('', None) for choice, _ in self.flatchoices) if not blank_defined: choices = blank_choice + choices return choices rel_model = self.remote_field.model limit_choices_to = limit_choices_to or self.get_limit_choices_to() choice_func = operator.attrgetter( self.remote_field.get_related_field().attname if hasattr(self.remote_field, 'get_related_field') else 'pk' ) qs = rel_model._default_manager.complex_filter(limit_choices_to) if ordering: qs = qs.order_by(*ordering) return (blank_choice if include_blank else []) + [ (choice_func(x), str(x)) for x in qs ] def value_to_string(self, obj): """ Return a string value of this field from the passed obj. This is used by the serialization framework. 
""" return str(self.value_from_object(obj)) def _get_flatchoices(self): """Flattened version of choices tuple.""" if self.choices is None: return [] flat = [] for choice, value in self.choices: if isinstance(value, (list, tuple)): flat.extend(value) else: flat.append((choice, value)) return flat flatchoices = property(_get_flatchoices) def save_form_data(self, instance, data): setattr(instance, self.name, data) def formfield(self, form_class=None, choices_form_class=None, **kwargs): """Return a django.forms.Field instance for this field.""" defaults = { 'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text, } if self.has_default(): if callable(self.default): defaults['initial'] = self.default defaults['show_hidden_initial'] = True else: defaults['initial'] = self.get_default() if self.choices is not None: # Fields with choices get special treatment. include_blank = (self.blank or not (self.has_default() or 'initial' in kwargs)) defaults['choices'] = self.get_choices(include_blank=include_blank) defaults['coerce'] = self.to_python if self.null: defaults['empty_value'] = None if choices_form_class is not None: form_class = choices_form_class else: form_class = forms.TypedChoiceField # Many of the subclass-specific formfield arguments (min_value, # max_value) don't apply for choice fields, so be sure to only pass # the values that TypedChoiceField will understand. for k in list(kwargs): if k not in ('coerce', 'empty_value', 'choices', 'required', 'widget', 'label', 'initial', 'help_text', 'error_messages', 'show_hidden_initial', 'disabled'): del kwargs[k] defaults.update(kwargs) if form_class is None: form_class = forms.CharField return form_class(**defaults) def value_from_object(self, obj): """Return the value of this field in the given model instance.""" return getattr(obj, self.attname) class BooleanField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value must be either True or False.'), 'invalid_nullable': _('“%(value)s” value must be either True, False, or None.'), } description = _("Boolean (Either True or False)") def get_internal_type(self): return "BooleanField" def to_python(self, value): if self.null and value in self.empty_values: return None if value in (True, False): # 1/0 are equal to True/False. bool() converts former to latter. return bool(value) if value in ('t', 'True', '1'): return True if value in ('f', 'False', '0'): return False raise exceptions.ValidationError( self.error_messages['invalid_nullable' if self.null else 'invalid'], code='invalid', params={'value': value}, ) def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None return self.to_python(value) def formfield(self, **kwargs): if self.choices is not None: include_blank = not (self.has_default() or 'initial' in kwargs) defaults = {'choices': self.get_choices(include_blank=include_blank)} else: form_class = forms.NullBooleanField if self.null else forms.BooleanField # In HTML checkboxes, 'required' means "must be checked" which is # different from the choices case ("must select some value"). # required=False allows unchecked checkboxes. 
defaults = {'form_class': form_class, 'required': False} return super().formfield(**{**defaults, **kwargs}) class CharField(Field): description = _("String (up to %(max_length)s)") def __init__(self, *args, db_collation=None, **kwargs): super().__init__(*args, **kwargs) self.db_collation = db_collation self.validators.append(validators.MaxLengthValidator(self.max_length)) def check(self, **kwargs): databases = kwargs.get('databases') or [] return [ *super().check(**kwargs), *self._check_db_collation(databases), *self._check_max_length_attribute(**kwargs), ] def _check_max_length_attribute(self, **kwargs): if self.max_length is None: return [ checks.Error( "CharFields must define a 'max_length' attribute.", obj=self, id='fields.E120', ) ] elif (not isinstance(self.max_length, int) or isinstance(self.max_length, bool) or self.max_length <= 0): return [ checks.Error( "'max_length' must be a positive integer.", obj=self, id='fields.E121', ) ] else: return [] def _check_db_collation(self, databases): errors = [] for db in databases: if not router.allow_migrate_model(db, self.model): continue connection = connections[db] if not ( self.db_collation is None or 'supports_collation_on_charfield' in self.model._meta.required_db_features or connection.features.supports_collation_on_charfield ): errors.append( checks.Error( '%s does not support a database collation on ' 'CharFields.' % connection.display_name, obj=self, id='fields.E190', ), ) return errors def cast_db_type(self, connection): if self.max_length is None: return connection.ops.cast_char_field_without_max_length return super().cast_db_type(connection) def get_internal_type(self): return "CharField" def to_python(self, value): if isinstance(value, str) or value is None: return value return str(value) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def formfield(self, **kwargs): # Passing max_length to forms.CharField means that the value's length # will be validated twice. This is considered acceptable since we want # the value in the form field (to pass into widget for example). defaults = {'max_length': self.max_length} # TODO: Handle multiple backends with different feature flags. if self.null and not connection.features.interprets_empty_strings_as_nulls: defaults['empty_value'] = None defaults.update(kwargs) return super().formfield(**defaults) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.db_collation: kwargs['db_collation'] = self.db_collation return name, path, args, kwargs class CommaSeparatedIntegerField(CharField): default_validators = [validators.validate_comma_separated_integer_list] description = _("Comma-separated integers") system_check_removed_details = { 'msg': ( 'CommaSeparatedIntegerField is removed except for support in ' 'historical migrations.' ), 'hint': ( 'Use CharField(validators=[validate_comma_separated_integer_list]) ' 'instead.' ), 'id': 'fields.E901', } def _to_naive(value): if timezone.is_aware(value): value = timezone.make_naive(value, timezone.utc) return value def _get_naive_now(): return _to_naive(timezone.now()) class DateTimeCheckMixin: def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_mutually_exclusive_options(), *self._check_fix_default_value(), ] def _check_mutually_exclusive_options(self): # auto_now, auto_now_add, and default are mutually exclusive # options. 
The use of more than one of these options together # will trigger an Error mutually_exclusive_options = [self.auto_now_add, self.auto_now, self.has_default()] enabled_options = [option not in (None, False) for option in mutually_exclusive_options].count(True) if enabled_options > 1: return [ checks.Error( "The options auto_now, auto_now_add, and default " "are mutually exclusive. Only one of these options " "may be present.", obj=self, id='fields.E160', ) ] else: return [] def _check_fix_default_value(self): return [] # Concrete subclasses use this in their implementations of # _check_fix_default_value(). def _check_if_value_fixed(self, value, now=None): """ Check if the given value appears to have been provided as a "fixed" time value, and include a warning in the returned list if it does. The value argument must be a date object or aware/naive datetime object. If now is provided, it must be a naive datetime object. """ if now is None: now = _get_naive_now() offset = datetime.timedelta(seconds=10) lower = now - offset upper = now + offset if isinstance(value, datetime.datetime): value = _to_naive(value) else: assert isinstance(value, datetime.date) lower = lower.date() upper = upper.date() if lower <= value <= upper: return [ checks.Warning( 'Fixed default value provided.', hint=( 'It seems you set a fixed date / time / datetime ' 'value as default for this field. This may not be ' 'what you want. If you want to have the current date ' 'as default, use `django.utils.timezone.now`' ), obj=self, id='fields.W161', ) ] return [] class DateField(DateTimeCheckMixin, Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value has an invalid date format. It must be ' 'in YYYY-MM-DD format.'), 'invalid_date': _('“%(value)s” value has the correct format (YYYY-MM-DD) ' 'but it is an invalid date.'), } description = _("Date (without time)") def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs): self.auto_now, self.auto_now_add = auto_now, auto_now_add if auto_now or auto_now_add: kwargs['editable'] = False kwargs['blank'] = True super().__init__(verbose_name, name, **kwargs) def _check_fix_default_value(self): """ Warn that using an actual date or datetime value is probably wrong; it's only evaluated on server startup. """ if not self.has_default(): return [] value = self.default if isinstance(value, datetime.datetime): value = _to_naive(value).date() elif isinstance(value, datetime.date): pass else: # No explicit date / datetime value -- no checks necessary return [] # At this point, value is a date object. return self._check_if_value_fixed(value) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.auto_now: kwargs['auto_now'] = True if self.auto_now_add: kwargs['auto_now_add'] = True if self.auto_now or self.auto_now_add: del kwargs['editable'] del kwargs['blank'] return name, path, args, kwargs def get_internal_type(self): return "DateField" def to_python(self, value): if value is None: return value if isinstance(value, datetime.datetime): if settings.USE_TZ and timezone.is_aware(value): # Convert aware datetimes to the default time zone # before casting them to dates (#17742). 
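                # Editor's sketch of the conversion described above, assuming
                # TIME_ZONE='America/Chicago': an aware datetime of
                # 2023-01-01 03:00 UTC is first made naive in local time
                # (2022-12-31 21:00) and only then truncated, so this returns
                # date(2022, 12, 31), not date(2023, 1, 1).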
default_timezone = timezone.get_default_timezone() value = timezone.make_naive(value, default_timezone) return value.date() if isinstance(value, datetime.date): return value try: parsed = parse_date(value) if parsed is not None: return parsed except ValueError: raise exceptions.ValidationError( self.error_messages['invalid_date'], code='invalid_date', params={'value': value}, ) raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = datetime.date.today() setattr(model_instance, self.attname, value) return value else: return super().pre_save(model_instance, add) def contribute_to_class(self, cls, name, **kwargs): super().contribute_to_class(cls, name, **kwargs) if not self.null: setattr( cls, 'get_next_by_%s' % self.name, partialmethod(cls._get_next_or_previous_by_FIELD, field=self, is_next=True) ) setattr( cls, 'get_previous_by_%s' % self.name, partialmethod(cls._get_next_or_previous_by_FIELD, field=self, is_next=False) ) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def get_db_prep_value(self, value, connection, prepared=False): # Casts dates into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_datefield_value(value) def value_to_string(self, obj): val = self.value_from_object(obj) return '' if val is None else val.isoformat() def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.DateField, **kwargs, }) class DateTimeField(DateField): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value has an invalid format. It must be in ' 'YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format.'), 'invalid_date': _("“%(value)s” value has the correct format " "(YYYY-MM-DD) but it is an invalid date."), 'invalid_datetime': _('“%(value)s” value has the correct format ' '(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) ' 'but it is an invalid date/time.'), } description = _("Date (with time)") # __init__ is inherited from DateField def _check_fix_default_value(self): """ Warn that using an actual date or datetime value is probably wrong; it's only evaluated on server startup. """ if not self.has_default(): return [] value = self.default if isinstance(value, (datetime.datetime, datetime.date)): return self._check_if_value_fixed(value) # No explicit date / datetime value -- no checks necessary. return [] def get_internal_type(self): return "DateTimeField" def to_python(self, value): if value is None: return value if isinstance(value, datetime.datetime): return value if isinstance(value, datetime.date): value = datetime.datetime(value.year, value.month, value.day) if settings.USE_TZ: # For backwards compatibility, interpret naive datetimes in # local time. This won't work during DST change, but we can't # do much about it, so we let the exceptions percolate up the # call stack. warnings.warn("DateTimeField %s.%s received a naive datetime " "(%s) while time zone support is active." 
% (self.model.__name__, self.name, value), RuntimeWarning) default_timezone = timezone.get_default_timezone() value = timezone.make_aware(value, default_timezone) return value try: parsed = parse_datetime(value) if parsed is not None: return parsed except ValueError: raise exceptions.ValidationError( self.error_messages['invalid_datetime'], code='invalid_datetime', params={'value': value}, ) try: parsed = parse_date(value) if parsed is not None: return datetime.datetime(parsed.year, parsed.month, parsed.day) except ValueError: raise exceptions.ValidationError( self.error_messages['invalid_date'], code='invalid_date', params={'value': value}, ) raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = timezone.now() setattr(model_instance, self.attname, value) return value else: return super().pre_save(model_instance, add) # contribute_to_class is inherited from DateField, it registers # get_next_by_FOO and get_prev_by_FOO def get_prep_value(self, value): value = super().get_prep_value(value) value = self.to_python(value) if value is not None and settings.USE_TZ and timezone.is_naive(value): # For backwards compatibility, interpret naive datetimes in local # time. This won't work during DST change, but we can't do much # about it, so we let the exceptions percolate up the call stack. try: name = '%s.%s' % (self.model.__name__, self.name) except AttributeError: name = '(unbound)' warnings.warn("DateTimeField %s received a naive datetime (%s)" " while time zone support is active." % (name, value), RuntimeWarning) default_timezone = timezone.get_default_timezone() value = timezone.make_aware(value, default_timezone) return value def get_db_prep_value(self, value, connection, prepared=False): # Casts datetimes into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_datetimefield_value(value) def value_to_string(self, obj): val = self.value_from_object(obj) return '' if val is None else val.isoformat() def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.DateTimeField, **kwargs, }) class DecimalField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value must be a decimal number.'), } description = _("Decimal number") def __init__(self, verbose_name=None, name=None, max_digits=None, decimal_places=None, **kwargs): self.max_digits, self.decimal_places = max_digits, decimal_places super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): errors = super().check(**kwargs) digits_errors = [ *self._check_decimal_places(), *self._check_max_digits(), ] if not digits_errors: errors.extend(self._check_decimal_places_and_max_digits(**kwargs)) else: errors.extend(digits_errors) return errors def _check_decimal_places(self): try: decimal_places = int(self.decimal_places) if decimal_places < 0: raise ValueError() except TypeError: return [ checks.Error( "DecimalFields must define a 'decimal_places' attribute.", obj=self, id='fields.E130', ) ] except ValueError: return [ checks.Error( "'decimal_places' must be a non-negative integer.", obj=self, id='fields.E131', ) ] else: return [] def _check_max_digits(self): try: max_digits = int(self.max_digits) if max_digits <= 0: raise ValueError() except TypeError: return [ checks.Error( "DecimalFields must define a 'max_digits' attribute.", obj=self, id='fields.E132', ) ] 
except ValueError: return [ checks.Error( "'max_digits' must be a positive integer.", obj=self, id='fields.E133', ) ] else: return [] def _check_decimal_places_and_max_digits(self, **kwargs): if int(self.decimal_places) > int(self.max_digits): return [ checks.Error( "'max_digits' must be greater or equal to 'decimal_places'.", obj=self, id='fields.E134', ) ] return [] @cached_property def validators(self): return super().validators + [ validators.DecimalValidator(self.max_digits, self.decimal_places) ] @cached_property def context(self): return decimal.Context(prec=self.max_digits) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.max_digits is not None: kwargs['max_digits'] = self.max_digits if self.decimal_places is not None: kwargs['decimal_places'] = self.decimal_places return name, path, args, kwargs def get_internal_type(self): return "DecimalField" def to_python(self, value): if value is None: return value if isinstance(value, float): return self.context.create_decimal_from_float(value) try: return decimal.Decimal(value) except (decimal.InvalidOperation, TypeError, ValueError): raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def get_db_prep_save(self, value, connection): return connection.ops.adapt_decimalfield_value(self.to_python(value), self.max_digits, self.decimal_places) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def formfield(self, **kwargs): return super().formfield(**{ 'max_digits': self.max_digits, 'decimal_places': self.decimal_places, 'form_class': forms.DecimalField, **kwargs, }) class DurationField(Field): """ Store timedelta objects. Use interval on PostgreSQL, INTERVAL DAY TO SECOND on Oracle, and bigint of microseconds on other databases. """ empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value has an invalid format. It must be in ' '[DD] [[HH:]MM:]ss[.uuuuuu] format.') } description = _("Duration") def get_internal_type(self): return "DurationField" def to_python(self, value): if value is None: return value if isinstance(value, datetime.timedelta): return value try: parsed = parse_duration(value) except ValueError: pass else: if parsed is not None: return parsed raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def get_db_prep_value(self, value, connection, prepared=False): if connection.features.has_native_duration_field: return value if value is None: return None return duration_microseconds(value) def get_db_converters(self, connection): converters = [] if not connection.features.has_native_duration_field: converters.append(connection.ops.convert_durationfield_value) return converters + super().get_db_converters(connection) def value_to_string(self, obj): val = self.value_from_object(obj) return '' if val is None else duration_string(val) def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.DurationField, **kwargs, }) class EmailField(CharField): default_validators = [validators.validate_email] description = _("Email address") def __init__(self, *args, **kwargs): # max_length=254 to be compliant with RFCs 3696 and 5321 kwargs.setdefault('max_length', 254) super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() # We do not exclude max_length if it matches default as we want to change # the default in future. 
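        # Editor's note: EmailField().deconstruct() therefore keeps
        # {'max_length': 254} in kwargs, unlike URLField, which drops its
        # default max_length=200 during deconstruction.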
return name, path, args, kwargs def formfield(self, **kwargs): # As with CharField, this will cause email validation to be performed # twice. return super().formfield(**{ 'form_class': forms.EmailField, **kwargs, }) class FilePathField(Field): description = _("File path") def __init__(self, verbose_name=None, name=None, path='', match=None, recursive=False, allow_files=True, allow_folders=False, **kwargs): self.path, self.match, self.recursive = path, match, recursive self.allow_files, self.allow_folders = allow_files, allow_folders kwargs.setdefault('max_length', 100) super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_allowing_files_or_folders(**kwargs), ] def _check_allowing_files_or_folders(self, **kwargs): if not self.allow_files and not self.allow_folders: return [ checks.Error( "FilePathFields must have either 'allow_files' or 'allow_folders' set to True.", obj=self, id='fields.E140', ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.path != '': kwargs['path'] = self.path if self.match is not None: kwargs['match'] = self.match if self.recursive is not False: kwargs['recursive'] = self.recursive if self.allow_files is not True: kwargs['allow_files'] = self.allow_files if self.allow_folders is not False: kwargs['allow_folders'] = self.allow_folders if kwargs.get("max_length") == 100: del kwargs["max_length"] return name, path, args, kwargs def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None return str(value) def formfield(self, **kwargs): return super().formfield(**{ 'path': self.path() if callable(self.path) else self.path, 'match': self.match, 'recursive': self.recursive, 'form_class': forms.FilePathField, 'allow_files': self.allow_files, 'allow_folders': self.allow_folders, **kwargs, }) def get_internal_type(self): return "FilePathField" class FloatField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value must be a float.'), } description = _("Floating point number") def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None try: return float(value) except (TypeError, ValueError) as e: raise e.__class__( "Field '%s' expected a number but got %r." % (self.name, value), ) from e def get_internal_type(self): return "FloatField" def to_python(self, value): if value is None: return value try: return float(value) except (TypeError, ValueError): raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.FloatField, **kwargs, }) class IntegerField(Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value must be an integer.'), } description = _("Integer") def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_max_length_warning(), ] def _check_max_length_warning(self): if self.max_length is not None: return [ checks.Warning( "'max_length' is ignored when used with %s." % self.__class__.__name__, hint="Remove 'max_length' from field", obj=self, id='fields.W122', ) ] return [] @cached_property def validators(self): # These validators can't be added at field initialization time since # they're based on values retrieved from `connection`. 
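        # Editor's illustration: for a plain IntegerField most backends
        # report integer_field_range('IntegerField') as
        # (-2147483648, 2147483647), so MinValueValidator(-2147483648) and
        # MaxValueValidator(2147483647) are appended below unless the user
        # already supplied validators at least that strict.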
validators_ = super().validators internal_type = self.get_internal_type() min_value, max_value = connection.ops.integer_field_range(internal_type) if min_value is not None and not any( ( isinstance(validator, validators.MinValueValidator) and ( validator.limit_value() if callable(validator.limit_value) else validator.limit_value ) >= min_value ) for validator in validators_ ): validators_.append(validators.MinValueValidator(min_value)) if max_value is not None and not any( ( isinstance(validator, validators.MaxValueValidator) and ( validator.limit_value() if callable(validator.limit_value) else validator.limit_value ) <= max_value ) for validator in validators_ ): validators_.append(validators.MaxValueValidator(max_value)) return validators_ def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None try: return int(value) except (TypeError, ValueError) as e: raise e.__class__( "Field '%s' expected a number but got %r." % (self.name, value), ) from e def get_internal_type(self): return "IntegerField" def to_python(self, value): if value is None: return value try: return int(value) except (TypeError, ValueError): raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.IntegerField, **kwargs, }) class BigIntegerField(IntegerField): description = _("Big (8 byte) integer") MAX_BIGINT = 9223372036854775807 def get_internal_type(self): return "BigIntegerField" def formfield(self, **kwargs): return super().formfield(**{ 'min_value': -BigIntegerField.MAX_BIGINT - 1, 'max_value': BigIntegerField.MAX_BIGINT, **kwargs, }) class SmallIntegerField(IntegerField): description = _('Small integer') def get_internal_type(self): return 'SmallIntegerField' class IPAddressField(Field): empty_strings_allowed = False description = _("IPv4 address") system_check_removed_details = { 'msg': ( 'IPAddressField has been removed except for support in ' 'historical migrations.' 
), 'hint': 'Use GenericIPAddressField instead.', 'id': 'fields.E900', } def __init__(self, *args, **kwargs): kwargs['max_length'] = 15 super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs['max_length'] return name, path, args, kwargs def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None return str(value) def get_internal_type(self): return "IPAddressField" class GenericIPAddressField(Field): empty_strings_allowed = False description = _("IP address") default_error_messages = {} def __init__(self, verbose_name=None, name=None, protocol='both', unpack_ipv4=False, *args, **kwargs): self.unpack_ipv4 = unpack_ipv4 self.protocol = protocol self.default_validators, invalid_error_message = \ validators.ip_address_validators(protocol, unpack_ipv4) self.default_error_messages['invalid'] = invalid_error_message kwargs['max_length'] = 39 super().__init__(verbose_name, name, *args, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_blank_and_null_values(**kwargs), ] def _check_blank_and_null_values(self, **kwargs): if not getattr(self, 'null', False) and getattr(self, 'blank', False): return [ checks.Error( 'GenericIPAddressFields cannot have blank=True if null=False, ' 'as blank values are stored as nulls.', obj=self, id='fields.E150', ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.unpack_ipv4 is not False: kwargs['unpack_ipv4'] = self.unpack_ipv4 if self.protocol != "both": kwargs['protocol'] = self.protocol if kwargs.get("max_length") == 39: del kwargs['max_length'] return name, path, args, kwargs def get_internal_type(self): return "GenericIPAddressField" def to_python(self, value): if value is None: return None if not isinstance(value, str): value = str(value) value = value.strip() if ':' in value: return clean_ipv6_address(value, self.unpack_ipv4, self.error_messages['invalid']) return value def get_db_prep_value(self, value, connection, prepared=False): if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_ipaddressfield_value(value) def get_prep_value(self, value): value = super().get_prep_value(value) if value is None: return None if value and ':' in value: try: return clean_ipv6_address(value, self.unpack_ipv4) except exceptions.ValidationError: pass return str(value) def formfield(self, **kwargs): return super().formfield(**{ 'protocol': self.protocol, 'form_class': forms.GenericIPAddressField, **kwargs, }) class NullBooleanField(BooleanField): default_error_messages = { 'invalid': _('“%(value)s” value must be either None, True or False.'), 'invalid_nullable': _('“%(value)s” value must be either None, True or False.'), } description = _("Boolean (Either True, False or None)") system_check_removed_details = { 'msg': ( 'NullBooleanField is removed except for support in historical ' 'migrations.' 
), 'hint': 'Use BooleanField(null=True) instead.', 'id': 'fields.E903', } def __init__(self, *args, **kwargs): kwargs['null'] = True kwargs['blank'] = True super().__init__(*args, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs['null'] del kwargs['blank'] return name, path, args, kwargs def get_internal_type(self): return "NullBooleanField" class PositiveIntegerRelDbTypeMixin: def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) if not hasattr(cls, 'integer_field_class'): cls.integer_field_class = next( ( parent for parent in cls.__mro__[1:] if issubclass(parent, IntegerField) ), None, ) def rel_db_type(self, connection): """ Return the data type that a related field pointing to this field should use. In most cases, a foreign key pointing to a positive integer primary key will have an integer column data type but some databases (e.g. MySQL) have an unsigned integer type. In that case (related_fields_match_type=True), the primary key should return its db_type. """ if connection.features.related_fields_match_type: return self.db_type(connection) else: return self.integer_field_class().db_type(connection=connection) class PositiveBigIntegerField(PositiveIntegerRelDbTypeMixin, BigIntegerField): description = _('Positive big integer') def get_internal_type(self): return 'PositiveBigIntegerField' def formfield(self, **kwargs): return super().formfield(**{ 'min_value': 0, **kwargs, }) class PositiveIntegerField(PositiveIntegerRelDbTypeMixin, IntegerField): description = _("Positive integer") def get_internal_type(self): return "PositiveIntegerField" def formfield(self, **kwargs): return super().formfield(**{ 'min_value': 0, **kwargs, }) class PositiveSmallIntegerField(PositiveIntegerRelDbTypeMixin, SmallIntegerField): description = _("Positive small integer") def get_internal_type(self): return "PositiveSmallIntegerField" def formfield(self, **kwargs): return super().formfield(**{ 'min_value': 0, **kwargs, }) class SlugField(CharField): default_validators = [validators.validate_slug] description = _("Slug (up to %(max_length)s)") def __init__(self, *args, max_length=50, db_index=True, allow_unicode=False, **kwargs): self.allow_unicode = allow_unicode if self.allow_unicode: self.default_validators = [validators.validate_unicode_slug] super().__init__(*args, max_length=max_length, db_index=db_index, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get("max_length") == 50: del kwargs['max_length'] if self.db_index is False: kwargs['db_index'] = False else: del kwargs['db_index'] if self.allow_unicode is not False: kwargs['allow_unicode'] = self.allow_unicode return name, path, args, kwargs def get_internal_type(self): return "SlugField" def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.SlugField, 'allow_unicode': self.allow_unicode, **kwargs, }) class TextField(Field): description = _("Text") def __init__(self, *args, db_collation=None, **kwargs): super().__init__(*args, **kwargs) self.db_collation = db_collation def check(self, **kwargs): databases = kwargs.get('databases') or [] return [ *super().check(**kwargs), *self._check_db_collation(databases), ] def _check_db_collation(self, databases): errors = [] for db in databases: if not router.allow_migrate_model(db, self.model): continue connection = connections[db] if not ( self.db_collation is None or 'supports_collation_on_textfield' in self.model._meta.required_db_features or 
connection.features.supports_collation_on_textfield ): errors.append( checks.Error( '%s does not support a database collation on ' 'TextFields.' % connection.display_name, obj=self, id='fields.E190', ), ) return errors def get_internal_type(self): return "TextField" def to_python(self, value): if isinstance(value, str) or value is None: return value return str(value) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def formfield(self, **kwargs): # Passing max_length to forms.CharField means that the value's length # will be validated twice. This is considered acceptable since we want # the value in the form field (to pass into widget for example). return super().formfield(**{ 'max_length': self.max_length, **({} if self.choices is not None else {'widget': forms.Textarea}), **kwargs, }) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.db_collation: kwargs['db_collation'] = self.db_collation return name, path, args, kwargs class TimeField(DateTimeCheckMixin, Field): empty_strings_allowed = False default_error_messages = { 'invalid': _('“%(value)s” value has an invalid format. It must be in ' 'HH:MM[:ss[.uuuuuu]] format.'), 'invalid_time': _('“%(value)s” value has the correct format ' '(HH:MM[:ss[.uuuuuu]]) but it is an invalid time.'), } description = _("Time") def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs): self.auto_now, self.auto_now_add = auto_now, auto_now_add if auto_now or auto_now_add: kwargs['editable'] = False kwargs['blank'] = True super().__init__(verbose_name, name, **kwargs) def _check_fix_default_value(self): """ Warn that using an actual date or datetime value is probably wrong; it's only evaluated on server startup. """ if not self.has_default(): return [] value = self.default if isinstance(value, datetime.datetime): now = None elif isinstance(value, datetime.time): now = _get_naive_now() # This will not use the right date in the race condition where now # is just before the date change and value is just past 0:00. value = datetime.datetime.combine(now.date(), value) else: # No explicit time / datetime value -- no checks necessary return [] # At this point, value is a datetime object. return self._check_if_value_fixed(value, now=now) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.auto_now is not False: kwargs["auto_now"] = self.auto_now if self.auto_now_add is not False: kwargs["auto_now_add"] = self.auto_now_add if self.auto_now or self.auto_now_add: del kwargs['blank'] del kwargs['editable'] return name, path, args, kwargs def get_internal_type(self): return "TimeField" def to_python(self, value): if value is None: return None if isinstance(value, datetime.time): return value if isinstance(value, datetime.datetime): # Not usually a good idea to pass in a datetime here (it loses # information), but this can be a side-effect of interacting with a # database backend (e.g. Oracle), so we'll be accommodating. 
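            # Editor's illustration: datetime.datetime(2023, 5, 1, 14, 30) is
            # accepted here and silently narrowed to datetime.time(14, 30),
            # dropping the date component as the comment above warns.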
return value.time() try: parsed = parse_time(value) if parsed is not None: return parsed except ValueError: raise exceptions.ValidationError( self.error_messages['invalid_time'], code='invalid_time', params={'value': value}, ) raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) def pre_save(self, model_instance, add): if self.auto_now or (self.auto_now_add and add): value = datetime.datetime.now().time() setattr(model_instance, self.attname, value) return value else: return super().pre_save(model_instance, add) def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def get_db_prep_value(self, value, connection, prepared=False): # Casts times into the format expected by the backend if not prepared: value = self.get_prep_value(value) return connection.ops.adapt_timefield_value(value) def value_to_string(self, obj): val = self.value_from_object(obj) return '' if val is None else val.isoformat() def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.TimeField, **kwargs, }) class URLField(CharField): default_validators = [validators.URLValidator()] description = _("URL") def __init__(self, verbose_name=None, name=None, **kwargs): kwargs.setdefault('max_length', 200) super().__init__(verbose_name, name, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get("max_length") == 200: del kwargs['max_length'] return name, path, args, kwargs def formfield(self, **kwargs): # As with CharField, this will cause URL validation to be performed # twice. return super().formfield(**{ 'form_class': forms.URLField, **kwargs, }) class BinaryField(Field): description = _("Raw binary data") empty_values = [None, b''] def __init__(self, *args, **kwargs): kwargs.setdefault('editable', False) super().__init__(*args, **kwargs) if self.max_length is not None: self.validators.append(validators.MaxLengthValidator(self.max_length)) def check(self, **kwargs): return [*super().check(**kwargs), *self._check_str_default_value()] def _check_str_default_value(self): if self.has_default() and isinstance(self.default, str): return [ checks.Error( "BinaryField's default cannot be a string. 
Use bytes " "content instead.", obj=self, id='fields.E170', ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.editable: kwargs['editable'] = True else: del kwargs['editable'] return name, path, args, kwargs def get_internal_type(self): return "BinaryField" def get_placeholder(self, value, compiler, connection): return connection.ops.binary_placeholder_sql(value) def get_default(self): if self.has_default() and not callable(self.default): return self.default default = super().get_default() if default == '': return b'' return default def get_db_prep_value(self, value, connection, prepared=False): value = super().get_db_prep_value(value, connection, prepared) if value is not None: return connection.Database.Binary(value) return value def value_to_string(self, obj): """Binary data is serialized as base64""" return b64encode(self.value_from_object(obj)).decode('ascii') def to_python(self, value): # If it's a string, it should be base64-encoded data if isinstance(value, str): return memoryview(b64decode(value.encode('ascii'))) return value class UUIDField(Field): default_error_messages = { 'invalid': _('“%(value)s” is not a valid UUID.'), } description = _('Universally unique identifier') empty_strings_allowed = False def __init__(self, verbose_name=None, **kwargs): kwargs['max_length'] = 32 super().__init__(verbose_name, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs['max_length'] return name, path, args, kwargs def get_internal_type(self): return "UUIDField" def get_prep_value(self, value): value = super().get_prep_value(value) return self.to_python(value) def get_db_prep_value(self, value, connection, prepared=False): if value is None: return None if not isinstance(value, uuid.UUID): value = self.to_python(value) if connection.features.has_native_uuid_field: return value return value.hex def to_python(self, value): if value is not None and not isinstance(value, uuid.UUID): input_form = 'int' if isinstance(value, int) else 'hex' try: return uuid.UUID(**{input_form: value}) except (AttributeError, ValueError): raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={'value': value}, ) return value def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.UUIDField, **kwargs, }) class AutoFieldMixin: db_returning = True def __init__(self, *args, **kwargs): kwargs['blank'] = True super().__init__(*args, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_primary_key(), ] def _check_primary_key(self): if not self.primary_key: return [ checks.Error( 'AutoFields must set primary_key=True.', obj=self, id='fields.E100', ), ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs['blank'] kwargs['primary_key'] = True return name, path, args, kwargs def validate(self, value, model_instance): pass def get_db_prep_value(self, value, connection, prepared=False): if not prepared: value = self.get_prep_value(value) value = connection.ops.validate_autopk_value(value) return value def contribute_to_class(self, cls, name, **kwargs): if cls._meta.auto_field: raise ValueError( "Model %s can't have more than one auto-generated field." % cls._meta.label ) super().contribute_to_class(cls, name, **kwargs) cls._meta.auto_field = self def formfield(self, **kwargs): return None class AutoFieldMeta(type): """ Metaclass to maintain backward inheritance compatibility for AutoField. 
It is intended that AutoFieldMixin become public API when it is possible to create a non-integer automatically-generated field using column defaults stored in the database. In many areas Django also relies on using isinstance() to check for an automatically-generated field as a subclass of AutoField. A new flag needs to be implemented on Field to be used instead. When these issues have been addressed, this metaclass could be used to deprecate inheritance from AutoField and use of isinstance() with AutoField for detecting automatically-generated fields. """ @property def _subclasses(self): return (BigAutoField, SmallAutoField) def __instancecheck__(self, instance): return isinstance(instance, self._subclasses) or super().__instancecheck__(instance) def __subclasscheck__(self, subclass): return issubclass(subclass, self._subclasses) or super().__subclasscheck__(subclass) class AutoField(AutoFieldMixin, IntegerField, metaclass=AutoFieldMeta): def get_internal_type(self): return 'AutoField' def rel_db_type(self, connection): return IntegerField().db_type(connection=connection) class BigAutoField(AutoFieldMixin, BigIntegerField): def get_internal_type(self): return 'BigAutoField' def rel_db_type(self, connection): return BigIntegerField().db_type(connection=connection) class SmallAutoField(AutoFieldMixin, SmallIntegerField): def get_internal_type(self): return 'SmallAutoField' def rel_db_type(self, connection): return SmallIntegerField().db_type(connection=connection)
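# ---------------------------------------------------------------------------
# Editor's usage sketch (illustrative; not part of the original module). It
# exercises the value-conversion API defined above on unbound field
# instances and relies only on names defined or imported in this module. The
# bare settings.configure() call is an assumption needed solely so the demo
# can run stand-alone; no database access happens here.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    if not settings.configured:
        settings.configure()

    f = IntegerField()
    assert f.to_python('42') == 42     # strings are coerced to int
    assert f.get_prep_value(7) == 7    # already-clean values pass through

    b = BooleanField()
    assert b.to_python('t') is True    # 't'/'True'/'1' are accepted
    assert b.to_python('0') is False   # 'f'/'False'/'0' likewise

    u = UUIDField()
    assert u.to_python('12345678123456781234567812345678').hex == (
        '12345678123456781234567812345678'
    )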
import functools import inspect from functools import partial from django import forms from django.apps import apps from django.conf import SettingsReference, settings from django.core import checks, exceptions from django.db import connection, router from django.db.backends import utils from django.db.models import Q from django.db.models.constants import LOOKUP_SEP from django.db.models.deletion import CASCADE, SET_DEFAULT, SET_NULL from django.db.models.query_utils import PathInfo from django.db.models.utils import make_model_tuple from django.utils.functional import cached_property from django.utils.translation import gettext_lazy as _ from . import Field from .mixins import FieldCacheMixin from .related_descriptors import ( ForeignKeyDeferredAttribute, ForwardManyToOneDescriptor, ForwardOneToOneDescriptor, ManyToManyDescriptor, ReverseManyToOneDescriptor, ReverseOneToOneDescriptor, ) from .related_lookups import ( RelatedExact, RelatedGreaterThan, RelatedGreaterThanOrEqual, RelatedIn, RelatedIsNull, RelatedLessThan, RelatedLessThanOrEqual, ) from .reverse_related import ( ForeignObjectRel, ManyToManyRel, ManyToOneRel, OneToOneRel, ) RECURSIVE_RELATIONSHIP_CONSTANT = 'self' def resolve_relation(scope_model, relation): """ Transform relation into a model or fully-qualified model string of the form "app_label.ModelName", relative to scope_model. The relation argument can be: * RECURSIVE_RELATIONSHIP_CONSTANT, i.e. the string "self", in which case the model argument will be returned. * A bare model name without an app_label, in which case scope_model's app_label will be prepended. * An "app_label.ModelName" string. * A model class, which will be returned unchanged. """ # Check for recursive relations if relation == RECURSIVE_RELATIONSHIP_CONSTANT: relation = scope_model # Look for an "app.Model" relation if isinstance(relation, str): if "." not in relation: relation = "%s.%s" % (scope_model._meta.app_label, relation) return relation def lazy_related_operation(function, model, *related_models, **kwargs): """ Schedule `function` to be called once `model` and all `related_models` have been imported and registered with the app registry. `function` will be called with the newly-loaded model classes as its positional arguments, plus any optional keyword arguments. The `model` argument must be a model class. Each subsequent positional argument is another model, or a reference to another model - see `resolve_relation()` for the various forms these may take. Any relative references will be resolved relative to `model`. This is a convenience wrapper for `Apps.lazy_model_operation` - the app registry model used is the one found in `model._meta.apps`. """ models = [model] + [resolve_relation(model, rel) for rel in related_models] model_keys = (make_model_tuple(m) for m in models) apps = model._meta.apps return apps.lazy_model_operation(partial(function, **kwargs), *model_keys) class RelatedField(FieldCacheMixin, Field): """Base class that all relational fields inherit from.""" # Field flags one_to_many = False one_to_one = False many_to_many = False many_to_one = False def __init__( self, related_name=None, related_query_name=None, limit_choices_to=None, **kwargs, ): self._related_name = related_name self._related_query_name = related_query_name self._limit_choices_to = limit_choices_to super().__init__(**kwargs) @cached_property def related_model(self): # Can't cache this property until all the models are loaded. 
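        # Editor's note: accessing related_model before the app registry is
        # populated (i.e. before django.setup() finishes) raises
        # AppRegistryNotReady; e.g. Book._meta.get_field('author').related_model
        # is only safe afterwards. (Model names here are hypothetical.)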
apps.check_models_ready() return self.remote_field.model def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_related_name_is_valid(), *self._check_related_query_name_is_valid(), *self._check_relation_model_exists(), *self._check_referencing_to_swapped_model(), *self._check_clashes(), ] def _check_related_name_is_valid(self): import keyword related_name = self.remote_field.related_name if related_name is None: return [] is_valid_id = not keyword.iskeyword(related_name) and related_name.isidentifier() if not (is_valid_id or related_name.endswith('+')): return [ checks.Error( "The name '%s' is invalid related_name for field %s.%s" % (self.remote_field.related_name, self.model._meta.object_name, self.name), hint="Related name must be a valid Python identifier or end with a '+'", obj=self, id='fields.E306', ) ] return [] def _check_related_query_name_is_valid(self): if self.remote_field.is_hidden(): return [] rel_query_name = self.related_query_name() errors = [] if rel_query_name.endswith('_'): errors.append( checks.Error( "Reverse query name '%s' must not end with an underscore." % rel_query_name, hint=("Add or change a related_name or related_query_name " "argument for this field."), obj=self, id='fields.E308', ) ) if LOOKUP_SEP in rel_query_name: errors.append( checks.Error( "Reverse query name '%s' must not contain '%s'." % (rel_query_name, LOOKUP_SEP), hint=("Add or change a related_name or related_query_name " "argument for this field."), obj=self, id='fields.E309', ) ) return errors def _check_relation_model_exists(self): rel_is_missing = self.remote_field.model not in self.opts.apps.get_models() rel_is_string = isinstance(self.remote_field.model, str) model_name = self.remote_field.model if rel_is_string else self.remote_field.model._meta.object_name if rel_is_missing and (rel_is_string or not self.remote_field.model._meta.swapped): return [ checks.Error( "Field defines a relation with model '%s', which is either " "not installed, or is abstract." % model_name, obj=self, id='fields.E300', ) ] return [] def _check_referencing_to_swapped_model(self): if (self.remote_field.model not in self.opts.apps.get_models() and not isinstance(self.remote_field.model, str) and self.remote_field.model._meta.swapped): return [ checks.Error( "Field defines a relation with the model '%s', which has " "been swapped out." % self.remote_field.model._meta.label, hint="Update the relation to point at 'settings.%s'." % self.remote_field.model._meta.swappable, obj=self, id='fields.E301', ) ] return [] def _check_clashes(self): """Check accessor and reverse query name clashes.""" from django.db.models.base import ModelBase errors = [] opts = self.model._meta # `f.remote_field.model` may be a string instead of a model. Skip if model name is # not resolved. if not isinstance(self.remote_field.model, ModelBase): return [] # Consider that we are checking field `Model.foreign` and the models # are: # # class Target(models.Model): # model = models.IntegerField() # model_set = models.IntegerField() # # class Model(models.Model): # foreign = models.ForeignKey(Target) # m2m = models.ManyToManyField(Target) # rel_opts.object_name == "Target" rel_opts = self.remote_field.model._meta # If the field doesn't install a backward relation on the target model # (so `is_hidden` returns True), then there are no clashes to check # and we can skip these fields. rel_is_hidden = self.remote_field.is_hidden() rel_name = self.remote_field.get_accessor_name() # i. e. 
"model_set" rel_query_name = self.related_query_name() # i. e. "model" # i.e. "app_label.Model.field". field_name = '%s.%s' % (opts.label, self.name) # Check clashes between accessor or reverse query name of `field` # and any other field name -- i.e. accessor for Model.foreign is # model_set and it clashes with Target.model_set. potential_clashes = rel_opts.fields + rel_opts.many_to_many for clash_field in potential_clashes: # i.e. "app_label.Target.model_set". clash_name = '%s.%s' % (rel_opts.label, clash_field.name) if not rel_is_hidden and clash_field.name == rel_name: errors.append( checks.Error( "Reverse accessor for '%s' clashes with field name '%s'." % (field_name, clash_name), hint=("Rename field '%s', or add/change a related_name " "argument to the definition for field '%s'.") % (clash_name, field_name), obj=self, id='fields.E302', ) ) if clash_field.name == rel_query_name: errors.append( checks.Error( "Reverse query name for '%s' clashes with field name '%s'." % (field_name, clash_name), hint=("Rename field '%s', or add/change a related_name " "argument to the definition for field '%s'.") % (clash_name, field_name), obj=self, id='fields.E303', ) ) # Check clashes between accessors/reverse query names of `field` and # any other field accessor -- i. e. Model.foreign accessor clashes with # Model.m2m accessor. potential_clashes = (r for r in rel_opts.related_objects if r.field is not self) for clash_field in potential_clashes: # i.e. "app_label.Model.m2m". clash_name = '%s.%s' % ( clash_field.related_model._meta.label, clash_field.field.name, ) if not rel_is_hidden and clash_field.get_accessor_name() == rel_name: errors.append( checks.Error( "Reverse accessor for '%s' clashes with reverse accessor for '%s'." % (field_name, clash_name), hint=("Add or change a related_name argument " "to the definition for '%s' or '%s'.") % (field_name, clash_name), obj=self, id='fields.E304', ) ) if clash_field.get_accessor_name() == rel_query_name: errors.append( checks.Error( "Reverse query name for '%s' clashes with reverse query name for '%s'." % (field_name, clash_name), hint=("Add or change a related_name argument " "to the definition for '%s' or '%s'.") % (field_name, clash_name), obj=self, id='fields.E305', ) ) return errors def db_type(self, connection): # By default related field will not have a column as it relates to # columns from another table. 
return None def contribute_to_class(self, cls, name, private_only=False, **kwargs): super().contribute_to_class(cls, name, private_only=private_only, **kwargs) self.opts = cls._meta if not cls._meta.abstract: if self.remote_field.related_name: related_name = self.remote_field.related_name else: related_name = self.opts.default_related_name if related_name: related_name = related_name % { 'class': cls.__name__.lower(), 'model_name': cls._meta.model_name.lower(), 'app_label': cls._meta.app_label.lower() } self.remote_field.related_name = related_name if self.remote_field.related_query_name: related_query_name = self.remote_field.related_query_name % { 'class': cls.__name__.lower(), 'app_label': cls._meta.app_label.lower(), } self.remote_field.related_query_name = related_query_name def resolve_related_class(model, related, field): field.remote_field.model = related field.do_related_class(related, model) lazy_related_operation(resolve_related_class, cls, self.remote_field.model, field=self) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self._limit_choices_to: kwargs['limit_choices_to'] = self._limit_choices_to if self._related_name is not None: kwargs['related_name'] = self._related_name if self._related_query_name is not None: kwargs['related_query_name'] = self._related_query_name return name, path, args, kwargs def get_forward_related_filter(self, obj): """ Return the keyword arguments that when supplied to self.model.object.filter(), would select all instances related through this field to the remote obj. This is used to build the querysets returned by related descriptors. obj is an instance of self.related_field.model. """ return { '%s__%s' % (self.name, rh_field.name): getattr(obj, rh_field.attname) for _, rh_field in self.related_fields } def get_reverse_related_filter(self, obj): """ Complement to get_forward_related_filter(). Return the keyword arguments that when passed to self.related_field.model.object.filter() select all instances of self.related_field.model related through this field to obj. obj is an instance of self.model. """ base_filter = ( (rh_field.attname, getattr(obj, lh_field.attname)) for lh_field, rh_field in self.related_fields ) descriptor_filter = self.get_extra_descriptor_filter(obj) base_q = Q(*base_filter) if isinstance(descriptor_filter, dict): return base_q & Q(**descriptor_filter) elif descriptor_filter: return base_q & descriptor_filter return base_q @property def swappable_setting(self): """ Get the setting that this is powered from for swapping, or None if it's not swapped in / marked with swappable=False. """ if self.swappable: # Work out string form of "to" if isinstance(self.remote_field.model, str): to_string = self.remote_field.model else: to_string = self.remote_field.model._meta.label return apps.get_swappable_settings_name(to_string) return None def set_attributes_from_rel(self): self.name = ( self.name or (self.remote_field.model._meta.model_name + '_' + self.remote_field.model._meta.pk.name) ) if self.verbose_name is None: self.verbose_name = self.remote_field.model._meta.verbose_name self.remote_field.set_field_name() def do_related_class(self, other, cls): self.set_attributes_from_rel() self.contribute_to_related_class(other, self.remote_field) def get_limit_choices_to(self): """ Return ``limit_choices_to`` for this model field. If it is a callable, it will be invoked and the result will be returned. 
""" if callable(self.remote_field.limit_choices_to): return self.remote_field.limit_choices_to() return self.remote_field.limit_choices_to def formfield(self, **kwargs): """ Pass ``limit_choices_to`` to the field being constructed. Only passes it if there is a type that supports related fields. This is a similar strategy used to pass the ``queryset`` to the field being constructed. """ defaults = {} if hasattr(self.remote_field, 'get_related_field'): # If this is a callable, do not invoke it here. Just pass # it in the defaults for when the form class will later be # instantiated. limit_choices_to = self.remote_field.limit_choices_to defaults.update({ 'limit_choices_to': limit_choices_to, }) defaults.update(kwargs) return super().formfield(**defaults) def related_query_name(self): """ Define the name that can be used to identify this related object in a table-spanning query. """ return self.remote_field.related_query_name or self.remote_field.related_name or self.opts.model_name @property def target_field(self): """ When filtering against this relation, return the field on the remote model against which the filtering should happen. """ target_fields = self.get_path_info()[-1].target_fields if len(target_fields) > 1: raise exceptions.FieldError( "The relation has multiple target fields, but only single target field was asked for") return target_fields[0] def get_cache_name(self): return self.name class ForeignObject(RelatedField): """ Abstraction of the ForeignKey relation to support multi-column relations. """ # Field flags many_to_many = False many_to_one = True one_to_many = False one_to_one = False requires_unique_target = True related_accessor_class = ReverseManyToOneDescriptor forward_related_accessor_class = ForwardManyToOneDescriptor rel_class = ForeignObjectRel def __init__(self, to, on_delete, from_fields, to_fields, rel=None, related_name=None, related_query_name=None, limit_choices_to=None, parent_link=False, swappable=True, **kwargs): if rel is None: rel = self.rel_class( self, to, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, parent_link=parent_link, on_delete=on_delete, ) super().__init__( rel=rel, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, **kwargs, ) self.from_fields = from_fields self.to_fields = to_fields self.swappable = swappable def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_to_fields_exist(), *self._check_unique_target(), ] def _check_to_fields_exist(self): # Skip nonexistent models. if isinstance(self.remote_field.model, str): return [] errors = [] for to_field in self.to_fields: if to_field: try: self.remote_field.model._meta.get_field(to_field) except exceptions.FieldDoesNotExist: errors.append( checks.Error( "The to_field '%s' doesn't exist on the related " "model '%s'." 
% (to_field, self.remote_field.model._meta.label), obj=self, id='fields.E312', ) ) return errors def _check_unique_target(self): rel_is_string = isinstance(self.remote_field.model, str) if rel_is_string or not self.requires_unique_target: return [] try: self.foreign_related_fields except exceptions.FieldDoesNotExist: return [] if not self.foreign_related_fields: return [] unique_foreign_fields = { frozenset([f.name]) for f in self.remote_field.model._meta.get_fields() if getattr(f, 'unique', False) } unique_foreign_fields.update({ frozenset(ut) for ut in self.remote_field.model._meta.unique_together }) unique_foreign_fields.update({ frozenset(uc.fields) for uc in self.remote_field.model._meta.total_unique_constraints }) foreign_fields = {f.name for f in self.foreign_related_fields} has_unique_constraint = any(u <= foreign_fields for u in unique_foreign_fields) if not has_unique_constraint and len(self.foreign_related_fields) > 1: field_combination = ', '.join( "'%s'" % rel_field.name for rel_field in self.foreign_related_fields ) model_name = self.remote_field.model.__name__ return [ checks.Error( "No subset of the fields %s on model '%s' is unique." % (field_combination, model_name), hint=( 'Mark a single field as unique=True or add a set of ' 'fields to a unique constraint (via unique_together ' 'or a UniqueConstraint (without condition) in the ' 'model Meta.constraints).' ), obj=self, id='fields.E310', ) ] elif not has_unique_constraint: field_name = self.foreign_related_fields[0].name model_name = self.remote_field.model.__name__ return [ checks.Error( "'%s.%s' must be unique because it is referenced by " "a foreign key." % (model_name, field_name), hint=( 'Add unique=True to this field or add a ' 'UniqueConstraint (without condition) in the model ' 'Meta.constraints.' ), obj=self, id='fields.E311', ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() kwargs['on_delete'] = self.remote_field.on_delete kwargs['from_fields'] = self.from_fields kwargs['to_fields'] = self.to_fields if self.remote_field.parent_link: kwargs['parent_link'] = self.remote_field.parent_link if isinstance(self.remote_field.model, str): if '.' in self.remote_field.model: app_label, model_name = self.remote_field.model.split('.') kwargs['to'] = '%s.%s' % (app_label, model_name.lower()) else: kwargs['to'] = self.remote_field.model.lower() else: kwargs['to'] = self.remote_field.model._meta.label_lower # If swappable is True, then see if we're actually pointing to the target # of a swap. 
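        # Editor's sketch: with the default user model,
        # ForeignKey(settings.AUTH_USER_MODEL, ...) deconstructs with
        # to=SettingsReference('auth.user', 'AUTH_USER_MODEL'), so generated
        # migrations keep pointing at the setting rather than the concrete
        # model.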
swappable_setting = self.swappable_setting if swappable_setting is not None: # If it's already a settings reference, error if hasattr(kwargs['to'], "setting_name"): if kwargs['to'].setting_name != swappable_setting: raise ValueError( "Cannot deconstruct a ForeignKey pointing to a model " "that is swapped in place of more than one model (%s and %s)" % (kwargs['to'].setting_name, swappable_setting) ) # Set it kwargs['to'] = SettingsReference( kwargs['to'], swappable_setting, ) return name, path, args, kwargs def resolve_related_fields(self): if not self.from_fields or len(self.from_fields) != len(self.to_fields): raise ValueError('Foreign Object from and to fields must be the same non-zero length') if isinstance(self.remote_field.model, str): raise ValueError('Related model %r cannot be resolved' % self.remote_field.model) related_fields = [] for index in range(len(self.from_fields)): from_field_name = self.from_fields[index] to_field_name = self.to_fields[index] from_field = ( self if from_field_name == RECURSIVE_RELATIONSHIP_CONSTANT else self.opts.get_field(from_field_name) ) to_field = (self.remote_field.model._meta.pk if to_field_name is None else self.remote_field.model._meta.get_field(to_field_name)) related_fields.append((from_field, to_field)) return related_fields @cached_property def related_fields(self): return self.resolve_related_fields() @cached_property def reverse_related_fields(self): return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields] @cached_property def local_related_fields(self): return tuple(lhs_field for lhs_field, rhs_field in self.related_fields) @cached_property def foreign_related_fields(self): return tuple(rhs_field for lhs_field, rhs_field in self.related_fields if rhs_field) def get_local_related_value(self, instance): return self.get_instance_value_for_fields(instance, self.local_related_fields) def get_foreign_related_value(self, instance): return self.get_instance_value_for_fields(instance, self.foreign_related_fields) @staticmethod def get_instance_value_for_fields(instance, fields): ret = [] opts = instance._meta for field in fields: # Gotcha: in some cases (like fixture loading) a model can have # different values in parent_ptr_id and parent's id. So, use # instance.pk (that is, parent_ptr_id) when asked for instance.id. if field.primary_key: possible_parent_link = opts.get_ancestor_link(field.model) if (not possible_parent_link or possible_parent_link.primary_key or possible_parent_link.model._meta.abstract): ret.append(instance.pk) continue ret.append(getattr(instance, field.attname)) return tuple(ret) def get_attname_column(self): attname, column = super().get_attname_column() return attname, None def get_joining_columns(self, reverse_join=False): source = self.reverse_related_fields if reverse_join else self.related_fields return tuple((lhs_field.column, rhs_field.column) for lhs_field, rhs_field in source) def get_reverse_joining_columns(self): return self.get_joining_columns(reverse_join=True) def get_extra_descriptor_filter(self, instance): """ Return an extra filter condition for related object fetching when user does 'instance.fieldname', that is the extra filter is used in the descriptor of the field. The filter should be either a dict usable in .filter(**kwargs) call or a Q-object. The condition will be ANDed together with the relation's joining columns. A parallel method is get_extra_restriction() which is used in JOIN and subquery conditions. 
""" return {} def get_extra_restriction(self, alias, related_alias): """ Return a pair condition used for joining and subquery pushdown. The condition is something that responds to as_sql(compiler, connection) method. Note that currently referring both the 'alias' and 'related_alias' will not work in some conditions, like subquery pushdown. A parallel method is get_extra_descriptor_filter() which is used in instance.fieldname related object fetching. """ return None def get_path_info(self, filtered_relation=None): """Get path from this field to the related model.""" opts = self.remote_field.model._meta from_opts = self.model._meta return [PathInfo( from_opts=from_opts, to_opts=opts, target_fields=self.foreign_related_fields, join_field=self, m2m=False, direct=True, filtered_relation=filtered_relation, )] def get_reverse_path_info(self, filtered_relation=None): """Get path from the related model to this field's model.""" opts = self.model._meta from_opts = self.remote_field.model._meta return [PathInfo( from_opts=from_opts, to_opts=opts, target_fields=(opts.pk,), join_field=self.remote_field, m2m=not self.unique, direct=False, filtered_relation=filtered_relation, )] @classmethod @functools.lru_cache(maxsize=None) def get_lookups(cls): bases = inspect.getmro(cls) bases = bases[:bases.index(ForeignObject) + 1] class_lookups = [parent.__dict__.get('class_lookups', {}) for parent in bases] return cls.merge_dicts(class_lookups) def contribute_to_class(self, cls, name, private_only=False, **kwargs): super().contribute_to_class(cls, name, private_only=private_only, **kwargs) setattr(cls, self.name, self.forward_related_accessor_class(self)) def contribute_to_related_class(self, cls, related): # Internal FK's - i.e., those with a related name ending with '+' - # and swapped models don't get a related descriptor. if not self.remote_field.is_hidden() and not related.related_model._meta.swapped: setattr(cls._meta.concrete_model, related.get_accessor_name(), self.related_accessor_class(related)) # While 'limit_choices_to' might be a callable, simply pass # it along for later - this is too early because it's still # model load time. if self.remote_field.limit_choices_to: cls._meta.related_fkey_lookups.append(self.remote_field.limit_choices_to) ForeignObject.register_lookup(RelatedIn) ForeignObject.register_lookup(RelatedExact) ForeignObject.register_lookup(RelatedLessThan) ForeignObject.register_lookup(RelatedGreaterThan) ForeignObject.register_lookup(RelatedGreaterThanOrEqual) ForeignObject.register_lookup(RelatedLessThanOrEqual) ForeignObject.register_lookup(RelatedIsNull) class ForeignKey(ForeignObject): """ Provide a many-to-one relation by adding a column to the local model to hold the remote value. By default ForeignKey will target the pk of the remote model but this behavior can be changed by using the ``to_field`` argument. """ descriptor_class = ForeignKeyDeferredAttribute # Field flags many_to_many = False many_to_one = True one_to_many = False one_to_one = False rel_class = ManyToOneRel empty_strings_allowed = False default_error_messages = { 'invalid': _('%(model)s instance with %(field)s %(value)r does not exist.') } description = _("Foreign Key (type determined by related field)") def __init__(self, to, on_delete, related_name=None, related_query_name=None, limit_choices_to=None, parent_link=False, to_field=None, db_constraint=True, **kwargs): try: to._meta.model_name except AttributeError: if not isinstance(to, str): raise TypeError( '%s(%r) is invalid. 
First parameter to ForeignKey must be ' 'either a model, a model name, or the string %r' % ( self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT, ) ) else: # For backwards compatibility purposes, we need to *try* and set # the to_field during FK construction. It won't be guaranteed to # be correct until contribute_to_class is called. Refs #12190. to_field = to_field or (to._meta.pk and to._meta.pk.name) if not callable(on_delete): raise TypeError('on_delete must be callable.') kwargs['rel'] = self.rel_class( self, to, to_field, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, parent_link=parent_link, on_delete=on_delete, ) kwargs.setdefault('db_index', True) super().__init__( to, on_delete, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, from_fields=[RECURSIVE_RELATIONSHIP_CONSTANT], to_fields=[to_field], **kwargs, ) self.db_constraint = db_constraint def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_on_delete(), *self._check_unique(), ] def _check_on_delete(self): on_delete = getattr(self.remote_field, 'on_delete', None) if on_delete == SET_NULL and not self.null: return [ checks.Error( 'Field specifies on_delete=SET_NULL, but cannot be null.', hint='Set null=True argument on the field, or change the on_delete rule.', obj=self, id='fields.E320', ) ] elif on_delete == SET_DEFAULT and not self.has_default(): return [ checks.Error( 'Field specifies on_delete=SET_DEFAULT, but has no default value.', hint='Set a default value, or change the on_delete rule.', obj=self, id='fields.E321', ) ] else: return [] def _check_unique(self, **kwargs): return [ checks.Warning( 'Setting unique=True on a ForeignKey has the same effect as using a OneToOneField.', hint='ForeignKey(unique=True) is usually better served by a OneToOneField.', obj=self, id='fields.W342', ) ] if self.unique else [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() del kwargs['to_fields'] del kwargs['from_fields'] # Handle the simpler arguments if self.db_index: del kwargs['db_index'] else: kwargs['db_index'] = False if self.db_constraint is not True: kwargs['db_constraint'] = self.db_constraint # Rel needs more work. 
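        # Only serialize to_field when it differs from the remote model's
        # primary key (or when the remote model isn't resolved yet, in which
        # case the comparison can't be made); the pk default is re-derived
        # when the field is reconstructed, which keeps migrations smaller.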
to_meta = getattr(self.remote_field.model, "_meta", None) if self.remote_field.field_name and ( not to_meta or (to_meta.pk and self.remote_field.field_name != to_meta.pk.name)): kwargs['to_field'] = self.remote_field.field_name return name, path, args, kwargs def to_python(self, value): return self.target_field.to_python(value) @property def target_field(self): return self.foreign_related_fields[0] def get_reverse_path_info(self, filtered_relation=None): """Get path from the related model to this field's model.""" opts = self.model._meta from_opts = self.remote_field.model._meta return [PathInfo( from_opts=from_opts, to_opts=opts, target_fields=(opts.pk,), join_field=self.remote_field, m2m=not self.unique, direct=False, filtered_relation=filtered_relation, )] def validate(self, value, model_instance): if self.remote_field.parent_link: return super().validate(value, model_instance) if value is None: return using = router.db_for_read(self.remote_field.model, instance=model_instance) qs = self.remote_field.model._base_manager.using(using).filter( **{self.remote_field.field_name: value} ) qs = qs.complex_filter(self.get_limit_choices_to()) if not qs.exists(): raise exceptions.ValidationError( self.error_messages['invalid'], code='invalid', params={ 'model': self.remote_field.model._meta.verbose_name, 'pk': value, 'field': self.remote_field.field_name, 'value': value, }, # 'pk' is included for backwards compatibility ) def resolve_related_fields(self): related_fields = super().resolve_related_fields() for from_field, to_field in related_fields: if to_field and to_field.model != self.remote_field.model._meta.concrete_model: raise exceptions.FieldError( "'%s.%s' refers to field '%s' which is not local to model " "'%s'." % ( self.model._meta.label, self.name, to_field.name, self.remote_field.model._meta.concrete_model._meta.label, ) ) return related_fields def get_attname(self): return '%s_id' % self.name def get_attname_column(self): attname = self.get_attname() column = self.db_column or attname return attname, column def get_default(self): """Return the to_field if the default value is an object.""" field_default = super().get_default() if isinstance(field_default, self.remote_field.model): return getattr(field_default, self.target_field.attname) return field_default def get_db_prep_save(self, value, connection): if value is None or (value == '' and (not self.target_field.empty_strings_allowed or connection.features.interprets_empty_strings_as_nulls)): return None else: return self.target_field.get_db_prep_save(value, connection=connection) def get_db_prep_value(self, value, connection, prepared=False): return self.target_field.get_db_prep_value(value, connection, prepared) def get_prep_value(self, value): return self.target_field.get_prep_value(value) def contribute_to_related_class(self, cls, related): super().contribute_to_related_class(cls, related) if self.remote_field.field_name is None: self.remote_field.field_name = cls._meta.pk.name def formfield(self, *, using=None, **kwargs): if isinstance(self.remote_field.model, str): raise ValueError("Cannot create form field for %r yet, because " "its related model %r has not been loaded yet" % (self.name, self.remote_field.model)) return super().formfield(**{ 'form_class': forms.ModelChoiceField, 'queryset': self.remote_field.model._default_manager.using(using), 'to_field_name': self.remote_field.field_name, **kwargs, 'blank': self.blank, }) def db_check(self, connection): return [] def db_type(self, connection): return 
self.target_field.rel_db_type(connection=connection) def db_parameters(self, connection): return {"type": self.db_type(connection), "check": self.db_check(connection)} def convert_empty_strings(self, value, expression, connection): if (not value) and isinstance(value, str): return None return value def get_db_converters(self, connection): converters = super().get_db_converters(connection) if connection.features.interprets_empty_strings_as_nulls: converters += [self.convert_empty_strings] return converters def get_col(self, alias, output_field=None): if output_field is None: output_field = self.target_field while isinstance(output_field, ForeignKey): output_field = output_field.target_field if output_field is self: raise ValueError('Cannot resolve output_field.') return super().get_col(alias, output_field) class OneToOneField(ForeignKey): """ A OneToOneField is essentially the same as a ForeignKey, with the exception that it always carries a "unique" constraint with it and the reverse relation always returns the object pointed to (since there will only ever be one), rather than returning a list. """ # Field flags many_to_many = False many_to_one = False one_to_many = False one_to_one = True related_accessor_class = ReverseOneToOneDescriptor forward_related_accessor_class = ForwardOneToOneDescriptor rel_class = OneToOneRel description = _("One-to-one relationship") def __init__(self, to, on_delete, to_field=None, **kwargs): kwargs['unique'] = True super().__init__(to, on_delete, to_field=to_field, **kwargs) def deconstruct(self): name, path, args, kwargs = super().deconstruct() if "unique" in kwargs: del kwargs['unique'] return name, path, args, kwargs def formfield(self, **kwargs): if self.remote_field.parent_link: return None return super().formfield(**kwargs) def save_form_data(self, instance, data): if isinstance(data, self.remote_field.model): setattr(instance, self.name, data) else: setattr(instance, self.attname, data) # Remote field object must be cleared otherwise Model.save() # will reassign attname using the related object pk. if data is None: setattr(instance, self.name, data) def _check_unique(self, **kwargs): # Override ForeignKey since check isn't applicable here. return [] def create_many_to_many_intermediary_model(field, klass): from django.db import models def set_managed(model, related, through): through._meta.managed = model._meta.managed or related._meta.managed to_model = resolve_relation(klass, field.remote_field.model) name = '%s_%s' % (klass._meta.object_name, field.name) lazy_related_operation(set_managed, klass, to_model, name) to = make_model_tuple(to_model)[1] from_ = klass._meta.model_name if to == from_: to = 'to_%s' % to from_ = 'from_%s' % from_ meta = type('Meta', (), { 'db_table': field._get_m2m_db_table(klass._meta), 'auto_created': klass, 'app_label': klass._meta.app_label, 'db_tablespace': klass._meta.db_tablespace, 'unique_together': (from_, to), 'verbose_name': _('%(from)s-%(to)s relationship') % {'from': from_, 'to': to}, 'verbose_name_plural': _('%(from)s-%(to)s relationships') % {'from': from_, 'to': to}, 'apps': field.model._meta.apps, }) # Construct and return the new class. 
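    # Both foreign keys below get related_name='%s+' so that no reverse
    # accessors are added to the related models; all access to the through
    # table goes through the ManyToManyField's own descriptors.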
return type(name, (models.Model,), { 'Meta': meta, '__module__': klass.__module__, from_: models.ForeignKey( klass, related_name='%s+' % name, db_tablespace=field.db_tablespace, db_constraint=field.remote_field.db_constraint, on_delete=CASCADE, ), to: models.ForeignKey( to_model, related_name='%s+' % name, db_tablespace=field.db_tablespace, db_constraint=field.remote_field.db_constraint, on_delete=CASCADE, ) }) class ManyToManyField(RelatedField): """ Provide a many-to-many relation by using an intermediary model that holds two ForeignKey fields pointed at the two sides of the relation. Unless a ``through`` model was provided, ManyToManyField will use the create_many_to_many_intermediary_model factory to automatically generate the intermediary model. """ # Field flags many_to_many = True many_to_one = False one_to_many = False one_to_one = False rel_class = ManyToManyRel description = _("Many-to-many relationship") def __init__(self, to, related_name=None, related_query_name=None, limit_choices_to=None, symmetrical=None, through=None, through_fields=None, db_constraint=True, db_table=None, swappable=True, **kwargs): try: to._meta except AttributeError: if not isinstance(to, str): raise TypeError( '%s(%r) is invalid. First parameter to ManyToManyField ' 'must be either a model, a model name, or the string %r' % ( self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT, ) ) if symmetrical is None: symmetrical = (to == RECURSIVE_RELATIONSHIP_CONSTANT) if through is not None and db_table is not None: raise ValueError( 'Cannot specify a db_table if an intermediary model is used.' ) kwargs['rel'] = self.rel_class( self, to, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, symmetrical=symmetrical, through=through, through_fields=through_fields, db_constraint=db_constraint, ) self.has_null_arg = 'null' in kwargs super().__init__( related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, **kwargs, ) self.db_table = db_table self.swappable = swappable def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_unique(**kwargs), *self._check_relationship_model(**kwargs), *self._check_ignored_options(**kwargs), *self._check_table_uniqueness(**kwargs), ] def _check_unique(self, **kwargs): if self.unique: return [ checks.Error( 'ManyToManyFields cannot be unique.', obj=self, id='fields.E330', ) ] return [] def _check_ignored_options(self, **kwargs): warnings = [] if self.has_null_arg: warnings.append( checks.Warning( 'null has no effect on ManyToManyField.', obj=self, id='fields.W340', ) ) if self._validators: warnings.append( checks.Warning( 'ManyToManyField does not support validators.', obj=self, id='fields.W341', ) ) if (self.remote_field.limit_choices_to and self.remote_field.through and not self.remote_field.through._meta.auto_created): warnings.append( checks.Warning( 'limit_choices_to has no effect on ManyToManyField ' 'with a through model.', obj=self, id='fields.W343', ) ) return warnings def _check_relationship_model(self, from_model=None, **kwargs): if hasattr(self.remote_field.through, '_meta'): qualified_model_name = "%s.%s" % ( self.remote_field.through._meta.app_label, self.remote_field.through.__name__) else: qualified_model_name = self.remote_field.through errors = [] if self.remote_field.through not in self.opts.apps.get_models(include_auto_created=True): # The relationship model is not installed. 
errors.append( checks.Error( "Field specifies a many-to-many relation through model " "'%s', which has not been installed." % qualified_model_name, obj=self, id='fields.E331', ) ) else: assert from_model is not None, ( "ManyToManyField with intermediate " "tables cannot be checked if you don't pass the model " "where the field is attached to." ) # Set some useful local variables to_model = resolve_relation(from_model, self.remote_field.model) from_model_name = from_model._meta.object_name if isinstance(to_model, str): to_model_name = to_model else: to_model_name = to_model._meta.object_name relationship_model_name = self.remote_field.through._meta.object_name self_referential = from_model == to_model # Count foreign keys in intermediate model if self_referential: seen_self = sum( from_model == getattr(field.remote_field, 'model', None) for field in self.remote_field.through._meta.fields ) if seen_self > 2 and not self.remote_field.through_fields: errors.append( checks.Error( "The model is used as an intermediate model by " "'%s', but it has more than two foreign keys " "to '%s', which is ambiguous. You must specify " "which two foreign keys Django should use via the " "through_fields keyword argument." % (self, from_model_name), hint="Use through_fields to specify which two foreign keys Django should use.", obj=self.remote_field.through, id='fields.E333', ) ) else: # Count foreign keys in relationship model seen_from = sum( from_model == getattr(field.remote_field, 'model', None) for field in self.remote_field.through._meta.fields ) seen_to = sum( to_model == getattr(field.remote_field, 'model', None) for field in self.remote_field.through._meta.fields ) if seen_from > 1 and not self.remote_field.through_fields: errors.append( checks.Error( ("The model is used as an intermediate model by " "'%s', but it has more than one foreign key " "from '%s', which is ambiguous. You must specify " "which foreign key Django should use via the " "through_fields keyword argument.") % (self, from_model_name), hint=( 'If you want to create a recursive relationship, ' 'use ManyToManyField("%s", through="%s").' ) % ( RECURSIVE_RELATIONSHIP_CONSTANT, relationship_model_name, ), obj=self, id='fields.E334', ) ) if seen_to > 1 and not self.remote_field.through_fields: errors.append( checks.Error( "The model is used as an intermediate model by " "'%s', but it has more than one foreign key " "to '%s', which is ambiguous. You must specify " "which foreign key Django should use via the " "through_fields keyword argument." % (self, to_model_name), hint=( 'If you want to create a recursive relationship, ' 'use ManyToManyField("%s", through="%s").' ) % ( RECURSIVE_RELATIONSHIP_CONSTANT, relationship_model_name, ), obj=self, id='fields.E335', ) ) if seen_from == 0 or seen_to == 0: errors.append( checks.Error( "The model is used as an intermediate model by " "'%s', but it does not have a foreign key to '%s' or '%s'." % ( self, from_model_name, to_model_name ), obj=self.remote_field.through, id='fields.E336', ) ) # Validate `through_fields`. if self.remote_field.through_fields is not None: # Validate that we're given an iterable of at least two items # and that none of them is "falsy". if not (len(self.remote_field.through_fields) >= 2 and self.remote_field.through_fields[0] and self.remote_field.through_fields[1]): errors.append( checks.Error( "Field specifies 'through_fields' but does not provide " "the names of the two link fields that should be used " "for the relation through model '%s'." 
% qualified_model_name, hint="Make sure you specify 'through_fields' as through_fields=('field1', 'field2')", obj=self, id='fields.E337', ) ) # Validate the given through fields -- they should be actual # fields on the through model, and also be foreign keys to the # expected models. else: assert from_model is not None, ( "ManyToManyField with intermediate " "tables cannot be checked if you don't pass the model " "where the field is attached to." ) source, through, target = from_model, self.remote_field.through, self.remote_field.model source_field_name, target_field_name = self.remote_field.through_fields[:2] for field_name, related_model in ((source_field_name, source), (target_field_name, target)): possible_field_names = [] for f in through._meta.fields: if hasattr(f, 'remote_field') and getattr(f.remote_field, 'model', None) == related_model: possible_field_names.append(f.name) if possible_field_names: hint = "Did you mean one of the following foreign keys to '%s': %s?" % ( related_model._meta.object_name, ', '.join(possible_field_names), ) else: hint = None try: field = through._meta.get_field(field_name) except exceptions.FieldDoesNotExist: errors.append( checks.Error( "The intermediary model '%s' has no field '%s'." % (qualified_model_name, field_name), hint=hint, obj=self, id='fields.E338', ) ) else: if not (hasattr(field, 'remote_field') and getattr(field.remote_field, 'model', None) == related_model): errors.append( checks.Error( "'%s.%s' is not a foreign key to '%s'." % ( through._meta.object_name, field_name, related_model._meta.object_name, ), hint=hint, obj=self, id='fields.E339', ) ) return errors def _check_table_uniqueness(self, **kwargs): if isinstance(self.remote_field.through, str) or not self.remote_field.through._meta.managed: return [] registered_tables = { model._meta.db_table: model for model in self.opts.apps.get_models(include_auto_created=True) if model != self.remote_field.through and model._meta.managed } m2m_db_table = self.m2m_db_table() model = registered_tables.get(m2m_db_table) # The second condition allows multiple m2m relations on a model if # some point to a through model that proxies another through model. if model and model._meta.concrete_model != self.remote_field.through._meta.concrete_model: if model._meta.auto_created: def _get_field_name(model): for field in model._meta.auto_created._meta.many_to_many: if field.remote_field.through is model: return field.name opts = model._meta.auto_created._meta clashing_obj = '%s.%s' % (opts.label, _get_field_name(model)) else: clashing_obj = model._meta.label if settings.DATABASE_ROUTERS: error_class, error_id = checks.Warning, 'fields.W344' error_hint = ( 'You have configured settings.DATABASE_ROUTERS. Verify ' 'that the table of %r is correctly routed to a separate ' 'database.' % clashing_obj ) else: error_class, error_id = checks.Error, 'fields.E340' error_hint = None return [ error_class( "The field's intermediary table '%s' clashes with the " "table name of '%s'." % (m2m_db_table, clashing_obj), obj=self, hint=error_hint, id=error_id, ) ] return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() # Handle the simpler arguments. if self.db_table is not None: kwargs['db_table'] = self.db_table if self.remote_field.db_constraint is not True: kwargs['db_constraint'] = self.remote_field.db_constraint # Rel needs more work. 
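        # Serialize 'to' and 'through' as "app_label.ModelName" strings when
        # possible so that the deconstructed form doesn't have to import the
        # model classes themselves.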
if isinstance(self.remote_field.model, str): kwargs['to'] = self.remote_field.model else: kwargs['to'] = self.remote_field.model._meta.label if getattr(self.remote_field, 'through', None) is not None: if isinstance(self.remote_field.through, str): kwargs['through'] = self.remote_field.through elif not self.remote_field.through._meta.auto_created: kwargs['through'] = self.remote_field.through._meta.label # If swappable is True, then see if we're actually pointing to the target # of a swap. swappable_setting = self.swappable_setting if swappable_setting is not None: # If it's already a settings reference, error. if hasattr(kwargs['to'], "setting_name"): if kwargs['to'].setting_name != swappable_setting: raise ValueError( "Cannot deconstruct a ManyToManyField pointing to a " "model that is swapped in place of more than one model " "(%s and %s)" % (kwargs['to'].setting_name, swappable_setting) ) kwargs['to'] = SettingsReference( kwargs['to'], swappable_setting, ) return name, path, args, kwargs def _get_path_info(self, direct=False, filtered_relation=None): """Called by both direct and indirect m2m traversal.""" int_model = self.remote_field.through linkfield1 = int_model._meta.get_field(self.m2m_field_name()) linkfield2 = int_model._meta.get_field(self.m2m_reverse_field_name()) if direct: join1infos = linkfield1.get_reverse_path_info() join2infos = linkfield2.get_path_info(filtered_relation) else: join1infos = linkfield2.get_reverse_path_info() join2infos = linkfield1.get_path_info(filtered_relation) # Get join infos between the last model of join 1 and the first model # of join 2. Assume the only reason these may differ is due to model # inheritance. join1_final = join1infos[-1].to_opts join2_initial = join2infos[0].from_opts if join1_final is join2_initial: intermediate_infos = [] elif issubclass(join1_final.model, join2_initial.model): intermediate_infos = join1_final.get_path_to_parent(join2_initial.model) else: intermediate_infos = join2_initial.get_path_from_parent(join1_final.model) return [*join1infos, *intermediate_infos, *join2infos] def get_path_info(self, filtered_relation=None): return self._get_path_info(direct=True, filtered_relation=filtered_relation) def get_reverse_path_info(self, filtered_relation=None): return self._get_path_info(direct=False, filtered_relation=filtered_relation) def _get_m2m_db_table(self, opts): """ Function that can be curried to provide the m2m table name for this relation. """ if self.remote_field.through is not None: return self.remote_field.through._meta.db_table elif self.db_table: return self.db_table else: m2m_table_name = '%s_%s' % (utils.strip_quotes(opts.db_table), self.name) return utils.truncate_name(m2m_table_name, connection.ops.max_name_length()) def _get_m2m_attr(self, related, attr): """ Function that can be curried to provide the source accessor or DB column name for the m2m table. """ cache_attr = '_m2m_%s_cache' % attr if hasattr(self, cache_attr): return getattr(self, cache_attr) if self.remote_field.through_fields is not None: link_field_name = self.remote_field.through_fields[0] else: link_field_name = None for f in self.remote_field.through._meta.fields: if (f.is_relation and f.remote_field.model == related.related_model and (link_field_name is None or link_field_name == f.name)): setattr(self, cache_attr, getattr(f, attr)) return getattr(self, cache_attr) def _get_m2m_reverse_attr(self, related, attr): """ Function that can be curried to provide the related accessor or DB column name for the m2m table. 
""" cache_attr = '_m2m_reverse_%s_cache' % attr if hasattr(self, cache_attr): return getattr(self, cache_attr) found = False if self.remote_field.through_fields is not None: link_field_name = self.remote_field.through_fields[1] else: link_field_name = None for f in self.remote_field.through._meta.fields: if f.is_relation and f.remote_field.model == related.model: if link_field_name is None and related.related_model == related.model: # If this is an m2m-intermediate to self, # the first foreign key you find will be # the source column. Keep searching for # the second foreign key. if found: setattr(self, cache_attr, getattr(f, attr)) break else: found = True elif link_field_name is None or link_field_name == f.name: setattr(self, cache_attr, getattr(f, attr)) break return getattr(self, cache_attr) def contribute_to_class(self, cls, name, **kwargs): # To support multiple relations to self, it's useful to have a non-None # related name on symmetrical relations for internal reasons. The # concept doesn't make a lot of sense externally ("you want me to # specify *what* on my non-reversible relation?!"), so we set it up # automatically. The funky name reduces the chance of an accidental # clash. if self.remote_field.symmetrical and ( self.remote_field.model == RECURSIVE_RELATIONSHIP_CONSTANT or self.remote_field.model == cls._meta.object_name ): self.remote_field.related_name = "%s_rel_+" % name elif self.remote_field.is_hidden(): # If the backwards relation is disabled, replace the original # related_name with one generated from the m2m field name. Django # still uses backwards relations internally and we need to avoid # clashes between multiple m2m fields with related_name == '+'. self.remote_field.related_name = '_%s_%s_%s_+' % ( cls._meta.app_label, cls.__name__.lower(), name, ) super().contribute_to_class(cls, name, **kwargs) # The intermediate m2m model is not auto created if: # 1) There is a manually specified intermediate, or # 2) The class owning the m2m field is abstract. # 3) The class owning the m2m field has been swapped out. if not cls._meta.abstract: if self.remote_field.through: def resolve_through_model(_, model, field): field.remote_field.through = model lazy_related_operation(resolve_through_model, cls, self.remote_field.through, field=self) elif not cls._meta.swapped: self.remote_field.through = create_many_to_many_intermediary_model(self, cls) # Add the descriptor for the m2m relation. setattr(cls, self.name, ManyToManyDescriptor(self.remote_field, reverse=False)) # Set up the accessor for the m2m table name for the relation. self.m2m_db_table = partial(self._get_m2m_db_table, cls._meta) def contribute_to_related_class(self, cls, related): # Internal M2Ms (i.e., those with a related name ending with '+') # and swapped models don't get a related descriptor. if not self.remote_field.is_hidden() and not related.related_model._meta.swapped: setattr(cls, related.get_accessor_name(), ManyToManyDescriptor(self.remote_field, reverse=True)) # Set up the accessors for the column names on the m2m table. 
self.m2m_column_name = partial(self._get_m2m_attr, related, 'column') self.m2m_reverse_name = partial(self._get_m2m_reverse_attr, related, 'column') self.m2m_field_name = partial(self._get_m2m_attr, related, 'name') self.m2m_reverse_field_name = partial(self._get_m2m_reverse_attr, related, 'name') get_m2m_rel = partial(self._get_m2m_attr, related, 'remote_field') self.m2m_target_field_name = lambda: get_m2m_rel().field_name get_m2m_reverse_rel = partial(self._get_m2m_reverse_attr, related, 'remote_field') self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name def set_attributes_from_rel(self): pass def value_from_object(self, obj): return [] if obj.pk is None else list(getattr(obj, self.attname).all()) def save_form_data(self, instance, data): getattr(instance, self.attname).set(data) def formfield(self, *, using=None, **kwargs): defaults = { 'form_class': forms.ModelMultipleChoiceField, 'queryset': self.remote_field.model._default_manager.using(using), **kwargs, } # If initial is passed in, it's a list of related objects, but the # MultipleChoiceField takes a list of IDs. if defaults.get('initial') is not None: initial = defaults['initial'] if callable(initial): initial = initial() defaults['initial'] = [i.pk for i in initial] return super().formfield(**defaults) def db_check(self, connection): return None def db_type(self, connection): # A ManyToManyField is not represented by a single column, # so return None. return None def db_parameters(self, connection): return {"type": None, "check": None}
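# An illustrative sketch (hypothetical models, not part of this module) of
# how the relation fields defined above are typically declared and which
# flags they carry:
#
#     class Author(models.Model):
#         name = models.CharField(max_length=100)
#
#     class Book(models.Model):
#         # ForeignKey: many_to_one=True; adds a book.author_id column.
#         author = models.ForeignKey(Author, on_delete=models.CASCADE)
#         # ManyToManyField: many_to_many=True; backed by an auto-created
#         # intermediary table unless through= is given.
#         editors = models.ManyToManyField(Author, related_name='edited_books')
#
#     class AuthorProfile(models.Model):
#         # OneToOneField: one_to_one=True; a unique ForeignKey whose reverse
#         # accessor returns a single object rather than a manager.
#         author = models.OneToOneField(Author, on_delete=models.CASCADE)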
2bae041c9fb0a25dc254177e32fa39764222e4ff6ae353caedd1cad5f06115c6
import datetime
import posixpath

from django import forms
from django.core import checks
from django.core.files.base import File
from django.core.files.images import ImageFile
from django.core.files.storage import Storage, default_storage
from django.core.files.utils import validate_file_name
from django.db.models import signals
from django.db.models.fields import Field
from django.db.models.query_utils import DeferredAttribute
from django.utils.translation import gettext_lazy as _


class FieldFile(File):
    def __init__(self, instance, field, name):
        super().__init__(None, name)
        self.instance = instance
        self.field = field
        self.storage = field.storage
        self._committed = True

    def __eq__(self, other):
        # Older code may be expecting FileField values to be simple strings.
        # By overriding the == operator, it can remain backwards compatible.
        if hasattr(other, 'name'):
            return self.name == other.name
        return self.name == other

    def __hash__(self):
        return hash(self.name)

    # The standard File contains most of the necessary properties, but
    # FieldFiles can be instantiated without a name, so that needs to
    # be checked for here.

    def _require_file(self):
        if not self:
            raise ValueError("The '%s' attribute has no file associated with it." % self.field.name)

    def _get_file(self):
        self._require_file()
        if getattr(self, '_file', None) is None:
            self._file = self.storage.open(self.name, 'rb')
        return self._file

    def _set_file(self, file):
        self._file = file

    def _del_file(self):
        del self._file

    file = property(_get_file, _set_file, _del_file)

    @property
    def path(self):
        self._require_file()
        return self.storage.path(self.name)

    @property
    def url(self):
        self._require_file()
        return self.storage.url(self.name)

    @property
    def size(self):
        self._require_file()
        if not self._committed:
            return self.file.size
        return self.storage.size(self.name)

    def open(self, mode='rb'):
        self._require_file()
        if getattr(self, '_file', None) is None:
            self.file = self.storage.open(self.name, mode)
        else:
            self.file.open(mode)
        return self
    # open() doesn't alter the file's contents, but it does reset the pointer
    open.alters_data = True

    # In addition to the standard File API, FieldFiles have extra methods
    # to further manipulate the underlying file, as well as update the
    # associated model instance.

    def save(self, name, content, save=True):
        name = self.field.generate_filename(self.instance, name)
        self.name = self.storage.save(name, content, max_length=self.field.max_length)
        setattr(self.instance, self.field.attname, self.name)
        self._committed = True

        # Save the object because it has changed, unless save is False
        if save:
            self.instance.save()
    save.alters_data = True

    def delete(self, save=True):
        if not self:
            return
        # Only close the file if it's already open, which we know by the
        # presence of self._file
        if hasattr(self, '_file'):
            self.close()
            del self.file

        self.storage.delete(self.name)

        self.name = None
        setattr(self.instance, self.field.attname, self.name)
        self._committed = False

        if save:
            self.instance.save()
    delete.alters_data = True

    @property
    def closed(self):
        file = getattr(self, '_file', None)
        return file is None or file.closed

    def close(self):
        file = getattr(self, '_file', None)
        if file is not None:
            file.close()

    def __getstate__(self):
        # FieldFile needs access to its associated model field, an instance and
        # the file's name. Everything else will be restored later, by
        # FileDescriptor below.
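        # '_file': None below drops any open file handle at pickling time;
        # after unpickling, the file is reopened lazily from storage on the
        # next access.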
        return {
            'name': self.name,
            'closed': False,
            '_committed': True,
            '_file': None,
            'instance': self.instance,
            'field': self.field,
        }

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.storage = self.field.storage


class FileDescriptor(DeferredAttribute):
    """
    The descriptor for the file attribute on the model instance. Return a
    FieldFile when accessed so you can write code like::

        >>> from myapp.models import MyModel
        >>> instance = MyModel.objects.get(pk=1)
        >>> instance.file.size

    Assign a file object on assignment so you can do::

        >>> with open('/path/to/hello.world') as f:
        ...     instance.file = File(f)
    """
    def __get__(self, instance, cls=None):
        if instance is None:
            return self

        # This is slightly complicated, so worth an explanation.
        # ``instance.file`` needs to ultimately return some instance of `File`,
        # probably a subclass. Additionally, this returned object needs to have
        # the FieldFile API so that users can easily do things like
        # instance.file.path and have that delegated to the file storage engine.
        # Easy enough if we're strict about assignment in __set__, but if you
        # peek below you can see that we're not. So depending on the current
        # value of the field we have to dynamically construct some sort of
        # "thing" to return.

        # The instance dict contains whatever was originally assigned
        # in __set__.
        file = super().__get__(instance, cls)

        # If this value is a string (instance.file = "path/to/file") or None
        # then we simply wrap it with the appropriate attribute class according
        # to the file field. [This is FieldFile for FileFields and
        # ImageFieldFile for ImageFields; it's also conceivable that user
        # subclasses might also want to subclass the attribute class]. This
        # object understands how to convert a path to a file, and also how to
        # handle None.
        if isinstance(file, str) or file is None:
            attr = self.field.attr_class(instance, self.field, file)
            instance.__dict__[self.field.attname] = attr

        # Other types of files may be assigned as well, but they need to have
        # the FieldFile interface added to them. Thus, we wrap any other type of
        # File inside a FieldFile (well, the field's attr_class, which is
        # usually FieldFile).
        elif isinstance(file, File) and not isinstance(file, FieldFile):
            file_copy = self.field.attr_class(instance, self.field, file.name)
            file_copy.file = file
            file_copy._committed = False
            instance.__dict__[self.field.attname] = file_copy

        # Finally, because of the (some would say boneheaded) way pickle works,
        # the underlying FieldFile might not actually itself have an associated
        # file. So we need to reset the details of the FieldFile in those cases.
        elif isinstance(file, FieldFile) and not hasattr(file, 'field'):
            file.instance = instance
            file.field = self.field
            file.storage = self.field.storage

        # Make sure that the instance is correct.
        elif isinstance(file, FieldFile) and instance is not file.instance:
            file.instance = instance

        # That was fun, wasn't it?
        return instance.__dict__[self.field.attname]

    def __set__(self, instance, value):
        instance.__dict__[self.field.attname] = value


class FileField(Field):

    # The class to wrap instance attributes in. Accessing the file object off
    # the instance will always return an instance of attr_class.
    attr_class = FieldFile

    # The descriptor to use for accessing the attribute off of the class.
descriptor_class = FileDescriptor description = _("File") def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs): self._primary_key_set_explicitly = 'primary_key' in kwargs self.storage = storage or default_storage if callable(self.storage): # Hold a reference to the callable for deconstruct(). self._storage_callable = self.storage self.storage = self.storage() if not isinstance(self.storage, Storage): raise TypeError( "%s.storage must be a subclass/instance of %s.%s" % (self.__class__.__qualname__, Storage.__module__, Storage.__qualname__) ) self.upload_to = upload_to kwargs.setdefault('max_length', 100) super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_primary_key(), *self._check_upload_to(), ] def _check_primary_key(self): if self._primary_key_set_explicitly: return [ checks.Error( "'primary_key' is not a valid argument for a %s." % self.__class__.__name__, obj=self, id='fields.E201', ) ] else: return [] def _check_upload_to(self): if isinstance(self.upload_to, str) and self.upload_to.startswith('/'): return [ checks.Error( "%s's 'upload_to' argument must be a relative path, not an " "absolute path." % self.__class__.__name__, obj=self, id='fields.E202', hint='Remove the leading slash.', ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if kwargs.get("max_length") == 100: del kwargs["max_length"] kwargs['upload_to'] = self.upload_to if self.storage is not default_storage: kwargs['storage'] = getattr(self, '_storage_callable', self.storage) return name, path, args, kwargs def get_internal_type(self): return "FileField" def get_prep_value(self, value): value = super().get_prep_value(value) # Need to convert File objects provided via a form to string for database insertion if value is None: return None return str(value) def pre_save(self, model_instance, add): file = super().pre_save(model_instance, add) if file and not file._committed: # Commit the file to storage prior to saving the model file.save(file.name, file.file, save=False) return file def contribute_to_class(self, cls, name, **kwargs): super().contribute_to_class(cls, name, **kwargs) setattr(cls, self.attname, self.descriptor_class(self)) def generate_filename(self, instance, filename): """ Apply (if callable) or prepend (if a string) upload_to to the filename, then delegate further processing of the name to the storage backend. Until the storage layer, all file paths are expected to be Unix style (with forward slashes). """ if callable(self.upload_to): filename = self.upload_to(instance, filename) else: dirname = datetime.datetime.now().strftime(str(self.upload_to)) filename = posixpath.join(dirname, filename) filename = validate_file_name(filename, allow_relative_path=True) return self.storage.generate_filename(filename) def save_form_data(self, instance, data): # Important: None means "no change", other false value means "clear" # This subtle distinction (rather than a more explicit marker) is # needed because we need to consume values that are also sane for a # regular (non Model-) Form to find in its cleaned_data dictionary. if data is not None: # This value will be converted to str and stored in the # database, so leaving False as-is is not acceptable. 
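            # `data or ''` normalizes the False "clear" sentinel (as sent by
            # forms.ClearableFileInput) to the empty string, emptying the
            # column, while truthy values are stored unchanged.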
setattr(instance, self.name, data or '') def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.FileField, 'max_length': self.max_length, **kwargs, }) class ImageFileDescriptor(FileDescriptor): """ Just like the FileDescriptor, but for ImageFields. The only difference is assigning the width/height to the width_field/height_field, if appropriate. """ def __set__(self, instance, value): previous_file = instance.__dict__.get(self.field.attname) super().__set__(instance, value) # To prevent recalculating image dimensions when we are instantiating # an object from the database (bug #11084), only update dimensions if # the field had a value before this assignment. Since the default # value for FileField subclasses is an instance of field.attr_class, # previous_file will only be None when we are called from # Model.__init__(). The ImageField.update_dimension_fields method # hooked up to the post_init signal handles the Model.__init__() cases. # Assignment happening outside of Model.__init__() will trigger the # update right here. if previous_file is not None: self.field.update_dimension_fields(instance, force=True) class ImageFieldFile(ImageFile, FieldFile): def delete(self, save=True): # Clear the image dimensions cache if hasattr(self, '_dimensions_cache'): del self._dimensions_cache super().delete(save) class ImageField(FileField): attr_class = ImageFieldFile descriptor_class = ImageFileDescriptor description = _("Image") def __init__(self, verbose_name=None, name=None, width_field=None, height_field=None, **kwargs): self.width_field, self.height_field = width_field, height_field super().__init__(verbose_name, name, **kwargs) def check(self, **kwargs): return [ *super().check(**kwargs), *self._check_image_library_installed(), ] def _check_image_library_installed(self): try: from PIL import Image # NOQA except ImportError: return [ checks.Error( 'Cannot use ImageField because Pillow is not installed.', hint=('Get Pillow at https://pypi.org/project/Pillow/ ' 'or run command "python -m pip install Pillow".'), obj=self, id='fields.E210', ) ] else: return [] def deconstruct(self): name, path, args, kwargs = super().deconstruct() if self.width_field: kwargs['width_field'] = self.width_field if self.height_field: kwargs['height_field'] = self.height_field return name, path, args, kwargs def contribute_to_class(self, cls, name, **kwargs): super().contribute_to_class(cls, name, **kwargs) # Attach update_dimension_fields so that dimension fields declared # after their corresponding image field don't stay cleared by # Model.__init__, see bug #11196. # Only run post-initialization dimension update on non-abstract models if not cls._meta.abstract: signals.post_init.connect(self.update_dimension_fields, sender=cls) def update_dimension_fields(self, instance, force=False, *args, **kwargs): """ Update field's width and height fields, if defined. This method is hooked up to model's post_init signal to update dimensions after instantiating a model instance. However, dimensions won't be updated if the dimensions fields are already populated. This avoids unnecessary recalculation when loading an object from the database. Dimensions can be forced to update with force=True, which is how ImageFileDescriptor.__set__ calls this method. """ # Nothing to update if the field doesn't have dimension fields or if # the field is deferred. 
has_dimension_fields = self.width_field or self.height_field if not has_dimension_fields or self.attname not in instance.__dict__: return # getattr will call the ImageFileDescriptor's __get__ method, which # coerces the assigned value into an instance of self.attr_class # (ImageFieldFile in this case). file = getattr(instance, self.attname) # Nothing to update if we have no file and not being forced to update. if not file and not force: return dimension_fields_filled = not( (self.width_field and not getattr(instance, self.width_field)) or (self.height_field and not getattr(instance, self.height_field)) ) # When both dimension fields have values, we are most likely loading # data from the database or updating an image field that already had # an image stored. In the first case, we don't want to update the # dimension fields because we are already getting their values from the # database. In the second case, we do want to update the dimensions # fields and will skip this return because force will be True since we # were called from ImageFileDescriptor.__set__. if dimension_fields_filled and not force: return # file should be an instance of ImageFieldFile or should be None. if file: width = file.width height = file.height else: # No file, so clear dimensions fields. width = None height = None # Update the width and height fields. if self.width_field: setattr(instance, self.width_field, width) if self.height_field: setattr(instance, self.height_field, height) def formfield(self, **kwargs): return super().formfield(**{ 'form_class': forms.ImageField, **kwargs, })
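# An illustrative sketch (hypothetical model and paths, not part of this
# module) of the two upload_to forms handled by generate_filename() above:
#
#     def user_directory_path(instance, filename):
#         # Callable upload_to: called with the instance and the original
#         # filename; returns the full relative path.
#         return 'users/{0}/{1}'.format(instance.owner_id, filename)
#
#     class Document(models.Model):
#         # String upload_to: strftime-expanded and joined in front of the
#         # filename, e.g. 'docs/2024/01/report.pdf'.
#         report = models.FileField(upload_to='docs/%Y/%m/')
#         # Callable upload_to.
#         scan = models.FileField(upload_to=user_directory_path)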
784c23ef4a0734904eda532c09385e2b7747b5dc8950213a2f94b6e16758987a
""" Accessors for related objects. When a field defines a relation between two models, each model class provides an attribute to access related instances of the other model class (unless the reverse accessor has been disabled with related_name='+'). Accessors are implemented as descriptors in order to customize access and assignment. This module defines the descriptor classes. Forward accessors follow foreign keys. Reverse accessors trace them back. For example, with the following models:: class Parent(Model): pass class Child(Model): parent = ForeignKey(Parent, related_name='children') ``child.parent`` is a forward many-to-one relation. ``parent.children`` is a reverse many-to-one relation. There are three types of relations (many-to-one, one-to-one, and many-to-many) and two directions (forward and reverse) for a total of six combinations. 1. Related instance on the forward side of a many-to-one relation: ``ForwardManyToOneDescriptor``. Uniqueness of foreign key values is irrelevant to accessing the related instance, making the many-to-one and one-to-one cases identical as far as the descriptor is concerned. The constraint is checked upstream (unicity validation in forms) or downstream (unique indexes in the database). 2. Related instance on the forward side of a one-to-one relation: ``ForwardOneToOneDescriptor``. It avoids querying the database when accessing the parent link field in a multi-table inheritance scenario. 3. Related instance on the reverse side of a one-to-one relation: ``ReverseOneToOneDescriptor``. One-to-one relations are asymmetrical, despite the apparent symmetry of the name, because they're implemented in the database with a foreign key from one table to another. As a consequence ``ReverseOneToOneDescriptor`` is slightly different from ``ForwardManyToOneDescriptor``. 4. Related objects manager for related instances on the reverse side of a many-to-one relation: ``ReverseManyToOneDescriptor``. Unlike the previous two classes, this one provides access to a collection of objects. It returns a manager rather than an instance. 5. Related objects manager for related instances on the forward or reverse sides of a many-to-many relation: ``ManyToManyDescriptor``. Many-to-many relations are symmetrical. The syntax of Django models requires declaring them on one side but that's an implementation detail. They could be declared on the other side without any change in behavior. Therefore the forward and reverse descriptors can be the same. If you're looking for ``ForwardManyToManyDescriptor`` or ``ReverseManyToManyDescriptor``, use ``ManyToManyDescriptor`` instead. """ from django.core.exceptions import FieldError from django.db import connections, router, transaction from django.db.models import Q, signals from django.db.models.query import QuerySet from django.db.models.query_utils import DeferredAttribute from django.db.models.utils import resolve_callables from django.utils.functional import cached_property class ForeignKeyDeferredAttribute(DeferredAttribute): def __set__(self, instance, value): if instance.__dict__.get(self.field.attname) != value and self.field.is_cached(instance): self.field.delete_cached_value(instance) instance.__dict__[self.field.attname] = value class ForwardManyToOneDescriptor: """ Accessor to the related object on the forward side of a many-to-one or one-to-one (via ForwardOneToOneDescriptor subclass) relation. 
    In the example::

        class Child(Model):
            parent = ForeignKey(Parent, related_name='children')

    ``Child.parent`` is a ``ForwardManyToOneDescriptor`` instance.
    """

    def __init__(self, field_with_rel):
        self.field = field_with_rel

    @cached_property
    def RelatedObjectDoesNotExist(self):
        # The exception can't be created at initialization time since the
        # related model might not be resolved yet; `self.field.model` might
        # still be a string model reference.
        return type(
            'RelatedObjectDoesNotExist',
            (self.field.remote_field.model.DoesNotExist, AttributeError), {
                '__module__': self.field.model.__module__,
                '__qualname__': '%s.%s.RelatedObjectDoesNotExist' % (
                    self.field.model.__qualname__,
                    self.field.name,
                ),
            }
        )

    def is_cached(self, instance):
        return self.field.is_cached(instance)

    def get_queryset(self, **hints):
        return self.field.remote_field.model._base_manager.db_manager(hints=hints).all()

    def get_prefetch_queryset(self, instances, queryset=None):
        if queryset is None:
            queryset = self.get_queryset()
        queryset._add_hints(instance=instances[0])

        rel_obj_attr = self.field.get_foreign_related_value
        instance_attr = self.field.get_local_related_value
        instances_dict = {instance_attr(inst): inst for inst in instances}
        related_field = self.field.foreign_related_fields[0]
        remote_field = self.field.remote_field

        # FIXME: This will need to be revisited when we introduce support for
        # composite fields. In the meantime we take this practical approach to
        # solve a regression on 1.6 when the reverse manager is hidden
        # (related_name ends with a '+'). Refs #21410.
        # The check for len(...) == 1 is a special case that allows the query
        # to be join-less and smaller. Refs #21760.
        if remote_field.is_hidden() or len(self.field.foreign_related_fields) == 1:
            query = {'%s__in' % related_field.name: {instance_attr(inst)[0] for inst in instances}}
        else:
            query = {'%s__in' % self.field.related_query_name(): instances}
        queryset = queryset.filter(**query)

        # Since we're going to assign directly in the cache,
        # we must manage the reverse relation cache manually.
        if not remote_field.multiple:
            for rel_obj in queryset:
                instance = instances_dict[rel_obj_attr(rel_obj)]
                remote_field.set_cached_value(rel_obj, instance)
        return queryset, rel_obj_attr, instance_attr, True, self.field.get_cache_name(), False

    def get_object(self, instance):
        qs = self.get_queryset(instance=instance)
        # Assuming the database enforces foreign keys, this won't fail.
        return qs.get(self.field.get_reverse_related_filter(instance))

    def __get__(self, instance, cls=None):
        """
        Get the related instance through the forward relation.

        With the example above, when getting ``child.parent``:

        - ``self`` is the descriptor managing the ``parent`` attribute
        - ``instance`` is the ``child`` instance
        - ``cls`` is the ``Child`` class (we don't need it)
        """
        if instance is None:
            return self

        # The related instance is loaded from the database and then cached
        # by the field on the model instance state. It can also be pre-cached
        # by the reverse accessor (ReverseOneToOneDescriptor).
        try:
            rel_obj = self.field.get_cached_value(instance)
        except KeyError:
            has_value = None not in self.field.get_local_related_value(instance)
            ancestor_link = instance._meta.get_ancestor_link(self.field.model) if has_value else None
            if ancestor_link and ancestor_link.is_cached(instance):
                # An ancestor link will exist if this field is defined on a
                # multi-table inheritance parent of the instance's class.
ancestor = ancestor_link.get_cached_value(instance) # The value might be cached on an ancestor if the instance # originated from walking down the inheritance chain. rel_obj = self.field.get_cached_value(ancestor, default=None) else: rel_obj = None if rel_obj is None and has_value: rel_obj = self.get_object(instance) remote_field = self.field.remote_field # If this is a one-to-one relation, set the reverse accessor # cache on the related object to the current instance to avoid # an extra SQL query if it's accessed later on. if not remote_field.multiple: remote_field.set_cached_value(rel_obj, instance) self.field.set_cached_value(instance, rel_obj) if rel_obj is None and not self.field.null: raise self.RelatedObjectDoesNotExist( "%s has no %s." % (self.field.model.__name__, self.field.name) ) else: return rel_obj def __set__(self, instance, value): """ Set the related instance through the forward relation. With the example above, when setting ``child.parent = parent``: - ``self`` is the descriptor managing the ``parent`` attribute - ``instance`` is the ``child`` instance - ``value`` is the ``parent`` instance on the right of the equal sign """ # An object must be an instance of the related class. if value is not None and not isinstance(value, self.field.remote_field.model._meta.concrete_model): raise ValueError( 'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % ( value, instance._meta.object_name, self.field.name, self.field.remote_field.model._meta.object_name, ) ) elif value is not None: if instance._state.db is None: instance._state.db = router.db_for_write(instance.__class__, instance=value) if value._state.db is None: value._state.db = router.db_for_write(value.__class__, instance=instance) if not router.allow_relation(value, instance): raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value) remote_field = self.field.remote_field # If we're setting the value of a OneToOneField to None, we need to clear # out the cache on any old related object. Otherwise, deleting the # previously-related object will also cause this object to be deleted, # which is wrong. if value is None: # Look up the previously-related object, which may still be available # since we've not yet cleared out the related field. # Use the cache directly, instead of the accessor; if we haven't # populated the cache, then we don't care - we're only accessing # the object to invalidate the accessor cache, so there's no # need to populate the cache just to expire it again. related = self.field.get_cached_value(instance, default=None) # If we've got an old related object, we need to clear out its # cache. This cache also might not exist if the related object # hasn't been accessed yet. if related is not None: remote_field.set_cached_value(related, None) for lh_field, rh_field in self.field.related_fields: setattr(instance, lh_field.attname, None) # Set the values of the related field. else: for lh_field, rh_field in self.field.related_fields: setattr(instance, lh_field.attname, getattr(value, rh_field.attname)) # Set the related instance cache used by __get__ to avoid an SQL query # when accessing the attribute we just set. self.field.set_cached_value(instance, value) # If this is a one-to-one relation, set the reverse accessor cache on # the related object to the current instance to avoid an extra SQL # query if it's accessed later on. 
if value is not None and not remote_field.multiple: remote_field.set_cached_value(value, instance) def __reduce__(self): """ Pickling should return the instance attached by self.field on the model, not a new copy of that descriptor. Use getattr() to retrieve the instance directly from the model. """ return getattr, (self.field.model, self.field.name) class ForwardOneToOneDescriptor(ForwardManyToOneDescriptor): """ Accessor to the related object on the forward side of a one-to-one relation. In the example:: class Restaurant(Model): place = OneToOneField(Place, related_name='restaurant') ``Restaurant.place`` is a ``ForwardOneToOneDescriptor`` instance. """ def get_object(self, instance): if self.field.remote_field.parent_link: deferred = instance.get_deferred_fields() # Because it's a parent link, all the data is available in the # instance, so populate the parent model with this data. rel_model = self.field.remote_field.model fields = [field.attname for field in rel_model._meta.concrete_fields] # If any of the related model's fields are deferred, fallback to # fetching all fields from the related model. This avoids a query # on the related model for every deferred field. if not any(field in fields for field in deferred): kwargs = {field: getattr(instance, field) for field in fields} obj = rel_model(**kwargs) obj._state.adding = instance._state.adding obj._state.db = instance._state.db return obj return super().get_object(instance) def __set__(self, instance, value): super().__set__(instance, value) # If the primary key is a link to a parent model and a parent instance # is being set, update the value of the inherited pk(s). if self.field.primary_key and self.field.remote_field.parent_link: opts = instance._meta # Inherited primary key fields from this object's base classes. inherited_pk_fields = [ field for field in opts.concrete_fields if field.primary_key and field.remote_field ] for field in inherited_pk_fields: rel_model_pk_name = field.remote_field.model._meta.pk.attname raw_value = getattr(value, rel_model_pk_name) if value is not None else None setattr(instance, rel_model_pk_name, raw_value) class ReverseOneToOneDescriptor: """ Accessor to the related object on the reverse side of a one-to-one relation. In the example:: class Restaurant(Model): place = OneToOneField(Place, related_name='restaurant') ``Place.restaurant`` is a ``ReverseOneToOneDescriptor`` instance. """ def __init__(self, related): # Following the example above, `related` is an instance of OneToOneRel # which represents the reverse restaurant field (place.restaurant). self.related = related @cached_property def RelatedObjectDoesNotExist(self): # The exception isn't created at initialization time for the sake of # consistency with `ForwardManyToOneDescriptor`. 
return type( 'RelatedObjectDoesNotExist', (self.related.related_model.DoesNotExist, AttributeError), { '__module__': self.related.model.__module__, '__qualname__': '%s.%s.RelatedObjectDoesNotExist' % ( self.related.model.__qualname__, self.related.name, ) }, ) def is_cached(self, instance): return self.related.is_cached(instance) def get_queryset(self, **hints): return self.related.related_model._base_manager.db_manager(hints=hints).all() def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = self.get_queryset() queryset._add_hints(instance=instances[0]) rel_obj_attr = self.related.field.get_local_related_value instance_attr = self.related.field.get_foreign_related_value instances_dict = {instance_attr(inst): inst for inst in instances} query = {'%s__in' % self.related.field.name: instances} queryset = queryset.filter(**query) # Since we're going to assign directly in the cache, # we must manage the reverse relation cache manually. for rel_obj in queryset: instance = instances_dict[rel_obj_attr(rel_obj)] self.related.field.set_cached_value(rel_obj, instance) return queryset, rel_obj_attr, instance_attr, True, self.related.get_cache_name(), False def __get__(self, instance, cls=None): """ Get the related instance through the reverse relation. With the example above, when getting ``place.restaurant``: - ``self`` is the descriptor managing the ``restaurant`` attribute - ``instance`` is the ``place`` instance - ``cls`` is the ``Place`` class (unused) Keep in mind that ``Restaurant`` holds the foreign key to ``Place``. """ if instance is None: return self # The related instance is loaded from the database and then cached # by the field on the model instance state. It can also be pre-cached # by the forward accessor (ForwardManyToOneDescriptor). try: rel_obj = self.related.get_cached_value(instance) except KeyError: related_pk = instance.pk if related_pk is None: rel_obj = None else: filter_args = self.related.field.get_forward_related_filter(instance) try: rel_obj = self.get_queryset(instance=instance).get(**filter_args) except self.related.related_model.DoesNotExist: rel_obj = None else: # Set the forward accessor cache on the related object to # the current instance to avoid an extra SQL query if it's # accessed later on. self.related.field.set_cached_value(rel_obj, instance) self.related.set_cached_value(instance, rel_obj) if rel_obj is None: raise self.RelatedObjectDoesNotExist( "%s has no %s." % ( instance.__class__.__name__, self.related.get_accessor_name() ) ) else: return rel_obj def __set__(self, instance, value): """ Set the related instance through the reverse relation. With the example above, when setting ``place.restaurant = restaurant``: - ``self`` is the descriptor managing the ``restaurant`` attribute - ``instance`` is the ``place`` instance - ``value`` is the ``restaurant`` instance on the right of the equal sign Keep in mind that ``Restaurant`` holds the foreign key to ``Place``. """ # The similarity of the code below to the code in # ForwardManyToOneDescriptor is annoying, but there's a bunch # of small differences that would make a common base class convoluted. if value is None: # Update the cached related instance (if any) & clear the cache. # Following the example above, this would be the cached # ``restaurant`` instance (if any). rel_obj = self.related.get_cached_value(instance, default=None) if rel_obj is not None: # Remove the ``restaurant`` instance from the ``place`` # instance cache. 
                self.related.delete_cached_value(instance)
                # Set the ``place`` field on the ``restaurant``
                # instance to None.
                setattr(rel_obj, self.related.field.name, None)
        elif not isinstance(value, self.related.related_model):
            # An object must be an instance of the related class.
            raise ValueError(
                'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
                    value,
                    instance._meta.object_name,
                    self.related.get_accessor_name(),
                    self.related.related_model._meta.object_name,
                )
            )
        else:
            if instance._state.db is None:
                instance._state.db = router.db_for_write(instance.__class__, instance=value)
            if value._state.db is None:
                value._state.db = router.db_for_write(value.__class__, instance=instance)
            if not router.allow_relation(value, instance):
                raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)

            related_pk = tuple(getattr(instance, field.attname) for field in self.related.field.foreign_related_fields)
            # Set the value of the related field to the value of the related object's related field
            for index, field in enumerate(self.related.field.local_related_fields):
                setattr(value, field.attname, related_pk[index])

            # Set the related instance cache used by __get__ to avoid an SQL query
            # when accessing the attribute we just set.
            self.related.set_cached_value(instance, value)

            # Set the forward accessor cache on the related object to the current
            # instance to avoid an extra SQL query if it's accessed later on.
            self.related.field.set_cached_value(value, instance)

    def __reduce__(self):
        # Same purpose as ForwardManyToOneDescriptor.__reduce__().
        return getattr, (self.related.model, self.related.name)


class ReverseManyToOneDescriptor:
    """
    Accessor to the related objects manager on the reverse side of a
    many-to-one relation.

    In the example::

        class Child(Model):
            parent = ForeignKey(Parent, related_name='children')

    ``Parent.children`` is a ``ReverseManyToOneDescriptor`` instance.

    Most of the implementation is delegated to a dynamically defined manager
    class built by ``create_reverse_many_to_one_manager()`` defined below.
    """

    def __init__(self, rel):
        self.rel = rel
        self.field = rel.field

    @cached_property
    def related_manager_cls(self):
        related_model = self.rel.related_model

        return create_reverse_many_to_one_manager(
            related_model._default_manager.__class__,
            self.rel,
        )

    def __get__(self, instance, cls=None):
        """
        Get the related objects through the reverse relation.

        With the example above, when getting ``parent.children``:

        - ``self`` is the descriptor managing the ``children`` attribute
        - ``instance`` is the ``parent`` instance
        - ``cls`` is the ``Parent`` class (unused)
        """
        if instance is None:
            return self

        return self.related_manager_cls(instance)

    def _get_set_deprecation_msg_params(self):
        return (
            'reverse side of a related set',
            self.rel.get_accessor_name(),
        )

    def __set__(self, instance, value):
        raise TypeError(
            'Direct assignment to the %s is prohibited. Use %s.set() instead.'
            % self._get_set_deprecation_msg_params(),
        )


def create_reverse_many_to_one_manager(superclass, rel):
    """
    Create a manager for the reverse side of a many-to-one relation.

    This manager subclasses another manager, generally the default manager of
    the related model, and adds behaviors specific to many-to-one relations.
""" class RelatedManager(superclass): def __init__(self, instance): super().__init__() self.instance = instance self.model = rel.related_model self.field = rel.field self.core_filters = {self.field.name: instance} def __call__(self, *, manager): manager = getattr(self.model, manager) manager_class = create_reverse_many_to_one_manager(manager.__class__, rel) return manager_class(self.instance) do_not_call_in_templates = True def _apply_rel_filters(self, queryset): """ Filter the queryset for the instance this manager is bound to. """ db = self._db or router.db_for_read(self.model, instance=self.instance) empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls queryset._add_hints(instance=self.instance) if self._db: queryset = queryset.using(self._db) queryset._defer_next_filter = True queryset = queryset.filter(**self.core_filters) for field in self.field.foreign_related_fields: val = getattr(self.instance, field.attname) if val is None or (val == '' and empty_strings_as_null): return queryset.none() if self.field.many_to_one: # Guard against field-like objects such as GenericRelation # that abuse create_reverse_many_to_one_manager() with reverse # one-to-many relationships instead and break known related # objects assignment. try: target_field = self.field.target_field except FieldError: # The relationship has multiple target fields. Use a tuple # for related object id. rel_obj_id = tuple([ getattr(self.instance, target_field.attname) for target_field in self.field.get_path_info()[-1].target_fields ]) else: rel_obj_id = getattr(self.instance, target_field.attname) queryset._known_related_objects = {self.field: {rel_obj_id: self.instance}} return queryset def _remove_prefetched_objects(self): try: self.instance._prefetched_objects_cache.pop(self.field.remote_field.get_cache_name()) except (AttributeError, KeyError): pass # nothing to clear from cache def get_queryset(self): try: return self.instance._prefetched_objects_cache[self.field.remote_field.get_cache_name()] except (AttributeError, KeyError): queryset = super().get_queryset() return self._apply_rel_filters(queryset) def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = super().get_queryset() queryset._add_hints(instance=instances[0]) queryset = queryset.using(queryset._db or self._db) rel_obj_attr = self.field.get_local_related_value instance_attr = self.field.get_foreign_related_value instances_dict = {instance_attr(inst): inst for inst in instances} query = {'%s__in' % self.field.name: instances} queryset = queryset.filter(**query) # Since we just bypassed this class' get_queryset(), we must manage # the reverse relation manually. for rel_obj in queryset: instance = instances_dict[rel_obj_attr(rel_obj)] setattr(rel_obj, self.field.name, instance) cache_name = self.field.remote_field.get_cache_name() return queryset, rel_obj_attr, instance_attr, False, cache_name, False def add(self, *objs, bulk=True): self._remove_prefetched_objects() db = router.db_for_write(self.model, instance=self.instance) def check_and_update_obj(obj): if not isinstance(obj, self.model): raise TypeError("'%s' instance expected, got %r" % ( self.model._meta.object_name, obj, )) setattr(obj, self.field.name, self.instance) if bulk: pks = [] for obj in objs: check_and_update_obj(obj) if obj._state.adding or obj._state.db != db: raise ValueError( "%r instance isn't saved. Use bulk=False or save " "the object first." 
% obj ) pks.append(obj.pk) self.model._base_manager.using(db).filter(pk__in=pks).update(**{ self.field.name: self.instance, }) else: with transaction.atomic(using=db, savepoint=False): for obj in objs: check_and_update_obj(obj) obj.save() add.alters_data = True def create(self, **kwargs): kwargs[self.field.name] = self.instance db = router.db_for_write(self.model, instance=self.instance) return super(RelatedManager, self.db_manager(db)).create(**kwargs) create.alters_data = True def get_or_create(self, **kwargs): kwargs[self.field.name] = self.instance db = router.db_for_write(self.model, instance=self.instance) return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs) get_or_create.alters_data = True def update_or_create(self, **kwargs): kwargs[self.field.name] = self.instance db = router.db_for_write(self.model, instance=self.instance) return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs) update_or_create.alters_data = True # remove() and clear() are only provided if the ForeignKey can have a value of null. if rel.field.null: def remove(self, *objs, bulk=True): if not objs: return val = self.field.get_foreign_related_value(self.instance) old_ids = set() for obj in objs: if not isinstance(obj, self.model): raise TypeError("'%s' instance expected, got %r" % ( self.model._meta.object_name, obj, )) # Is obj actually part of this descriptor set? if self.field.get_local_related_value(obj) == val: old_ids.add(obj.pk) else: raise self.field.remote_field.model.DoesNotExist( "%r is not related to %r." % (obj, self.instance) ) self._clear(self.filter(pk__in=old_ids), bulk) remove.alters_data = True def clear(self, *, bulk=True): self._clear(self, bulk) clear.alters_data = True def _clear(self, queryset, bulk): self._remove_prefetched_objects() db = router.db_for_write(self.model, instance=self.instance) queryset = queryset.using(db) if bulk: # `QuerySet.update()` is intrinsically atomic. queryset.update(**{self.field.name: None}) else: with transaction.atomic(using=db, savepoint=False): for obj in queryset: setattr(obj, self.field.name, None) obj.save(update_fields=[self.field.name]) _clear.alters_data = True def set(self, objs, *, bulk=True, clear=False): # Force evaluation of `objs` in case it's a queryset whose value # could be affected by `manager.clear()`. Refs #19816. objs = tuple(objs) if self.field.null: db = router.db_for_write(self.model, instance=self.instance) with transaction.atomic(using=db, savepoint=False): if clear: self.clear(bulk=bulk) self.add(*objs, bulk=bulk) else: old_objs = set(self.using(db).all()) new_objs = [] for obj in objs: if obj in old_objs: old_objs.remove(obj) else: new_objs.append(obj) self.remove(*old_objs, bulk=bulk) self.add(*new_objs, bulk=bulk) else: self.add(*objs, bulk=bulk) set.alters_data = True return RelatedManager class ManyToManyDescriptor(ReverseManyToOneDescriptor): """ Accessor to the related objects manager on the forward and reverse sides of a many-to-many relation. In the example:: class Pizza(Model): toppings = ManyToManyField(Topping, related_name='pizzas') ``Pizza.toppings`` and ``Topping.pizzas`` are ``ManyToManyDescriptor`` instances. Most of the implementation is delegated to a dynamically defined manager class built by ``create_forward_many_to_many_manager()`` defined below. 
""" def __init__(self, rel, reverse=False): super().__init__(rel) self.reverse = reverse @property def through(self): # through is provided so that you have easy access to the through # model (Book.authors.through) for inlines, etc. This is done as # a property to ensure that the fully resolved value is returned. return self.rel.through @cached_property def related_manager_cls(self): related_model = self.rel.related_model if self.reverse else self.rel.model return create_forward_many_to_many_manager( related_model._default_manager.__class__, self.rel, reverse=self.reverse, ) def _get_set_deprecation_msg_params(self): return ( '%s side of a many-to-many set' % ('reverse' if self.reverse else 'forward'), self.rel.get_accessor_name() if self.reverse else self.field.name, ) def create_forward_many_to_many_manager(superclass, rel, reverse): """ Create a manager for the either side of a many-to-many relation. This manager subclasses another manager, generally the default manager of the related model, and adds behaviors specific to many-to-many relations. """ class ManyRelatedManager(superclass): def __init__(self, instance=None): super().__init__() self.instance = instance if not reverse: self.model = rel.model self.query_field_name = rel.field.related_query_name() self.prefetch_cache_name = rel.field.name self.source_field_name = rel.field.m2m_field_name() self.target_field_name = rel.field.m2m_reverse_field_name() self.symmetrical = rel.symmetrical else: self.model = rel.related_model self.query_field_name = rel.field.name self.prefetch_cache_name = rel.field.related_query_name() self.source_field_name = rel.field.m2m_reverse_field_name() self.target_field_name = rel.field.m2m_field_name() self.symmetrical = False self.through = rel.through self.reverse = reverse self.source_field = self.through._meta.get_field(self.source_field_name) self.target_field = self.through._meta.get_field(self.target_field_name) self.core_filters = {} self.pk_field_names = {} for lh_field, rh_field in self.source_field.related_fields: core_filter_key = '%s__%s' % (self.query_field_name, rh_field.name) self.core_filters[core_filter_key] = getattr(instance, rh_field.attname) self.pk_field_names[lh_field.name] = rh_field.name self.related_val = self.source_field.get_foreign_related_value(instance) if None in self.related_val: raise ValueError('"%r" needs to have a value for field "%s" before ' 'this many-to-many relationship can be used.' % (instance, self.pk_field_names[self.source_field_name])) # Even if this relation is not to pk, we require still pk value. # The wish is that the instance has been already saved to DB, # although having a pk value isn't a guarantee of that. if instance.pk is None: raise ValueError("%r instance needs to have a primary key value before " "a many-to-many relationship can be used." % instance.__class__.__name__) def __call__(self, *, manager): manager = getattr(self.model, manager) manager_class = create_forward_many_to_many_manager(manager.__class__, rel, reverse) return manager_class(instance=self.instance) do_not_call_in_templates = True def _build_remove_filters(self, removed_vals): filters = Q((self.source_field_name, self.related_val)) # No need to add a subquery condition if removed_vals is a QuerySet without # filters. 
removed_vals_filters = (not isinstance(removed_vals, QuerySet) or removed_vals._has_filters()) if removed_vals_filters: filters &= Q((f'{self.target_field_name}__in', removed_vals)) if self.symmetrical: symmetrical_filters = Q((self.target_field_name, self.related_val)) if removed_vals_filters: symmetrical_filters &= Q((f'{self.source_field_name}__in', removed_vals)) filters |= symmetrical_filters return filters def _apply_rel_filters(self, queryset): """ Filter the queryset for the instance this manager is bound to. """ queryset._add_hints(instance=self.instance) if self._db: queryset = queryset.using(self._db) queryset._defer_next_filter = True return queryset._next_is_sticky().filter(**self.core_filters) def _remove_prefetched_objects(self): try: self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name) except (AttributeError, KeyError): pass # nothing to clear from cache def get_queryset(self): try: return self.instance._prefetched_objects_cache[self.prefetch_cache_name] except (AttributeError, KeyError): queryset = super().get_queryset() return self._apply_rel_filters(queryset) def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = super().get_queryset() queryset._add_hints(instance=instances[0]) queryset = queryset.using(queryset._db or self._db) query = {'%s__in' % self.query_field_name: instances} queryset = queryset._next_is_sticky().filter(**query) # M2M: need to annotate the query in order to get the primary model # that the secondary model was actually related to. We know that # there will already be a join on the join table, so we can just add # the select. # For non-autocreated 'through' models, can't assume we are # dealing with PK values. fk = self.through._meta.get_field(self.source_field_name) join_table = fk.model._meta.db_table connection = connections[queryset.db] qn = connection.ops.quote_name queryset = queryset.extra(select={ '_prefetch_related_val_%s' % f.attname: '%s.%s' % (qn(join_table), qn(f.column)) for f in fk.local_related_fields}) return ( queryset, lambda result: tuple( getattr(result, '_prefetch_related_val_%s' % f.attname) for f in fk.local_related_fields ), lambda inst: tuple( f.get_db_prep_value(getattr(inst, f.attname), connection) for f in fk.foreign_related_fields ), False, self.prefetch_cache_name, False, ) def add(self, *objs, through_defaults=None): self._remove_prefetched_objects() db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): self._add_items( self.source_field_name, self.target_field_name, *objs, through_defaults=through_defaults, ) # If this is a symmetrical m2m relation to self, add the mirror # entry in the m2m table. 
if self.symmetrical: self._add_items( self.target_field_name, self.source_field_name, *objs, through_defaults=through_defaults, ) add.alters_data = True def remove(self, *objs): self._remove_prefetched_objects() self._remove_items(self.source_field_name, self.target_field_name, *objs) remove.alters_data = True def clear(self): db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): signals.m2m_changed.send( sender=self.through, action="pre_clear", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db, ) self._remove_prefetched_objects() filters = self._build_remove_filters(super().get_queryset().using(db)) self.through._default_manager.using(db).filter(filters).delete() signals.m2m_changed.send( sender=self.through, action="post_clear", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=None, using=db, ) clear.alters_data = True def set(self, objs, *, clear=False, through_defaults=None): # Force evaluation of `objs` in case it's a queryset whose value # could be affected by `manager.clear()`. Refs #19816. objs = tuple(objs) db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): if clear: self.clear() self.add(*objs, through_defaults=through_defaults) else: old_ids = set(self.using(db).values_list(self.target_field.target_field.attname, flat=True)) new_objs = [] for obj in objs: fk_val = ( self.target_field.get_foreign_related_value(obj)[0] if isinstance(obj, self.model) else self.target_field.get_prep_value(obj) ) if fk_val in old_ids: old_ids.remove(fk_val) else: new_objs.append(obj) self.remove(*old_ids) self.add(*new_objs, through_defaults=through_defaults) set.alters_data = True def create(self, *, through_defaults=None, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs) self.add(new_obj, through_defaults=through_defaults) return new_obj create.alters_data = True def get_or_create(self, *, through_defaults=None, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs) # We only need to add() if created because if we got an object back # from get() then the relationship already exists. if created: self.add(obj, through_defaults=through_defaults) return obj, created get_or_create.alters_data = True def update_or_create(self, *, through_defaults=None, **kwargs): db = router.db_for_write(self.instance.__class__, instance=self.instance) obj, created = super(ManyRelatedManager, self.db_manager(db)).update_or_create(**kwargs) # We only need to add() if created because if we got an object back # from get() then the relationship already exists. if created: self.add(obj, through_defaults=through_defaults) return obj, created update_or_create.alters_data = True def _get_target_ids(self, target_field_name, objs): """ Return the set of ids of `objs` that the target field references. 
""" from django.db.models import Model target_ids = set() target_field = self.through._meta.get_field(target_field_name) for obj in objs: if isinstance(obj, self.model): if not router.allow_relation(obj, self.instance): raise ValueError( 'Cannot add "%r": instance is on database "%s", ' 'value is on database "%s"' % (obj, self.instance._state.db, obj._state.db) ) target_id = target_field.get_foreign_related_value(obj)[0] if target_id is None: raise ValueError( 'Cannot add "%r": the value for field "%s" is None' % (obj, target_field_name) ) target_ids.add(target_id) elif isinstance(obj, Model): raise TypeError( "'%s' instance expected, got %r" % (self.model._meta.object_name, obj) ) else: target_ids.add(target_field.get_prep_value(obj)) return target_ids def _get_missing_target_ids(self, source_field_name, target_field_name, db, target_ids): """ Return the subset of ids of `objs` that aren't already assigned to this relationship. """ vals = self.through._default_manager.using(db).values_list( target_field_name, flat=True ).filter(**{ source_field_name: self.related_val[0], '%s__in' % target_field_name: target_ids, }) return target_ids.difference(vals) def _get_add_plan(self, db, source_field_name): """ Return a boolean triple of the way the add should be performed. The first element is whether or not bulk_create(ignore_conflicts) can be used, the second whether or not signals must be sent, and the third element is whether or not the immediate bulk insertion with conflicts ignored can be performed. """ # Conflicts can be ignored when the intermediary model is # auto-created as the only possible collision is on the # (source_id, target_id) tuple. The same assertion doesn't hold for # user-defined intermediary models as they could have other fields # causing conflicts which must be surfaced. can_ignore_conflicts = ( connections[db].features.supports_ignore_conflicts and self.through._meta.auto_created is not False ) # Don't send the signal when inserting duplicate data row # for symmetrical reverse entries. must_send_signals = (self.reverse or source_field_name == self.source_field_name) and ( signals.m2m_changed.has_listeners(self.through) ) # Fast addition through bulk insertion can only be performed # if no m2m_changed listeners are connected for self.through # as they require the added set of ids to be provided via # pk_set. return can_ignore_conflicts, must_send_signals, (can_ignore_conflicts and not must_send_signals) def _add_items(self, source_field_name, target_field_name, *objs, through_defaults=None): # source_field_name: the PK fieldname in join table for the source object # target_field_name: the PK fieldname in join table for the target object # *objs - objects to add. Either object instances, or primary keys of object instances. 
if not objs: return through_defaults = dict(resolve_callables(through_defaults or {})) target_ids = self._get_target_ids(target_field_name, objs) db = router.db_for_write(self.through, instance=self.instance) can_ignore_conflicts, must_send_signals, can_fast_add = self._get_add_plan(db, source_field_name) if can_fast_add: self.through._default_manager.using(db).bulk_create([ self.through(**{ '%s_id' % source_field_name: self.related_val[0], '%s_id' % target_field_name: target_id, }) for target_id in target_ids ], ignore_conflicts=True) return missing_target_ids = self._get_missing_target_ids( source_field_name, target_field_name, db, target_ids ) with transaction.atomic(using=db, savepoint=False): if must_send_signals: signals.m2m_changed.send( sender=self.through, action='pre_add', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=missing_target_ids, using=db, ) # Add the ones that aren't there already. self.through._default_manager.using(db).bulk_create([ self.through(**through_defaults, **{ '%s_id' % source_field_name: self.related_val[0], '%s_id' % target_field_name: target_id, }) for target_id in missing_target_ids ], ignore_conflicts=can_ignore_conflicts) if must_send_signals: signals.m2m_changed.send( sender=self.through, action='post_add', instance=self.instance, reverse=self.reverse, model=self.model, pk_set=missing_target_ids, using=db, ) def _remove_items(self, source_field_name, target_field_name, *objs): # source_field_name: the PK colname in join table for the source object # target_field_name: the PK colname in join table for the target object # *objs - objects to remove. Either object instances, or primary # keys of object instances. if not objs: return # Check that all the objects are of the right type old_ids = set() for obj in objs: if isinstance(obj, self.model): fk_val = self.target_field.get_foreign_related_value(obj)[0] old_ids.add(fk_val) else: old_ids.add(obj) db = router.db_for_write(self.through, instance=self.instance) with transaction.atomic(using=db, savepoint=False): # Send a signal to the other end if need be. signals.m2m_changed.send( sender=self.through, action="pre_remove", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db, ) target_model_qs = super().get_queryset() if target_model_qs._has_filters(): old_vals = target_model_qs.using(db).filter(**{ '%s__in' % self.target_field.target_field.attname: old_ids}) else: old_vals = old_ids filters = self._build_remove_filters(old_vals) self.through._default_manager.using(db).filter(filters).delete() signals.m2m_changed.send( sender=self.through, action="post_remove", instance=self.instance, reverse=self.reverse, model=self.model, pk_set=old_ids, using=db, ) return ManyRelatedManager
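

# Illustrative sketch, not part of Django itself: how the descriptors defined
# in this module cooperate at runtime. ``Parent`` and ``Child`` are
# hypothetical models with ``Child.parent = ForeignKey(Parent,
# related_name='children')``; they're taken as parameters so the sketch stays
# self-contained and never runs at import time.
def _descriptor_usage_sketch(Parent, Child):
    # Forward access: the first attribute hit runs one query, after which
    # ForwardManyToOneDescriptor.__get__() serves the value from the field
    # cache on the instance.
    child = Child.objects.get(pk=1)
    parent = child.parent        # one SELECT against the Parent table
    parent_again = child.parent  # served from the cache; no extra query
    # Reverse access: Parent.children is a ReverseManyToOneDescriptor whose
    # __get__() returns a RelatedManager bound to this parent instance.
    children = parent.children.all()  # queryset filtered to this parent
    parent.children.add(child)        # bulk UPDATE of child.parent_id
    parent.children.set([child])      # diff-based add()/remove()
    return children, parent_again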
""" "Rel objects" for related fields. "Rel objects" (for lack of a better name) carry information about the relation modeled by a related field and provide some utility functions. They're stored in the ``remote_field`` attribute of the field. They also act as reverse fields for the purposes of the Meta API because they're the closest concept currently available. """ from django.core import exceptions from django.utils.functional import cached_property from django.utils.hashable import make_hashable from . import BLANK_CHOICE_DASH from .mixins import FieldCacheMixin class ForeignObjectRel(FieldCacheMixin): """ Used by ForeignObject to store information about the relation. ``_meta.get_fields()`` returns this class to provide access to the field flags for the reverse relation. """ # Field flags auto_created = True concrete = False editable = False is_relation = True # Reverse relations are always nullable (Django can't enforce that a # foreign key on the related model points to this model). null = True empty_strings_allowed = False def __init__(self, field, to, related_name=None, related_query_name=None, limit_choices_to=None, parent_link=False, on_delete=None): self.field = field self.model = to self.related_name = related_name self.related_query_name = related_query_name self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to self.parent_link = parent_link self.on_delete = on_delete self.symmetrical = False self.multiple = True # Some of the following cached_properties can't be initialized in # __init__ as the field doesn't have its model yet. Calling these methods # before field.contribute_to_class() has been called will result in # AttributeError @cached_property def hidden(self): return self.is_hidden() @cached_property def name(self): return self.field.related_query_name() @property def remote_field(self): return self.field @property def target_field(self): """ When filtering against this relation, return the field on the remote model against which the filtering should happen. 
""" target_fields = self.get_path_info()[-1].target_fields if len(target_fields) > 1: raise exceptions.FieldError("Can't use target_field for multicolumn relations.") return target_fields[0] @cached_property def related_model(self): if not self.field.model: raise AttributeError( "This property can't be accessed before self.field.contribute_to_class has been called.") return self.field.model @cached_property def many_to_many(self): return self.field.many_to_many @cached_property def many_to_one(self): return self.field.one_to_many @cached_property def one_to_many(self): return self.field.many_to_one @cached_property def one_to_one(self): return self.field.one_to_one def get_lookup(self, lookup_name): return self.field.get_lookup(lookup_name) def get_internal_type(self): return self.field.get_internal_type() @property def db_type(self): return self.field.db_type def __repr__(self): return '<%s: %s.%s>' % ( type(self).__name__, self.related_model._meta.app_label, self.related_model._meta.model_name, ) @property def identity(self): return ( self.field, self.model, self.related_name, self.related_query_name, make_hashable(self.limit_choices_to), self.parent_link, self.on_delete, self.symmetrical, self.multiple, ) def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return self.identity == other.identity def __hash__(self): return hash(self.identity) def get_choices( self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None, ordering=(), ): """ Return choices with a default blank choices included, for use as <select> choices for this field. Analog of django.db.models.fields.Field.get_choices(), provided initially for utilization by RelatedFieldListFilter. """ limit_choices_to = limit_choices_to or self.limit_choices_to qs = self.related_model._default_manager.complex_filter(limit_choices_to) if ordering: qs = qs.order_by(*ordering) return (blank_choice if include_blank else []) + [ (x.pk, str(x)) for x in qs ] def is_hidden(self): """Should the related object be hidden?""" return bool(self.related_name) and self.related_name[-1] == '+' def get_joining_columns(self): return self.field.get_reverse_joining_columns() def get_extra_restriction(self, alias, related_alias): return self.field.get_extra_restriction(related_alias, alias) def set_field_name(self): """ Set the related field's name, this is not available until later stages of app loading, so set_field_name is called from set_attributes_from_rel() """ # By default foreign object doesn't relate to any remote field (for # example custom multicolumn joins currently have no remote field). self.field_name = None def get_accessor_name(self, model=None): # This method encapsulates the logic that decides what name to give an # accessor descriptor that retrieves related many-to-one or # many-to-many objects. It uses the lowercased object_name + "_set", # but this can be overridden with the "related_name" option. Due to # backwards compatibility ModelForms need to be able to provide an # alternate model. See BaseInlineFormSet.get_default_prefix(). opts = model._meta if model else self.related_model._meta model = model or self.related_model if self.multiple: # If this is a symmetrical m2m relation on self, there is no reverse accessor. 
if self.symmetrical and model == self.model: return None if self.related_name: return self.related_name return opts.model_name + ('_set' if self.multiple else '') def get_path_info(self, filtered_relation=None): return self.field.get_reverse_path_info(filtered_relation) def get_cache_name(self): """ Return the name of the cache key to use for storing an instance of the forward model on the reverse model. """ return self.get_accessor_name() class ManyToOneRel(ForeignObjectRel): """ Used by the ForeignKey field to store information about the relation. ``_meta.get_fields()`` returns this class to provide access to the field flags for the reverse relation. Note: Because we somewhat abuse the Rel objects by using them as reverse fields we get the funny situation where ``ManyToOneRel.many_to_one == False`` and ``ManyToOneRel.one_to_many == True``. This is unfortunate but the actual ManyToOneRel class is a private API and there is work underway to turn reverse relations into actual fields. """ def __init__(self, field, to, field_name, related_name=None, related_query_name=None, limit_choices_to=None, parent_link=False, on_delete=None): super().__init__( field, to, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, parent_link=parent_link, on_delete=on_delete, ) self.field_name = field_name def __getstate__(self): state = self.__dict__.copy() state.pop('related_model', None) return state @property def identity(self): return super().identity + (self.field_name,) def get_related_field(self): """ Return the Field in the 'to' object to which this relationship is tied. """ field = self.model._meta.get_field(self.field_name) if not field.concrete: raise exceptions.FieldDoesNotExist("No related field named '%s'" % self.field_name) return field def set_field_name(self): self.field_name = self.field_name or self.model._meta.pk.name class OneToOneRel(ManyToOneRel): """ Used by OneToOneField to store information about the relation. ``_meta.get_fields()`` returns this class to provide access to the field flags for the reverse relation. """ def __init__(self, field, to, field_name, related_name=None, related_query_name=None, limit_choices_to=None, parent_link=False, on_delete=None): super().__init__( field, to, field_name, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, parent_link=parent_link, on_delete=on_delete, ) self.multiple = False class ManyToManyRel(ForeignObjectRel): """ Used by ManyToManyField to store information about the relation. ``_meta.get_fields()`` returns this class to provide access to the field flags for the reverse relation. 
""" def __init__(self, field, to, related_name=None, related_query_name=None, limit_choices_to=None, symmetrical=True, through=None, through_fields=None, db_constraint=True): super().__init__( field, to, related_name=related_name, related_query_name=related_query_name, limit_choices_to=limit_choices_to, ) if through and not db_constraint: raise ValueError("Can't supply a through model and db_constraint=False") self.through = through if through_fields and not through: raise ValueError("Cannot specify through_fields without a through model") self.through_fields = through_fields self.symmetrical = symmetrical self.db_constraint = db_constraint @property def identity(self): return super().identity + ( self.through, make_hashable(self.through_fields), self.db_constraint, ) def get_related_field(self): """ Return the field in the 'to' object to which this relationship is tied. Provided for symmetry with ManyToOneRel. """ opts = self.through._meta if self.through_fields: field = opts.get_field(self.through_fields[0]) else: for field in opts.fields: rel = getattr(field, 'remote_field', None) if rel and rel.model == self.model: break return field.foreign_related_fields[0]
"""Database functions that do comparisons or type conversions.""" from django.db import NotSupportedError from django.db.models.expressions import Func, Value from django.db.models.fields.json import JSONField from django.utils.regex_helper import _lazy_re_compile class Cast(Func): """Coerce an expression to a new field type.""" function = 'CAST' template = '%(function)s(%(expressions)s AS %(db_type)s)' def __init__(self, expression, output_field): super().__init__(expression, output_field=output_field) def as_sql(self, compiler, connection, **extra_context): extra_context['db_type'] = self.output_field.cast_db_type(connection) return super().as_sql(compiler, connection, **extra_context) def as_sqlite(self, compiler, connection, **extra_context): db_type = self.output_field.db_type(connection) if db_type in {'datetime', 'time'}: # Use strftime as datetime/time don't keep fractional seconds. template = 'strftime(%%s, %(expressions)s)' sql, params = super().as_sql(compiler, connection, template=template, **extra_context) format_string = '%H:%M:%f' if db_type == 'time' else '%Y-%m-%d %H:%M:%f' params.insert(0, format_string) return sql, params elif db_type == 'date': template = 'date(%(expressions)s)' return super().as_sql(compiler, connection, template=template, **extra_context) return self.as_sql(compiler, connection, **extra_context) def as_mysql(self, compiler, connection, **extra_context): template = None output_type = self.output_field.get_internal_type() # MySQL doesn't support explicit cast to float. if output_type == 'FloatField': template = '(%(expressions)s + 0.0)' # MariaDB doesn't support explicit cast to JSON. elif output_type == 'JSONField' and connection.mysql_is_mariadb: template = "JSON_EXTRACT(%(expressions)s, '$')" return self.as_sql(compiler, connection, template=template, **extra_context) def as_postgresql(self, compiler, connection, **extra_context): # CAST would be valid too, but the :: shortcut syntax is more readable. # 'expressions' is wrapped in parentheses in case it's a complex # expression. return self.as_sql(compiler, connection, template='(%(expressions)s)::%(db_type)s', **extra_context) def as_oracle(self, compiler, connection, **extra_context): if self.output_field.get_internal_type() == 'JSONField': # Oracle doesn't support explicit cast to JSON. template = "JSON_QUERY(%(expressions)s, '$')" return super().as_sql(compiler, connection, template=template, **extra_context) return self.as_sql(compiler, connection, **extra_context) class Coalesce(Func): """Return, from left to right, the first non-null expression.""" function = 'COALESCE' def __init__(self, *expressions, **extra): if len(expressions) < 2: raise ValueError('Coalesce must take at least two expressions') super().__init__(*expressions, **extra) @property def empty_aggregate_value(self): for expression in self.get_source_expressions(): result = expression.empty_aggregate_value if result is NotImplemented or result is not None: return result return None def as_oracle(self, compiler, connection, **extra_context): # Oracle prohibits mixing TextField (NCLOB) and CharField (NVARCHAR2), # so convert all fields to NCLOB when that type is expected. 
if self.output_field.get_internal_type() == 'TextField': clone = self.copy() clone.set_source_expressions([ Func(expression, function='TO_NCLOB') for expression in self.get_source_expressions() ]) return super(Coalesce, clone).as_sql(compiler, connection, **extra_context) return self.as_sql(compiler, connection, **extra_context) class Collate(Func): function = 'COLLATE' template = '%(expressions)s %(function)s %(collation)s' # Inspired from https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS collation_re = _lazy_re_compile(r'^[\w\-]+$') def __init__(self, expression, collation): if not (collation and self.collation_re.match(collation)): raise ValueError('Invalid collation name: %r.' % collation) self.collation = collation super().__init__(expression) def as_sql(self, compiler, connection, **extra_context): extra_context.setdefault('collation', connection.ops.quote_name(self.collation)) return super().as_sql(compiler, connection, **extra_context) class Greatest(Func): """ Return the maximum expression. If any expression is null the return value is database-specific: On PostgreSQL, the maximum not-null expression is returned. On MySQL, Oracle, and SQLite, if any expression is null, null is returned. """ function = 'GREATEST' def __init__(self, *expressions, **extra): if len(expressions) < 2: raise ValueError('Greatest must take at least two expressions') super().__init__(*expressions, **extra) def as_sqlite(self, compiler, connection, **extra_context): """Use the MAX function on SQLite.""" return super().as_sqlite(compiler, connection, function='MAX', **extra_context) class JSONObject(Func): function = 'JSON_OBJECT' output_field = JSONField() def __init__(self, **fields): expressions = [] for key, value in fields.items(): expressions.extend((Value(key), value)) super().__init__(*expressions) def as_sql(self, compiler, connection, **extra_context): if not connection.features.has_json_object_function: raise NotSupportedError( 'JSONObject() is not supported on this database backend.' ) return super().as_sql(compiler, connection, **extra_context) def as_postgresql(self, compiler, connection, **extra_context): return self.as_sql( compiler, connection, function='JSONB_BUILD_OBJECT', **extra_context, ) def as_oracle(self, compiler, connection, **extra_context): class ArgJoiner: def join(self, args): args = [' VALUE '.join(arg) for arg in zip(args[::2], args[1::2])] return ', '.join(args) return self.as_sql( compiler, connection, arg_joiner=ArgJoiner(), template='%(function)s(%(expressions)s RETURNING CLOB)', **extra_context, ) class Least(Func): """ Return the minimum expression. If any expression is null the return value is database-specific: On PostgreSQL, return the minimum not-null expression. On MySQL, Oracle, and SQLite, if any expression is null, return null. 
""" function = 'LEAST' def __init__(self, *expressions, **extra): if len(expressions) < 2: raise ValueError('Least must take at least two expressions') super().__init__(*expressions, **extra) def as_sqlite(self, compiler, connection, **extra_context): """Use the MIN function on SQLite.""" return super().as_sqlite(compiler, connection, function='MIN', **extra_context) class NullIf(Func): function = 'NULLIF' arity = 2 def as_oracle(self, compiler, connection, **extra_context): expression1 = self.get_source_expressions()[0] if isinstance(expression1, Value) and expression1.value is None: raise ValueError('Oracle does not allow Value(None) for expression1.') return super().as_sql(compiler, connection, **extra_context)
from datetime import datetime from django.conf import settings from django.db.models.expressions import Func from django.db.models.fields import ( DateField, DateTimeField, DurationField, Field, IntegerField, TimeField, ) from django.db.models.lookups import ( Transform, YearExact, YearGt, YearGte, YearLt, YearLte, ) from django.utils import timezone class TimezoneMixin: tzinfo = None def get_tzname(self): # Timezone conversions must happen to the input datetime *before* # applying a function. 2015-12-31 23:00:00 -02:00 is stored in the # database as 2016-01-01 01:00:00 +00:00. Any results should be # based on the input datetime not the stored datetime. tzname = None if settings.USE_TZ: if self.tzinfo is None: tzname = timezone.get_current_timezone_name() else: tzname = timezone._get_timezone_name(self.tzinfo) return tzname class Extract(TimezoneMixin, Transform): lookup_name = None output_field = IntegerField() def __init__(self, expression, lookup_name=None, tzinfo=None, **extra): if self.lookup_name is None: self.lookup_name = lookup_name if self.lookup_name is None: raise ValueError('lookup_name must be provided') self.tzinfo = tzinfo super().__init__(expression, **extra) def as_sql(self, compiler, connection): sql, params = compiler.compile(self.lhs) lhs_output_field = self.lhs.output_field if isinstance(lhs_output_field, DateTimeField): tzname = self.get_tzname() sql = connection.ops.datetime_extract_sql(self.lookup_name, sql, tzname) elif self.tzinfo is not None: raise ValueError('tzinfo can only be used with DateTimeField.') elif isinstance(lhs_output_field, DateField): sql = connection.ops.date_extract_sql(self.lookup_name, sql) elif isinstance(lhs_output_field, TimeField): sql = connection.ops.time_extract_sql(self.lookup_name, sql) elif isinstance(lhs_output_field, DurationField): if not connection.features.has_native_duration_field: raise ValueError('Extract requires native DurationField database support.') sql = connection.ops.time_extract_sql(self.lookup_name, sql) else: # resolve_expression has already validated the output_field so this # assert should never be hit. assert False, "Tried to Extract from an invalid type." return sql, params def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): copy = super().resolve_expression(query, allow_joins, reuse, summarize, for_save) field = getattr(copy.lhs, 'output_field', None) if field is None: return copy if not isinstance(field, (DateField, DateTimeField, TimeField, DurationField)): raise ValueError( 'Extract input expression must be DateField, DateTimeField, ' 'TimeField, or DurationField.' ) # Passing dates to functions expecting datetimes is most likely a mistake. if type(field) == DateField and copy.lookup_name in ('hour', 'minute', 'second'): raise ValueError( "Cannot extract time component '%s' from DateField '%s'." % (copy.lookup_name, field.name) ) if ( isinstance(field, DurationField) and copy.lookup_name in ('year', 'iso_year', 'month', 'week', 'week_day', 'iso_week_day', 'quarter') ): raise ValueError( "Cannot extract component '%s' from DurationField '%s'." % (copy.lookup_name, field.name) ) return copy class ExtractYear(Extract): lookup_name = 'year' class ExtractIsoYear(Extract): """Return the ISO-8601 week-numbering year.""" lookup_name = 'iso_year' class ExtractMonth(Extract): lookup_name = 'month' class ExtractDay(Extract): lookup_name = 'day' class ExtractWeek(Extract): """ Return 1-52 or 53, based on ISO-8601, i.e., Monday is the first of the week. 
""" lookup_name = 'week' class ExtractWeekDay(Extract): """ Return Sunday=1 through Saturday=7. To replicate this in Python: (mydatetime.isoweekday() % 7) + 1 """ lookup_name = 'week_day' class ExtractIsoWeekDay(Extract): """Return Monday=1 through Sunday=7, based on ISO-8601.""" lookup_name = 'iso_week_day' class ExtractQuarter(Extract): lookup_name = 'quarter' class ExtractHour(Extract): lookup_name = 'hour' class ExtractMinute(Extract): lookup_name = 'minute' class ExtractSecond(Extract): lookup_name = 'second' DateField.register_lookup(ExtractYear) DateField.register_lookup(ExtractMonth) DateField.register_lookup(ExtractDay) DateField.register_lookup(ExtractWeekDay) DateField.register_lookup(ExtractIsoWeekDay) DateField.register_lookup(ExtractWeek) DateField.register_lookup(ExtractIsoYear) DateField.register_lookup(ExtractQuarter) TimeField.register_lookup(ExtractHour) TimeField.register_lookup(ExtractMinute) TimeField.register_lookup(ExtractSecond) DateTimeField.register_lookup(ExtractHour) DateTimeField.register_lookup(ExtractMinute) DateTimeField.register_lookup(ExtractSecond) ExtractYear.register_lookup(YearExact) ExtractYear.register_lookup(YearGt) ExtractYear.register_lookup(YearGte) ExtractYear.register_lookup(YearLt) ExtractYear.register_lookup(YearLte) ExtractIsoYear.register_lookup(YearExact) ExtractIsoYear.register_lookup(YearGt) ExtractIsoYear.register_lookup(YearGte) ExtractIsoYear.register_lookup(YearLt) ExtractIsoYear.register_lookup(YearLte) class Now(Func): template = 'CURRENT_TIMESTAMP' output_field = DateTimeField() def as_postgresql(self, compiler, connection, **extra_context): # PostgreSQL's CURRENT_TIMESTAMP means "the time at the start of the # transaction". Use STATEMENT_TIMESTAMP to be cross-compatible with # other databases. return self.as_sql(compiler, connection, template='STATEMENT_TIMESTAMP()', **extra_context) class TruncBase(TimezoneMixin, Transform): kind = None tzinfo = None def __init__(self, expression, output_field=None, tzinfo=None, is_dst=None, **extra): self.tzinfo = tzinfo self.is_dst = is_dst super().__init__(expression, output_field=output_field, **extra) def as_sql(self, compiler, connection): inner_sql, inner_params = compiler.compile(self.lhs) tzname = None if isinstance(self.lhs.output_field, DateTimeField): tzname = self.get_tzname() elif self.tzinfo is not None: raise ValueError('tzinfo can only be used with DateTimeField.') if isinstance(self.output_field, DateTimeField): sql = connection.ops.datetime_trunc_sql(self.kind, inner_sql, tzname) elif isinstance(self.output_field, DateField): sql = connection.ops.date_trunc_sql(self.kind, inner_sql, tzname) elif isinstance(self.output_field, TimeField): sql = connection.ops.time_trunc_sql(self.kind, inner_sql, tzname) else: raise ValueError('Trunc only valid on DateField, TimeField, or DateTimeField.') return sql, inner_params def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False): copy = super().resolve_expression(query, allow_joins, reuse, summarize, for_save) field = copy.lhs.output_field # DateTimeField is a subclass of DateField so this works for both. if not isinstance(field, (DateField, TimeField)): raise TypeError( "%r isn't a DateField, TimeField, or DateTimeField." % field.name ) # If self.output_field was None, then accessing the field will trigger # the resolver to assign it to self.lhs.output_field. 
if not isinstance(copy.output_field, (DateField, DateTimeField, TimeField)): raise ValueError('output_field must be either DateField, TimeField, or DateTimeField') # Passing dates or times to functions expecting datetimes is most # likely a mistake. class_output_field = self.__class__.output_field if isinstance(self.__class__.output_field, Field) else None output_field = class_output_field or copy.output_field has_explicit_output_field = class_output_field or field.__class__ is not copy.output_field.__class__ if type(field) == DateField and ( isinstance(output_field, DateTimeField) or copy.kind in ('hour', 'minute', 'second', 'time')): raise ValueError("Cannot truncate DateField '%s' to %s." % ( field.name, output_field.__class__.__name__ if has_explicit_output_field else 'DateTimeField' )) elif isinstance(field, TimeField) and ( isinstance(output_field, DateTimeField) or copy.kind in ('year', 'quarter', 'month', 'week', 'day', 'date')): raise ValueError("Cannot truncate TimeField '%s' to %s." % ( field.name, output_field.__class__.__name__ if has_explicit_output_field else 'DateTimeField' )) return copy def convert_value(self, value, expression, connection): if isinstance(self.output_field, DateTimeField): if not settings.USE_TZ: pass elif value is not None: value = value.replace(tzinfo=None) value = timezone.make_aware(value, self.tzinfo, is_dst=self.is_dst) elif not connection.features.has_zoneinfo_database: raise ValueError( 'Database returned an invalid datetime value. Are time ' 'zone definitions for your database installed?' ) elif isinstance(value, datetime): if value is None: pass elif isinstance(self.output_field, DateField): value = value.date() elif isinstance(self.output_field, TimeField): value = value.time() return value class Trunc(TruncBase): def __init__(self, expression, kind, output_field=None, tzinfo=None, is_dst=None, **extra): self.kind = kind super().__init__( expression, output_field=output_field, tzinfo=tzinfo, is_dst=is_dst, **extra ) class TruncYear(TruncBase): kind = 'year' class TruncQuarter(TruncBase): kind = 'quarter' class TruncMonth(TruncBase): kind = 'month' class TruncWeek(TruncBase): """Truncate to midnight on the Monday of the week.""" kind = 'week' class TruncDay(TruncBase): kind = 'day' class TruncDate(TruncBase): kind = 'date' lookup_name = 'date' output_field = DateField() def as_sql(self, compiler, connection): # Cast to date rather than truncate to date. lhs, lhs_params = compiler.compile(self.lhs) tzname = self.get_tzname() sql = connection.ops.datetime_cast_date_sql(lhs, tzname) return sql, lhs_params class TruncTime(TruncBase): kind = 'time' lookup_name = 'time' output_field = TimeField() def as_sql(self, compiler, connection): # Cast to time rather than truncate to time. lhs, lhs_params = compiler.compile(self.lhs) tzname = self.get_tzname() sql = connection.ops.datetime_cast_time_sql(lhs, tzname) return sql, lhs_params class TruncHour(TruncBase): kind = 'hour' class TruncMinute(TruncBase): kind = 'minute' class TruncSecond(TruncBase): kind = 'second' DateTimeField.register_lookup(TruncDate) DateTimeField.register_lookup(TruncTime)
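

# Illustrative sketch, not part of Django itself: aggregating with the
# Extract/Trunc transforms above. ``Event`` is a hypothetical model with a
# DateTimeField named ``start``.
def _datetime_usage_sketch(Event):
    from django.db.models import Count
    # TruncMonth cuts every timestamp down to midnight on the first day of
    # its month, which makes it a natural GROUP BY key.
    per_month = (
        Event.objects
        .annotate(month=TruncMonth('start'))
        .values('month')
        .order_by('month')
        .annotate(n=Count('id'))
    )
    # The register_lookup() calls above also expose the transforms in filter
    # syntax; week_day is inherited by DateTimeField from DateField.
    sundays = Event.objects.filter(start__week_day=1)  # ExtractWeekDay: Sunday=1
    return per_month, sundays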
""" Create SQL statements for QuerySets. The code in here encapsulates all of the SQL construction so that QuerySets themselves do not have to (and could be backed by things other than SQL databases). The abstraction barrier only works one way: this module has to know all about the internals of models in order to get the information it needs. """ import copy import difflib import functools import sys from collections import Counter, namedtuple from collections.abc import Iterator, Mapping from itertools import chain, count, product from string import ascii_uppercase from django.core.exceptions import FieldDoesNotExist, FieldError from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections from django.db.models.aggregates import Count from django.db.models.constants import LOOKUP_SEP from django.db.models.expressions import ( BaseExpression, Col, Exists, F, OuterRef, Ref, ResolvedOuterRef, ) from django.db.models.fields import Field from django.db.models.fields.related_lookups import MultiColSource from django.db.models.lookups import Lookup from django.db.models.query_utils import ( Q, check_rel_lookup_compatibility, refs_expression, ) from django.db.models.sql.constants import INNER, LOUTER, ORDER_DIR, SINGLE from django.db.models.sql.datastructures import ( BaseTable, Empty, Join, MultiJoin, ) from django.db.models.sql.where import ( AND, OR, ExtraWhere, NothingNode, WhereNode, ) from django.utils.functional import cached_property from django.utils.tree import Node __all__ = ['Query', 'RawQuery'] def get_field_names_from_opts(opts): return set(chain.from_iterable( (f.name, f.attname) if f.concrete else (f.name,) for f in opts.get_fields() )) def get_children_from_q(q): for child in q.children: if isinstance(child, Node): yield from get_children_from_q(child) else: yield child JoinInfo = namedtuple( 'JoinInfo', ('final_field', 'targets', 'opts', 'joins', 'path', 'transform_function') ) class RawQuery: """A single raw SQL query.""" def __init__(self, sql, using, params=()): self.params = params self.sql = sql self.using = using self.cursor = None # Mirror some properties of a normal query so that # the compiler can be used to process results. self.low_mark, self.high_mark = 0, None # Used for offset/limit self.extra_select = {} self.annotation_select = {} def chain(self, using): return self.clone(using) def clone(self, using): return RawQuery(self.sql, using, params=self.params) def get_columns(self): if self.cursor is None: self._execute_query() converter = connections[self.using].introspection.identifier_converter return [converter(column_meta[0]) for column_meta in self.cursor.description] def __iter__(self): # Always execute a new query for a new iterator. # This could be optimized with a cache at the expense of RAM. self._execute_query() if not connections[self.using].features.can_use_chunked_reads: # If the database can't use chunked reads we need to make sure we # evaluate the entire query up front. result = list(self.cursor) else: result = self.cursor return iter(result) def __repr__(self): return "<%s: %s>" % (self.__class__.__name__, self) @property def params_type(self): if self.params is None: return None return dict if isinstance(self.params, Mapping) else tuple def __str__(self): if self.params_type is None: return self.sql return self.sql % self.params_type(self.params) def _execute_query(self): connection = connections[self.using] # Adapt parameters to the database, as much as possible considering # that the target type isn't known. See #17755. 
        params_type = self.params_type
        adapter = connection.ops.adapt_unknown_value
        if params_type is tuple:
            params = tuple(adapter(val) for val in self.params)
        elif params_type is dict:
            params = {key: adapter(val) for key, val in self.params.items()}
        elif params_type is None:
            params = None
        else:
            raise RuntimeError("Unexpected params type: %s" % params_type)

        self.cursor = connection.cursor()
        self.cursor.execute(self.sql, params)


class Query(BaseExpression):
    """A single SQL query."""

    alias_prefix = 'T'
    subq_aliases = frozenset([alias_prefix])

    compiler = 'SQLCompiler'

    def __init__(self, model, alias_cols=True):
        self.model = model
        self.alias_refcount = {}
        # alias_map is the most important data structure regarding joins.
        # It's used for recording which joins exist in the query and what
        # types they are. The key is the alias of the joined table (possibly
        # the table name) and the value is a Join-like object (see
        # sql.datastructures.Join for more information).
        self.alias_map = {}
        # Whether to provide alias to columns during reference resolving.
        self.alias_cols = alias_cols
        # Sometimes the query contains references to aliases in outer queries (as
        # a result of split_exclude). Correct alias quoting needs to know these
        # aliases too.
        # Map external tables to whether they are aliased.
        self.external_aliases = {}
        self.table_map = {}  # Maps table names to list of aliases.
        self.default_cols = True
        self.default_ordering = True
        self.standard_ordering = True
        self.used_aliases = set()
        self.filter_is_sticky = False
        self.subquery = False

        # SQL-related attributes
        # Select and related select clauses are expressions to use in the
        # SELECT clause of the query.
        # The select is used for cases where we want to set up the select
        # clause to contain other than default fields (values(), subqueries...)
        # Note that annotations go to annotations dictionary.
        self.select = ()
        self.where = WhereNode()
        # The group_by attribute can have one of the following forms:
        #  - None: no group by at all in the query
        #  - A tuple of expressions: group by (at least) those expressions.
        #    String refs are also allowed for now.
        #  - True: group by all select fields of the model
        # See compiler.get_group_by() for details.
        self.group_by = None
        self.order_by = ()
        self.low_mark, self.high_mark = 0, None  # Used for offset/limit
        self.distinct = False
        self.distinct_fields = ()
        self.select_for_update = False
        self.select_for_update_nowait = False
        self.select_for_update_skip_locked = False
        self.select_for_update_of = ()
        self.select_for_no_key_update = False

        self.select_related = False
        # Arbitrary limit for select_related to prevent infinite recursion.
        self.max_depth = 5

        # Holds the selects defined by a call to values() or values_list()
        # excluding annotation_select and extra_select.
        self.values_select = ()

        # SQL annotation-related attributes
        self.annotations = {}  # Maps alias -> Annotation Expression
        self.annotation_select_mask = None
        self._annotation_select_cache = None

        # Set combination attributes
        self.combinator = None
        self.combinator_all = False
        self.combined_queries = ()

        # These are for extensions. The contents are more or less appended
        # verbatim to the appropriate clause.
        self.extra = {}  # Maps col_alias -> (col_sql, params).
        self.extra_select_mask = None
        self._extra_select_cache = None

        self.extra_tables = ()
        self.extra_order_by = ()

        # A tuple that is a set of model field names and either True, if these
        # are the fields to defer, or False if these are the only fields to
        # load.
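        # Illustrative mapping (not part of the module) between the public
        # QuerySet API and this tuple; "name" is an assumed field name:
        #
        #     .defer('name') -> (frozenset({'name'}), True)   # all fields except these
        #     .only('name')  -> (frozenset({'name'}), False)  # only these (the pk is
        #                                                     # always loaded as well)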
        self.deferred_loading = (frozenset(), True)

        self._filtered_relations = {}

        self.explain_query = False
        self.explain_format = None
        self.explain_options = {}

    @property
    def output_field(self):
        if len(self.select) == 1:
            select = self.select[0]
            return getattr(select, 'target', None) or select.field
        elif len(self.annotation_select) == 1:
            return next(iter(self.annotation_select.values())).output_field

    @property
    def has_select_fields(self):
        return bool(self.select or self.annotation_select_mask or self.extra_select_mask)

    @cached_property
    def base_table(self):
        for alias in self.alias_map:
            return alias

    def __str__(self):
        """
        Return the query as a string of SQL with the parameter values
        substituted in (use sql_with_params() to see the unsubstituted string).

        Parameter values won't necessarily be quoted correctly, since that is
        done by the database interface at execution time.
        """
        sql, params = self.sql_with_params()
        return sql % params

    def sql_with_params(self):
        """
        Return the query as an SQL string and the parameters that will be
        substituted into the query.
        """
        return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()

    def __deepcopy__(self, memo):
        """Limit the amount of work when a Query is deepcopied."""
        result = self.clone()
        memo[id(self)] = result
        return result

    def get_compiler(self, using=None, connection=None, elide_empty=True):
        if using is None and connection is None:
            raise ValueError("Need either using or connection")
        if using:
            connection = connections[using]
        return connection.ops.compiler(self.compiler)(self, connection, using, elide_empty)

    def get_meta(self):
        """
        Return the Options instance (the model._meta) from which to start
        processing. Normally, this is self.model._meta, but it can be changed
        by subclasses.
        """
        return self.model._meta

    def clone(self):
        """
        Return a copy of the current Query. A lightweight alternative to
        deepcopy().
        """
        obj = Empty()
        obj.__class__ = self.__class__
        # Copy references to everything.
        obj.__dict__ = self.__dict__.copy()
        # Clone attributes that can't use shallow copy.
        obj.alias_refcount = self.alias_refcount.copy()
        obj.alias_map = self.alias_map.copy()
        obj.external_aliases = self.external_aliases.copy()
        obj.table_map = self.table_map.copy()
        obj.where = self.where.clone()
        obj.annotations = self.annotations.copy()
        if self.annotation_select_mask is None:
            obj.annotation_select_mask = None
        else:
            obj.annotation_select_mask = self.annotation_select_mask.copy()
        obj.combined_queries = tuple(query.clone() for query in self.combined_queries)
        # _annotation_select_cache cannot be copied, as doing so breaks the
        # (necessary) state in which both annotations and
        # _annotation_select_cache point to the same underlying objects.
        # It will get re-populated in the cloned queryset the next time it's
        # used.
        obj._annotation_select_cache = None
        obj.extra = self.extra.copy()
        if self.extra_select_mask is None:
            obj.extra_select_mask = None
        else:
            obj.extra_select_mask = self.extra_select_mask.copy()
        if self._extra_select_cache is None:
            obj._extra_select_cache = None
        else:
            obj._extra_select_cache = self._extra_select_cache.copy()
        if self.select_related is not False:
            # Use deepcopy because select_related stores fields in nested
            # dicts.
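            # The nested-dict shape being deep-copied here looks like this
            # (illustrative sketch; the relation names are assumptions):
            #
            #     select_related('author__publisher')
            #         -> {'author': {'publisher': {}}}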
            obj.select_related = copy.deepcopy(obj.select_related)
        if 'subq_aliases' in self.__dict__:
            obj.subq_aliases = self.subq_aliases.copy()
        obj.used_aliases = self.used_aliases.copy()
        obj._filtered_relations = self._filtered_relations.copy()
        # Clear the cached_property
        try:
            del obj.base_table
        except AttributeError:
            pass
        return obj

    def chain(self, klass=None):
        """
        Return a copy of the current Query that's ready for another operation.
        The klass argument changes the type of the Query, e.g. UpdateQuery.
        """
        obj = self.clone()
        if klass and obj.__class__ != klass:
            obj.__class__ = klass
        if not obj.filter_is_sticky:
            obj.used_aliases = set()
        obj.filter_is_sticky = False
        if hasattr(obj, '_setup_query'):
            obj._setup_query()
        return obj

    def relabeled_clone(self, change_map):
        clone = self.clone()
        clone.change_aliases(change_map)
        return clone

    def _get_col(self, target, field, alias):
        if not self.alias_cols:
            alias = None
        return target.get_col(alias, field)

    def rewrite_cols(self, annotation, col_cnt):
        # We must make sure the inner query has the referred columns in it.
        # If we are aggregating over an annotation, then Django uses Ref()
        # instances to note this. However, if we are annotating over a column
        # of a related model, then it might be that column isn't part of the
        # SELECT clause of the inner query, and we must manually make sure
        # the column is selected. An example case is:
        #     .aggregate(Sum('author__awards'))
        # Resolving this expression results in a join to author, but there
        # is no guarantee the awards column of author is in the select clause
        # of the query. Thus we must manually add the column to the inner
        # query.
        orig_exprs = annotation.get_source_expressions()
        new_exprs = []
        for expr in orig_exprs:
            # FIXME: These conditions are fairly arbitrary. Identify a better
            # method of having expressions decide which code path they should
            # take.
            if isinstance(expr, Ref):
                # It's already a Ref to a subquery (see resolve_ref() for
                # details).
                new_exprs.append(expr)
            elif isinstance(expr, (WhereNode, Lookup)):
                # Decompose the subexpressions further. The code here is
                # copied from the else clause, but this condition must appear
                # before the contains_aggregate/is_summary condition below.
                new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
                new_exprs.append(new_expr)
            else:
                # Reuse aliases of expressions already selected in subquery.
                for col_alias, selected_annotation in self.annotation_select.items():
                    if selected_annotation is expr:
                        new_expr = Ref(col_alias, expr)
                        break
                else:
                    # An expression that is not selected in the subquery.
                    if isinstance(expr, Col) or (expr.contains_aggregate and not expr.is_summary):
                        # Reference column or another aggregate. Select it
                        # under a non-conflicting alias.
                        col_cnt += 1
                        col_alias = '__col%d' % col_cnt
                        self.annotations[col_alias] = expr
                        self.append_annotation_mask([col_alias])
                        new_expr = Ref(col_alias, expr)
                    else:
                        # Some other expression not referencing database values
                        # directly. Its subexpression might contain Cols.
                        new_expr, col_cnt = self.rewrite_cols(expr, col_cnt)
                new_exprs.append(new_expr)
        annotation.set_source_expressions(new_exprs)
        return annotation, col_cnt

    def get_aggregation(self, using, added_aggregate_names):
        """
        Return the dictionary with the values of the existing aggregations.
        """
        if not self.annotation_select:
            return {}
        existing_annotations = [
            annotation for alias, annotation
            in self.annotations.items()
            if alias not in added_aggregate_names
        ]
        # Decide if we need to use a subquery.
        #
        # Existing annotations would cause incorrect results as get_aggregation()
        # must produce just one result and thus must not use GROUP BY. But we
        # aren't smart enough to remove the existing annotations from the
        # query, so those would force us to use GROUP BY.
        #
        # If the query has limit or distinct, or uses set operations, then
        # those operations must be done in a subquery so that the query
        # aggregates on the limit and/or distinct results instead of applying
        # the distinct and limit after the aggregation.
        if (isinstance(self.group_by, tuple) or self.is_sliced or existing_annotations or
                self.distinct or self.combinator):
            from django.db.models.sql.subqueries import AggregateQuery
            inner_query = self.clone()
            inner_query.subquery = True
            outer_query = AggregateQuery(self.model, inner_query)
            inner_query.select_for_update = False
            inner_query.select_related = False
            inner_query.set_annotation_mask(self.annotation_select)
            # Queries with distinct_fields need ordering and when a limit is
            # applied we must take the slice from the ordered query. Otherwise
            # no need for ordering.
            inner_query.clear_ordering(force=False)
            if not inner_query.distinct:
                # If the inner query uses default select and it has some
                # aggregate annotations, then we must make sure the inner
                # query is grouped by the main model's primary key. However,
                # clearing the select clause can alter results if distinct is
                # used.
                has_existing_aggregate_annotations = any(
                    annotation for annotation in existing_annotations
                    if getattr(annotation, 'contains_aggregate', True)
                )
                if inner_query.default_cols and has_existing_aggregate_annotations:
                    inner_query.group_by = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),)
                inner_query.default_cols = False

            relabels = {t: 'subquery' for t in inner_query.alias_map}
            relabels[None] = 'subquery'
            # Remove any aggregates marked for reduction from the subquery
            # and move them to the outer AggregateQuery.
            col_cnt = 0
            for alias, expression in list(inner_query.annotation_select.items()):
                annotation_select_mask = inner_query.annotation_select_mask
                if expression.is_summary:
                    expression, col_cnt = inner_query.rewrite_cols(expression, col_cnt)
                    outer_query.annotations[alias] = expression.relabeled_clone(relabels)
                    del inner_query.annotations[alias]
                    annotation_select_mask.remove(alias)
                # Make sure the annotation_select won't use cached results.
                inner_query.set_annotation_mask(inner_query.annotation_select_mask)
            if inner_query.select == () and not inner_query.default_cols and not inner_query.annotation_select_mask:
                # In case of Model.objects[0:3].count(), there would be no
                # field selected in the inner query, yet we must use a subquery.
                # So, make sure at least one field is selected.
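                # The resulting SQL then has the shape (illustrative sketch):
                #
                #     SELECT COUNT(*) FROM (SELECT "pk_col" FROM ... LIMIT 3) subquery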
inner_query.select = (self.model._meta.pk.get_col(inner_query.get_initial_alias()),) else: outer_query = self self.select = () self.default_cols = False self.extra = {} empty_aggregate_result = [ expression.empty_aggregate_value for expression in outer_query.annotation_select.values() ] elide_empty = not any(result is NotImplemented for result in empty_aggregate_result) outer_query.clear_ordering(force=True) outer_query.clear_limits() outer_query.select_for_update = False outer_query.select_related = False compiler = outer_query.get_compiler(using, elide_empty=elide_empty) result = compiler.execute_sql(SINGLE) if result is None: result = empty_aggregate_result converters = compiler.get_converters(outer_query.annotation_select.values()) result = next(compiler.apply_converters((result,), converters)) return dict(zip(outer_query.annotation_select, result)) def get_count(self, using): """ Perform a COUNT() query using the current filter constraints. """ obj = self.clone() obj.add_annotation(Count('*'), alias='__count', is_summary=True) number = obj.get_aggregation(using, ['__count'])['__count'] if number is None: number = 0 return number def has_filters(self): return self.where def exists(self, using, limit=True): q = self.clone() if not q.distinct: if q.group_by is True: q.add_fields((f.attname for f in self.model._meta.concrete_fields), False) # Disable GROUP BY aliases to avoid orphaning references to the # SELECT clause which is about to be cleared. q.set_group_by(allow_aliases=False) q.clear_select_clause() if q.combined_queries and q.combinator == 'union': limit_combined = connections[using].features.supports_slicing_ordering_in_compound q.combined_queries = tuple( combined_query.exists(using, limit=limit_combined) for combined_query in q.combined_queries ) q.clear_ordering(force=True) if limit: q.set_limits(high=1) q.add_extra({'a': 1}, None, None, None, None, None) q.set_extra_mask(['a']) return q def has_results(self, using): q = self.exists(using) compiler = q.get_compiler(using=using) return compiler.has_results() def explain(self, using, format=None, **options): q = self.clone() q.explain_query = True q.explain_format = format q.explain_options = options compiler = q.get_compiler(using=using) return '\n'.join(compiler.explain_query()) def combine(self, rhs, connector): """ Merge the 'rhs' query into the current one (with any 'rhs' effects being applied *after* (that is, "to the right of") anything in the current query. 'rhs' is not modified during a call to this function. The 'connector' parameter describes how to connect filters from the 'rhs' query. """ if self.model != rhs.model: raise TypeError('Cannot combine queries on two different base models.') if self.is_sliced: raise TypeError('Cannot combine queries once a slice has been taken.') if self.distinct != rhs.distinct: raise TypeError('Cannot combine a unique query with a non-unique query.') if self.distinct_fields != rhs.distinct_fields: raise TypeError('Cannot combine queries with different distinct fields.') # Work out how to relabel the rhs aliases, if necessary. change_map = {} conjunction = (connector == AND) # Determine which existing joins can be reused. When combining the # query with AND we must recreate all joins for m2m filters. When # combining with OR we can reuse joins. The reason is that in AND # case a single row can't fulfill a condition like: # revrel__col=1 & revrel__col=2 # But, there might be two different related rows matching this # condition. 
In OR case a single True is enough, so single row is # enough, too. # # Note that we will be creating duplicate joins for non-m2m joins in # the AND case. The results will be correct but this creates too many # joins. This is something that could be fixed later on. reuse = set() if conjunction else set(self.alias_map) # Base table must be present in the query - this is the same # table on both sides. self.get_initial_alias() joinpromoter = JoinPromoter(connector, 2, False) joinpromoter.add_votes( j for j in self.alias_map if self.alias_map[j].join_type == INNER) rhs_votes = set() # Now, add the joins from rhs query into the new query (skipping base # table). rhs_tables = list(rhs.alias_map)[1:] for alias in rhs_tables: join = rhs.alias_map[alias] # If the left side of the join was already relabeled, use the # updated alias. join = join.relabeled_clone(change_map) new_alias = self.join(join, reuse=reuse) if join.join_type == INNER: rhs_votes.add(new_alias) # We can't reuse the same join again in the query. If we have two # distinct joins for the same connection in rhs query, then the # combined query must have two joins, too. reuse.discard(new_alias) if alias != new_alias: change_map[alias] = new_alias if not rhs.alias_refcount[alias]: # The alias was unused in the rhs query. Unref it so that it # will be unused in the new query, too. We have to add and # unref the alias so that join promotion has information of # the join type for the unused alias. self.unref_alias(new_alias) joinpromoter.add_votes(rhs_votes) joinpromoter.update_join_types(self) # Combine subqueries aliases to ensure aliases relabelling properly # handle subqueries when combining where and select clauses. self.subq_aliases |= rhs.subq_aliases # Now relabel a copy of the rhs where-clause and add it to the current # one. w = rhs.where.clone() w.relabel_aliases(change_map) self.where.add(w, connector) # Selection columns and extra extensions are those provided by 'rhs'. if rhs.select: self.set_select([col.relabeled_clone(change_map) for col in rhs.select]) else: self.select = () if connector == OR: # It would be nice to be able to handle this, but the queries don't # really make sense (or return consistent value sets). Not worth # the extra complexity when you can write a real query instead. if self.extra and rhs.extra: raise ValueError("When merging querysets using 'or', you cannot have extra(select=...) on both sides.") self.extra.update(rhs.extra) extra_select_mask = set() if self.extra_select_mask is not None: extra_select_mask.update(self.extra_select_mask) if rhs.extra_select_mask is not None: extra_select_mask.update(rhs.extra_select_mask) if extra_select_mask: self.set_extra_mask(extra_select_mask) self.extra_tables += rhs.extra_tables # Ordering uses the 'rhs' ordering, unless it has none, in which case # the current ordering is used. self.order_by = rhs.order_by or self.order_by self.extra_order_by = rhs.extra_order_by or self.extra_order_by def deferred_to_data(self, target, callback): """ Convert the self.deferred_loading data structure to an alternate data structure, describing the field that *will* be loaded. This is used to compute the columns to select from the database and also by the QuerySet class to work out which fields are being initialized on each model. Models that have all their fields included aren't mentioned in the result, only those that have field restrictions in place. The "target" parameter is the instance that is populated (in place). 
The "callback" is a function that is called whenever a (model, field) pair need to be added to "target". It accepts three parameters: "target", and the model and list of fields being added for that model. """ field_names, defer = self.deferred_loading if not field_names: return orig_opts = self.get_meta() seen = {} must_include = {orig_opts.concrete_model: {orig_opts.pk}} for field_name in field_names: parts = field_name.split(LOOKUP_SEP) cur_model = self.model._meta.concrete_model opts = orig_opts for name in parts[:-1]: old_model = cur_model if name in self._filtered_relations: name = self._filtered_relations[name].relation_name source = opts.get_field(name) if is_reverse_o2o(source): cur_model = source.related_model else: cur_model = source.remote_field.model opts = cur_model._meta # Even if we're "just passing through" this model, we must add # both the current model's pk and the related reference field # (if it's not a reverse relation) to the things we select. if not is_reverse_o2o(source): must_include[old_model].add(source) add_to_dict(must_include, cur_model, opts.pk) field = opts.get_field(parts[-1]) is_reverse_object = field.auto_created and not field.concrete model = field.related_model if is_reverse_object else field.model model = model._meta.concrete_model if model == opts.model: model = cur_model if not is_reverse_o2o(field): add_to_dict(seen, model, field) if defer: # We need to load all fields for each model, except those that # appear in "seen" (for all models that appear in "seen"). The only # slight complexity here is handling fields that exist on parent # models. workset = {} for model, values in seen.items(): for field in model._meta.local_fields: if field not in values: m = field.model._meta.concrete_model add_to_dict(workset, m, field) for model, values in must_include.items(): # If we haven't included a model in workset, we don't add the # corresponding must_include fields for that model, since an # empty set means "include all fields". That's why there's no # "else" branch here. if model in workset: workset[model].update(values) for model, values in workset.items(): callback(target, model, values) else: for model, values in must_include.items(): if model in seen: seen[model].update(values) else: # As we've passed through this model, but not explicitly # included any fields, we have to make sure it's mentioned # so that only the "must include" fields are pulled in. seen[model] = values # Now ensure that every model in the inheritance chain is mentioned # in the parent list. Again, it must be mentioned to ensure that # only "must include" fields are pulled in. for model in orig_opts.get_parent_list(): seen.setdefault(model, set()) for model, values in seen.items(): callback(target, model, values) def table_alias(self, table_name, create=False, filtered_relation=None): """ Return a table alias for the given table_name and whether this is a new alias or not. If 'create' is true, a new alias is always created. Otherwise, the most recently created alias for the table (if one exists) is reused. """ alias_list = self.table_map.get(table_name) if not create and alias_list: alias = alias_list[0] self.alias_refcount[alias] += 1 return alias, False # Create a new alias for this table. if alias_list: alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1) alias_list.append(alias) else: # The first occurrence of a table uses the table name directly. 
            alias = filtered_relation.alias if filtered_relation is not None else table_name
            self.table_map[table_name] = [alias]
        self.alias_refcount[alias] = 1
        return alias, True

    def ref_alias(self, alias):
        """Increases the reference count for this alias."""
        self.alias_refcount[alias] += 1

    def unref_alias(self, alias, amount=1):
        """Decreases the reference count for this alias."""
        self.alias_refcount[alias] -= amount

    def promote_joins(self, aliases):
        """
        Promote recursively the join type of given aliases and their children
        to an outer join. Only promote a join if it is nullable or the parent
        join is an outer join.

        The children promotion is done to avoid join chains that contain a
        LOUTER b INNER c. So, if we have currently a INNER b INNER c and a->b
        is promoted, then we must also promote b->c automatically, or
        otherwise the promotion of a->b doesn't actually change anything in
        the query results.
        """
        aliases = list(aliases)
        while aliases:
            alias = aliases.pop(0)
            if self.alias_map[alias].join_type is None:
                # This is the base table (first FROM entry) - this table
                # isn't really joined at all in the query, so we should not
                # alter its join type.
                continue
            # Only the first alias (skipped above) should have None join_type
            assert self.alias_map[alias].join_type is not None
            parent_alias = self.alias_map[alias].parent_alias
            parent_louter = parent_alias and self.alias_map[parent_alias].join_type == LOUTER
            already_louter = self.alias_map[alias].join_type == LOUTER
            if ((self.alias_map[alias].nullable or parent_louter) and
                    not already_louter):
                self.alias_map[alias] = self.alias_map[alias].promote()
                # Join type of 'alias' changed, so re-examine all aliases that
                # refer to this one.
                aliases.extend(
                    join for join in self.alias_map
                    if self.alias_map[join].parent_alias == alias and join not in aliases
                )

    def demote_joins(self, aliases):
        """
        Change join type from LOUTER to INNER for all joins in aliases.

        Similarly to promote_joins(), this method must ensure no join chains
        containing first an outer, then an inner join are generated. If we
        are demoting b->c join in chain a LOUTER b LOUTER c then we must
        demote a->b automatically, or otherwise the demotion of b->c doesn't
        actually change anything in the query results.
        """
        aliases = list(aliases)
        while aliases:
            alias = aliases.pop(0)
            if self.alias_map[alias].join_type == LOUTER:
                self.alias_map[alias] = self.alias_map[alias].demote()
                parent_alias = self.alias_map[alias].parent_alias
                if self.alias_map[parent_alias].join_type == INNER:
                    aliases.append(parent_alias)

    def reset_refcounts(self, to_counts):
        """
        Reset reference counts for aliases so that they match the value passed
        in `to_counts`.
        """
        for alias, cur_refcount in self.alias_refcount.copy().items():
            unref_amount = cur_refcount - to_counts.get(alias, 0)
            self.unref_alias(alias, unref_amount)

    def change_aliases(self, change_map):
        """
        Change the aliases in change_map (which maps old-alias -> new-alias),
        relabelling any references to them in select columns and the where
        clause.
        """
        assert set(change_map).isdisjoint(change_map.values())

        # 1. Update references in "select" (normal columns plus aliases),
        # "group by" and "where".
        self.where.relabel_aliases(change_map)
        if isinstance(self.group_by, tuple):
            self.group_by = tuple([col.relabeled_clone(change_map) for col in self.group_by])
        self.select = tuple([col.relabeled_clone(change_map) for col in self.select])
        self.annotations = self.annotations and {
            key: col.relabeled_clone(change_map)
            for key, col in self.annotations.items()
        }

        # 2.
Rename the alias in the internal table/alias datastructures. for old_alias, new_alias in change_map.items(): if old_alias not in self.alias_map: continue alias_data = self.alias_map[old_alias].relabeled_clone(change_map) self.alias_map[new_alias] = alias_data self.alias_refcount[new_alias] = self.alias_refcount[old_alias] del self.alias_refcount[old_alias] del self.alias_map[old_alias] table_aliases = self.table_map[alias_data.table_name] for pos, alias in enumerate(table_aliases): if alias == old_alias: table_aliases[pos] = new_alias break self.external_aliases = { # Table is aliased or it's being changed and thus is aliased. change_map.get(alias, alias): (aliased or alias in change_map) for alias, aliased in self.external_aliases.items() } def bump_prefix(self, outer_query): """ Change the alias prefix to the next letter in the alphabet in a way that the outer query's aliases and this query's aliases will not conflict. Even tables that previously had no alias will get an alias after this call. """ def prefix_gen(): """ Generate a sequence of characters in alphabetical order: -> 'A', 'B', 'C', ... When the alphabet is finished, the sequence will continue with the Cartesian product: -> 'AA', 'AB', 'AC', ... """ alphabet = ascii_uppercase prefix = chr(ord(self.alias_prefix) + 1) yield prefix for n in count(1): seq = alphabet[alphabet.index(prefix):] if prefix else alphabet for s in product(seq, repeat=n): yield ''.join(s) prefix = None if self.alias_prefix != outer_query.alias_prefix: # No clashes between self and outer query should be possible. return # Explicitly avoid infinite loop. The constant divider is based on how # much depth recursive subquery references add to the stack. This value # might need to be adjusted when adding or removing function calls from # the code path in charge of performing these operations. local_recursion_limit = sys.getrecursionlimit() // 16 for pos, prefix in enumerate(prefix_gen()): if prefix not in self.subq_aliases: self.alias_prefix = prefix break if pos > local_recursion_limit: raise RecursionError( 'Maximum recursion depth exceeded: too many subqueries.' ) self.subq_aliases = self.subq_aliases.union([self.alias_prefix]) outer_query.subq_aliases = outer_query.subq_aliases.union(self.subq_aliases) self.change_aliases({ alias: '%s%d' % (self.alias_prefix, pos) for pos, alias in enumerate(self.alias_map) }) def get_initial_alias(self): """ Return the first alias for this query, after increasing its reference count. """ if self.alias_map: alias = self.base_table self.ref_alias(alias) else: alias = self.join(BaseTable(self.get_meta().db_table, None)) return alias def count_active_tables(self): """ Return the number of tables in this query with a non-zero reference count. After execution, the reference counts are zeroed, so tables added in compiler will not be seen by this method. """ return len([1 for count in self.alias_refcount.values() if count]) def join(self, join, reuse=None): """ Return an alias for the 'join', either reusing an existing alias for that join or creating a new one. 'join' is either a sql.datastructures.BaseTable or Join. The 'reuse' parameter can be either None which means all joins are reusable, or it can be a set containing the aliases that can be reused. A join is always created as LOUTER if the lhs alias is LOUTER to make sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new joins are created as LOUTER if the join is nullable. 
""" reuse_aliases = [ a for a, j in self.alias_map.items() if (reuse is None or a in reuse) and j.equals(join) ] if reuse_aliases: if join.table_alias in reuse_aliases: reuse_alias = join.table_alias else: # Reuse the most recent alias of the joined table # (a many-to-many relation may be joined multiple times). reuse_alias = reuse_aliases[-1] self.ref_alias(reuse_alias) return reuse_alias # No reuse is possible, so we need a new alias. alias, _ = self.table_alias(join.table_name, create=True, filtered_relation=join.filtered_relation) if join.join_type: if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable: join_type = LOUTER else: join_type = INNER join.join_type = join_type join.table_alias = alias self.alias_map[alias] = join return alias def join_parent_model(self, opts, model, alias, seen): """ Make sure the given 'model' is joined in the query. If 'model' isn't a parent of 'opts' or if it is None this method is a no-op. The 'alias' is the root alias for starting the join, 'seen' is a dict of model -> alias of existing joins. It must also contain a mapping of None -> some alias. This will be returned in the no-op case. """ if model in seen: return seen[model] chain = opts.get_base_chain(model) if not chain: return alias curr_opts = opts for int_model in chain: if int_model in seen: curr_opts = int_model._meta alias = seen[int_model] continue # Proxy model have elements in base chain # with no parents, assign the new options # object and skip to the next base in that # case if not curr_opts.parents[int_model]: curr_opts = int_model._meta continue link_field = curr_opts.get_ancestor_link(int_model) join_info = self.setup_joins([link_field.name], curr_opts, alias) curr_opts = int_model._meta alias = seen[int_model] = join_info.joins[-1] return alias or seen[None] def add_annotation(self, annotation, alias, is_summary=False, select=True): """Add a single annotation expression to the Query.""" annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None, summarize=is_summary) if select: self.append_annotation_mask([alias]) else: self.set_annotation_mask(set(self.annotation_select).difference({alias})) self.annotations[alias] = annotation def resolve_expression(self, query, *args, **kwargs): clone = self.clone() # Subqueries need to use a different set of aliases than the outer query. clone.bump_prefix(query) clone.subquery = True clone.where.resolve_expression(query, *args, **kwargs) for key, value in clone.annotations.items(): resolved = value.resolve_expression(query, *args, **kwargs) if hasattr(resolved, 'external_aliases'): resolved.external_aliases.update(clone.external_aliases) clone.annotations[key] = resolved # Outer query's aliases are considered external. for alias, table in query.alias_map.items(): clone.external_aliases[alias] = ( (isinstance(table, Join) and table.join_field.related_model._meta.db_table != alias) or (isinstance(table, BaseTable) and table.table_name != table.table_alias) ) return clone def get_external_cols(self): exprs = chain(self.annotations.values(), self.where.children) return [ col for col in self._gen_cols(exprs, include_external=True) if col.alias in self.external_aliases ] def as_sql(self, compiler, connection): # Some backends (e.g. Oracle) raise an error when a subquery contains # unnecessary ORDER BY clause. 
        if (
            self.subquery and
            not connection.features.ignores_unnecessary_order_by_in_subqueries
        ):
            self.clear_ordering(force=False)
        sql, params = self.get_compiler(connection=connection).as_sql()
        if self.subquery:
            sql = '(%s)' % sql
        return sql, params

    def resolve_lookup_value(self, value, can_reuse, allow_joins):
        if hasattr(value, 'resolve_expression'):
            value = value.resolve_expression(
                self, reuse=can_reuse, allow_joins=allow_joins,
            )
        elif isinstance(value, (list, tuple)):
            # The items of the iterable may be expressions and therefore need
            # to be resolved independently.
            values = (
                self.resolve_lookup_value(sub_value, can_reuse, allow_joins)
                for sub_value in value
            )
            type_ = type(value)
            if hasattr(type_, '_make'):  # namedtuple
                return type_(*values)
            return type_(values)
        return value

    def solve_lookup_type(self, lookup):
        """
        Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains').
        """
        lookup_splitted = lookup.split(LOOKUP_SEP)
        if self.annotations:
            expression, expression_lookups = refs_expression(lookup_splitted, self.annotations)
            if expression:
                return expression_lookups, (), expression
        _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
        field_parts = lookup_splitted[0:len(lookup_splitted) - len(lookup_parts)]
        if len(lookup_parts) > 1 and not field_parts:
            raise FieldError(
                'Invalid lookup "%s" for model "%s".' %
                (lookup, self.get_meta().model.__name__)
            )
        return lookup_parts, field_parts, False

    def check_query_object_type(self, value, opts, field):
        """
        Check whether the object passed while querying is of the correct type.
        If not, raise a ValueError specifying the wrong object.
        """
        if hasattr(value, '_meta'):
            if not check_rel_lookup_compatibility(value._meta.model, opts, field):
                raise ValueError(
                    'Cannot query "%s": Must be "%s" instance.' %
                    (value, opts.object_name))

    def check_related_objects(self, field, value, opts):
        """Check the type of object passed to query relations."""
        if field.is_relation:
            # Check that the field and the queryset use the same model in a
            # query like .filter(author=Author.objects.all()). For example, the
            # opts would be Author's (from the author field) and value.model
            # would be Author.objects.all() queryset's .model (Author also).
            # The field is the related field on the lhs side.
            if (isinstance(value, Query) and not value.has_select_fields and
                    not check_rel_lookup_compatibility(value.model, opts, field)):
                raise ValueError(
                    'Cannot use QuerySet for "%s": Use a QuerySet for "%s".' %
                    (value.model._meta.object_name, opts.object_name)
                )
            elif hasattr(value, '_meta'):
                self.check_query_object_type(value, opts, field)
            elif hasattr(value, '__iter__'):
                for v in value:
                    self.check_query_object_type(v, opts, field)

    def check_filterable(self, expression):
        """Raise an error if expression cannot be used in a WHERE clause."""
        if (
            hasattr(expression, 'resolve_expression') and
            not getattr(expression, 'filterable', True)
        ):
            raise NotSupportedError(
                expression.__class__.__name__ + ' is disallowed in the filter '
                'clause.'
            )
        if hasattr(expression, 'get_source_expressions'):
            for expr in expression.get_source_expressions():
                self.check_filterable(expr)

    def build_lookup(self, lookups, lhs, rhs):
        """
        Try to extract transforms and lookup from given lhs.

        The lhs value is something that works like SQLExpression.
        The rhs value is what the lookup is going to compare against.
        The lookups is a list of names to extract using get_lookup()
        and get_transform().
        """
        # __exact is the default lookup if one isn't given.
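        # For example (illustrative): for 'pub_date__year__gt' the caller
        # passes lookups=['year', 'gt'], so transforms=['year'] and
        # lookup_name='gt'; for a plain 'name' it passes [], giving 'exact'.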
*transforms, lookup_name = lookups or ['exact'] for name in transforms: lhs = self.try_transform(lhs, name) # First try get_lookup() so that the lookup takes precedence if the lhs # supports both transform and lookup for the name. lookup_class = lhs.get_lookup(lookup_name) if not lookup_class: if lhs.field.is_relation: raise FieldError('Related Field got invalid lookup: {}'.format(lookup_name)) # A lookup wasn't found. Try to interpret the name as a transform # and do an Exact lookup against it. lhs = self.try_transform(lhs, lookup_name) lookup_name = 'exact' lookup_class = lhs.get_lookup(lookup_name) if not lookup_class: return lookup = lookup_class(lhs, rhs) # Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all # uses of None as a query value unless the lookup supports it. if lookup.rhs is None and not lookup.can_use_none_as_rhs: if lookup_name not in ('exact', 'iexact'): raise ValueError("Cannot use None as a query value") return lhs.get_lookup('isnull')(lhs, True) # For Oracle '' is equivalent to null. The check must be done at this # stage because join promotion can't be done in the compiler. Using # DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here. # A similar thing is done in is_nullable(), too. if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and lookup_name == 'exact' and lookup.rhs == ''): return lhs.get_lookup('isnull')(lhs, True) return lookup def try_transform(self, lhs, name): """ Helper method for build_lookup(). Try to fetch and initialize a transform for name parameter from lhs. """ transform_class = lhs.get_transform(name) if transform_class: return transform_class(lhs) else: output_field = lhs.output_field.__class__ suggested_lookups = difflib.get_close_matches(name, output_field.get_lookups()) if suggested_lookups: suggestion = ', perhaps you meant %s?' % ' or '.join(suggested_lookups) else: suggestion = '.' raise FieldError( "Unsupported lookup '%s' for %s or join on the field not " "permitted%s" % (name, output_field.__name__, suggestion) ) def build_filter(self, filter_expr, branch_negated=False, current_negated=False, can_reuse=None, allow_joins=True, split_subq=True, check_filterable=True): """ Build a WhereNode for a single filter clause but don't add it to this Query. Query.add_q() will then add this filter to the where Node. The 'branch_negated' tells us if the current branch contains any negations. This will be used to determine if subqueries are needed. The 'current_negated' is used to determine if the current filter is negated or not and this will be used to determine if IS NULL filtering is needed. The difference between current_negated and branch_negated is that branch_negated is set on first negation, but current_negated is flipped for each negation. Note that add_filter will not do any negating itself, that is done upper in the code by add_q(). The 'can_reuse' is a set of reusable joins for multijoins. The method will create a filter clause that can be added to the current query. However, if the filter isn't added to the query then the caller is responsible for unreffing the joins used. 
""" if isinstance(filter_expr, dict): raise FieldError("Cannot parse keyword query as dict") if isinstance(filter_expr, Q): return self._add_q( filter_expr, branch_negated=branch_negated, current_negated=current_negated, used_aliases=can_reuse, allow_joins=allow_joins, split_subq=split_subq, check_filterable=check_filterable, ) if hasattr(filter_expr, 'resolve_expression'): if not getattr(filter_expr, 'conditional', False): raise TypeError('Cannot filter against a non-conditional expression.') condition = filter_expr.resolve_expression(self, allow_joins=allow_joins) if not isinstance(condition, Lookup): condition = self.build_lookup(['exact'], condition, True) return WhereNode([condition], connector=AND), [] arg, value = filter_expr if not arg: raise FieldError("Cannot parse keyword query %r" % arg) lookups, parts, reffed_expression = self.solve_lookup_type(arg) if check_filterable: self.check_filterable(reffed_expression) if not allow_joins and len(parts) > 1: raise FieldError("Joined field references are not permitted in this query") pre_joins = self.alias_refcount.copy() value = self.resolve_lookup_value(value, can_reuse, allow_joins) used_joins = {k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)} if check_filterable: self.check_filterable(value) if reffed_expression: condition = self.build_lookup(lookups, reffed_expression, value) return WhereNode([condition], connector=AND), [] opts = self.get_meta() alias = self.get_initial_alias() allow_many = not branch_negated or not split_subq try: join_info = self.setup_joins( parts, opts, alias, can_reuse=can_reuse, allow_many=allow_many, ) # Prevent iterator from being consumed by check_related_objects() if isinstance(value, Iterator): value = list(value) self.check_related_objects(join_info.final_field, value, join_info.opts) # split_exclude() needs to know which joins were generated for the # lookup parts self._lookup_joins = join_info.joins except MultiJoin as e: return self.split_exclude(filter_expr, can_reuse, e.names_with_path) # Update used_joins before trimming since they are reused to determine # which joins could be later promoted to INNER. used_joins.update(join_info.joins) targets, alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path) if can_reuse is not None: can_reuse.update(join_list) if join_info.final_field.is_relation: # No support for transforms for relational fields num_lookups = len(lookups) if num_lookups > 1: raise FieldError('Related Field got invalid lookup: {}'.format(lookups[0])) if len(targets) == 1: col = self._get_col(targets[0], join_info.final_field, alias) else: col = MultiColSource(alias, targets, join_info.targets, join_info.final_field) else: col = self._get_col(targets[0], join_info.final_field, alias) condition = self.build_lookup(lookups, col, value) lookup_type = condition.lookup_name clause = WhereNode([condition], connector=AND) require_outer = lookup_type == 'isnull' and condition.rhs is True and not current_negated if current_negated and (lookup_type != 'isnull' or condition.rhs is False) and condition.rhs is not None: require_outer = True if lookup_type != 'isnull': # The condition added here will be SQL like this: # NOT (col IS NOT NULL), where the first NOT is added in # upper layers of code. The reason for addition is that if col # is null, then col != someval will result in SQL "unknown" # which isn't the same as in Python. 
The Python None handling # is wanted, and it can be gotten by # (col IS NULL OR col != someval) # <=> # NOT (col IS NOT NULL AND col = someval). if ( self.is_nullable(targets[0]) or self.alias_map[join_list[-1]].join_type == LOUTER ): lookup_class = targets[0].get_lookup('isnull') col = self._get_col(targets[0], join_info.targets[0], alias) clause.add(lookup_class(col, False), AND) # If someval is a nullable column, someval IS NOT NULL is # added. if isinstance(value, Col) and self.is_nullable(value.target): lookup_class = value.target.get_lookup('isnull') clause.add(lookup_class(value, False), AND) return clause, used_joins if not require_outer else () def add_filter(self, filter_lhs, filter_rhs): self.add_q(Q((filter_lhs, filter_rhs))) def add_q(self, q_object): """ A preprocessor for the internal _add_q(). Responsible for doing final join promotion. """ # For join promotion this case is doing an AND for the added q_object # and existing conditions. So, any existing inner join forces the join # type to remain inner. Existing outer joins can however be demoted. # (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if # rel_a doesn't produce any rows, then the whole condition must fail. # So, demotion is OK. existing_inner = {a for a in self.alias_map if self.alias_map[a].join_type == INNER} clause, _ = self._add_q(q_object, self.used_aliases) if clause: self.where.add(clause, AND) self.demote_joins(existing_inner) def build_where(self, filter_expr): return self.build_filter(filter_expr, allow_joins=False)[0] def clear_where(self): self.where = WhereNode() def _add_q(self, q_object, used_aliases, branch_negated=False, current_negated=False, allow_joins=True, split_subq=True, check_filterable=True): """Add a Q-object to the current filter.""" connector = q_object.connector current_negated = current_negated ^ q_object.negated branch_negated = branch_negated or q_object.negated target_clause = WhereNode(connector=connector, negated=q_object.negated) joinpromoter = JoinPromoter(q_object.connector, len(q_object.children), current_negated) for child in q_object.children: child_clause, needed_inner = self.build_filter( child, can_reuse=used_aliases, branch_negated=branch_negated, current_negated=current_negated, allow_joins=allow_joins, split_subq=split_subq, check_filterable=check_filterable, ) joinpromoter.add_votes(needed_inner) if child_clause: target_clause.add(child_clause, connector) needed_inner = joinpromoter.update_join_types(self) return target_clause, needed_inner def build_filtered_relation_q(self, q_object, reuse, branch_negated=False, current_negated=False): """Add a FilteredRelation object to the current filter.""" connector = q_object.connector current_negated ^= q_object.negated branch_negated = branch_negated or q_object.negated target_clause = WhereNode(connector=connector, negated=q_object.negated) for child in q_object.children: if isinstance(child, Node): child_clause = self.build_filtered_relation_q( child, reuse=reuse, branch_negated=branch_negated, current_negated=current_negated, ) else: child_clause, _ = self.build_filter( child, can_reuse=reuse, branch_negated=branch_negated, current_negated=current_negated, allow_joins=True, split_subq=False, ) target_clause.add(child_clause, connector) return target_clause def add_filtered_relation(self, filtered_relation, alias): filtered_relation.alias = alias lookups = dict(get_children_from_q(filtered_relation.condition)) relation_lookup_parts, relation_field_parts, _ = 
self.solve_lookup_type(filtered_relation.relation_name) if relation_lookup_parts: raise ValueError( "FilteredRelation's relation_name cannot contain lookups " "(got %r)." % filtered_relation.relation_name ) for lookup in chain(lookups): lookup_parts, lookup_field_parts, _ = self.solve_lookup_type(lookup) shift = 2 if not lookup_parts else 1 lookup_field_path = lookup_field_parts[:-shift] for idx, lookup_field_part in enumerate(lookup_field_path): if len(relation_field_parts) > idx: if relation_field_parts[idx] != lookup_field_part: raise ValueError( "FilteredRelation's condition doesn't support " "relations outside the %r (got %r)." % (filtered_relation.relation_name, lookup) ) else: raise ValueError( "FilteredRelation's condition doesn't support nested " "relations deeper than the relation_name (got %r for " "%r)." % (lookup, filtered_relation.relation_name) ) self._filtered_relations[filtered_relation.alias] = filtered_relation def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False): """ Walk the list of names and turns them into PathInfo tuples. A single name in 'names' can generate multiple PathInfos (m2m, for example). 'names' is the path of names to travel, 'opts' is the model Options we start the name resolving from, 'allow_many' is as for setup_joins(). If fail_on_missing is set to True, then a name that can't be resolved will generate a FieldError. Return a list of PathInfo tuples. In addition return the final field (the last used join field) and target (which is a field guaranteed to contain the same value as the final field). Finally, return those names that weren't found (which are likely transforms and the final lookup). """ path, names_with_path = [], [] for pos, name in enumerate(names): cur_names_with_path = (name, []) if name == 'pk': name = opts.pk.name field = None filtered_relation = None try: field = opts.get_field(name) except FieldDoesNotExist: if name in self.annotation_select: field = self.annotation_select[name].output_field elif name in self._filtered_relations and pos == 0: filtered_relation = self._filtered_relations[name] if LOOKUP_SEP in filtered_relation.relation_name: parts = filtered_relation.relation_name.split(LOOKUP_SEP) filtered_relation_path, field, _, _ = self.names_to_path( parts, opts, allow_many, fail_on_missing, ) path.extend(filtered_relation_path[:-1]) else: field = opts.get_field(filtered_relation.relation_name) if field is not None: # Fields that contain one-to-many relations with a generic # model (like a GenericForeignKey) cannot generate reverse # relations and therefore cannot be used for reverse querying. if field.is_relation and not field.related_model: raise FieldError( "Field %r does not generate an automatic reverse " "relation and therefore cannot be used for reverse " "querying. If it is a GenericForeignKey, consider " "adding a GenericRelation." % name ) try: model = field.model._meta.concrete_model except AttributeError: # QuerySet.annotate() may introduce fields that aren't # attached to a model. model = None else: # We didn't find the current field, so move position back # one step. pos -= 1 if pos == -1 or fail_on_missing: available = sorted([ *get_field_names_from_opts(opts), *self.annotation_select, *self._filtered_relations, ]) raise FieldError("Cannot resolve keyword '%s' into field. 
" "Choices are: %s" % (name, ", ".join(available))) break # Check if we need any joins for concrete inheritance cases (the # field lives in parent, but we are currently in one of its # children) if model is not opts.model: path_to_parent = opts.get_path_to_parent(model) if path_to_parent: path.extend(path_to_parent) cur_names_with_path[1].extend(path_to_parent) opts = path_to_parent[-1].to_opts if hasattr(field, 'get_path_info'): pathinfos = field.get_path_info(filtered_relation) if not allow_many: for inner_pos, p in enumerate(pathinfos): if p.m2m: cur_names_with_path[1].extend(pathinfos[0:inner_pos + 1]) names_with_path.append(cur_names_with_path) raise MultiJoin(pos + 1, names_with_path) last = pathinfos[-1] path.extend(pathinfos) final_field = last.join_field opts = last.to_opts targets = last.target_fields cur_names_with_path[1].extend(pathinfos) names_with_path.append(cur_names_with_path) else: # Local non-relational field. final_field = field targets = (field,) if fail_on_missing and pos + 1 != len(names): raise FieldError( "Cannot resolve keyword %r into field. Join on '%s'" " not permitted." % (names[pos + 1], name)) break return path, final_field, targets, names[pos + 1:] def setup_joins(self, names, opts, alias, can_reuse=None, allow_many=True): """ Compute the necessary table joins for the passage through the fields given in 'names'. 'opts' is the Options class for the current model (which gives the table we are starting from), 'alias' is the alias for the table to start the joining from. The 'can_reuse' defines the reverse foreign key joins we can reuse. It can be None in which case all joins are reusable or a set of aliases that can be reused. Note that non-reverse foreign keys are always reusable when using setup_joins(). If 'allow_many' is False, then any reverse foreign key seen will generate a MultiJoin exception. Return the final field involved in the joins, the target field (used for any 'where' constraint), the final 'opts' value, the joins, the field path traveled to generate the joins, and a transform function that takes a field and alias and is equivalent to `field.get_col(alias)` in the simple case but wraps field transforms if they were included in names. The target field is the field containing the concrete value. Final field can be something different, for example foreign key pointing to that value. Final field is needed for example in some value conversions (convert 'obj' in fk__id=obj to pk val using the foreign key field for example). """ joins = [alias] # The transform can't be applied yet, as joins must be trimmed later. # To avoid making every caller of this method look up transforms # directly, compute transforms here and create a partial that converts # fields to the appropriate wrapped version. def final_transformer(field, alias): if not self.alias_cols: alias = None return field.get_col(alias) # Try resolving all the names as fields first. If there's an error, # treat trailing names as lookups until a field can be resolved. last_field_exception = None for pivot in range(len(names), 0, -1): try: path, final_field, targets, rest = self.names_to_path( names[:pivot], opts, allow_many, fail_on_missing=True, ) except FieldError as exc: if pivot == 1: # The first item cannot be a lookup, so it's safe # to raise the field error here. raise else: last_field_exception = exc else: # The transforms are the remaining items that couldn't be # resolved into fields. 
transforms = names[pivot:] break for name in transforms: def transform(field, alias, *, name, previous): try: wrapped = previous(field, alias) return self.try_transform(wrapped, name) except FieldError: # FieldError is raised if the transform doesn't exist. if isinstance(final_field, Field) and last_field_exception: raise last_field_exception else: raise final_transformer = functools.partial(transform, name=name, previous=final_transformer) # Then, add the path to the query's joins. Note that we can't trim # joins at this stage - we will need the information about join type # of the trimmed joins. for join in path: if join.filtered_relation: filtered_relation = join.filtered_relation.clone() table_alias = filtered_relation.alias else: filtered_relation = None table_alias = None opts = join.to_opts if join.direct: nullable = self.is_nullable(join.join_field) else: nullable = True connection = Join( opts.db_table, alias, table_alias, INNER, join.join_field, nullable, filtered_relation=filtered_relation, ) reuse = can_reuse if join.m2m else None alias = self.join(connection, reuse=reuse) joins.append(alias) if filtered_relation: filtered_relation.path = joins[:] return JoinInfo(final_field, targets, opts, joins, path, final_transformer) def trim_joins(self, targets, joins, path): """ The 'target' parameter is the final field being joined to, 'joins' is the full list of join aliases. The 'path' contain the PathInfos used to create the joins. Return the final target field and table alias and the new active joins. Always trim any direct join if the target column is already in the previous table. Can't trim reverse joins as it's unknown if there's anything on the other side of the join. """ joins = joins[:] for pos, info in enumerate(reversed(path)): if len(joins) == 1 or not info.direct: break if info.filtered_relation: break join_targets = {t.column for t in info.join_field.foreign_related_fields} cur_targets = {t.column for t in targets} if not cur_targets.issubset(join_targets): break targets_dict = {r[1].column: r[0] for r in info.join_field.related_fields if r[1].column in cur_targets} targets = tuple(targets_dict[t.column] for t in targets) self.unref_alias(joins.pop()) return targets, joins[-1], joins @classmethod def _gen_cols(cls, exprs, include_external=False): for expr in exprs: if isinstance(expr, Col): yield expr elif include_external and callable(getattr(expr, 'get_external_cols', None)): yield from expr.get_external_cols() else: yield from cls._gen_cols( expr.get_source_expressions(), include_external=include_external, ) @classmethod def _gen_col_aliases(cls, exprs): yield from (expr.alias for expr in cls._gen_cols(exprs)) def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False): annotation = self.annotations.get(name) if annotation is not None: if not allow_joins: for alias in self._gen_col_aliases([annotation]): if isinstance(self.alias_map[alias], Join): raise FieldError( 'Joined field references are not permitted in ' 'this query' ) if summarize: # Summarize currently means we are doing an aggregate() query # which is executed as a wrapped subquery if any of the # aggregate() elements reference an existing annotation. In # that case we need to return a Ref to the subquery's annotation. if name not in self.annotation_select: raise FieldError( "Cannot aggregate over the '%s' alias. Use annotate() " "to promote it." 
% name ) return Ref(name, self.annotation_select[name]) else: return annotation else: field_list = name.split(LOOKUP_SEP) annotation = self.annotations.get(field_list[0]) if annotation is not None: for transform in field_list[1:]: annotation = self.try_transform(annotation, transform) return annotation join_info = self.setup_joins(field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse) targets, final_alias, join_list = self.trim_joins(join_info.targets, join_info.joins, join_info.path) if not allow_joins and len(join_list) > 1: raise FieldError('Joined field references are not permitted in this query') if len(targets) > 1: raise FieldError("Referencing multicolumn fields with F() objects " "isn't supported") # Verify that the last lookup in name is a field or a transform: # transform_function() raises FieldError if not. transform = join_info.transform_function(targets[0], final_alias) if reuse is not None: reuse.update(join_list) return transform def split_exclude(self, filter_expr, can_reuse, names_with_path): """ When doing an exclude against any kind of N-to-many relation, we need to use a subquery. This method constructs the nested query, given the original exclude filter (filter_expr) and the portion up to the first N-to-many relation field. For example, if the origin filter is ~Q(child__name='foo'), filter_expr is ('child__name', 'foo') and can_reuse is a set of joins usable for filters in the original query. We will turn this into equivalent of: WHERE NOT EXISTS( SELECT 1 FROM child WHERE name = 'foo' AND child.parent_id = parent.id LIMIT 1 ) """ # Generate the inner query. query = Query(self.model) query._filtered_relations = self._filtered_relations filter_lhs, filter_rhs = filter_expr if isinstance(filter_rhs, OuterRef): filter_rhs = OuterRef(filter_rhs) elif isinstance(filter_rhs, F): filter_rhs = OuterRef(filter_rhs.name) query.add_filter(filter_lhs, filter_rhs) query.clear_ordering(force=True) # Try to have as simple as possible subquery -> trim leading joins from # the subquery. trimmed_prefix, contains_louter = query.trim_start(names_with_path) col = query.select[0] select_field = col.target alias = col.alias if alias in can_reuse: pk = select_field.model._meta.pk # Need to add a restriction so that outer query's filters are in effect for # the subquery, too. query.bump_prefix(self) lookup_class = select_field.get_lookup('exact') # Note that the query.select[0].alias is different from alias # due to bump_prefix above. lookup = lookup_class(pk.get_col(query.select[0].alias), pk.get_col(alias)) query.where.add(lookup, AND) query.external_aliases[alias] = True lookup_class = select_field.get_lookup('exact') lookup = lookup_class(col, ResolvedOuterRef(trimmed_prefix)) query.where.add(lookup, AND) condition, needed_inner = self.build_filter(Exists(query)) if contains_louter: or_null_condition, _ = self.build_filter( ('%s__isnull' % trimmed_prefix, True), current_negated=True, branch_negated=True, can_reuse=can_reuse) condition.add(or_null_condition, OR) # Note that the end result will be: # (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL. # This might look crazy but due to how IN works, this seems to be # correct. If the IS NOT NULL check is removed then outercol NOT # IN will return UNKNOWN. If the IS NULL check is removed, then if # outercol IS NULL we will not match the row. 
return condition, needed_inner def set_empty(self): self.where.add(NothingNode(), AND) for query in self.combined_queries: query.set_empty() def is_empty(self): return any(isinstance(c, NothingNode) for c in self.where.children) def set_limits(self, low=None, high=None): """ Adjust the limits on the rows retrieved. Use low/high to set these, as it makes it more Pythonic to read and write. When the SQL query is created, convert them to the appropriate offset and limit values. Apply any limits passed in here to the existing constraints. Add low to the current low value and clamp both to any existing high value. """ if high is not None: if self.high_mark is not None: self.high_mark = min(self.high_mark, self.low_mark + high) else: self.high_mark = self.low_mark + high if low is not None: if self.high_mark is not None: self.low_mark = min(self.high_mark, self.low_mark + low) else: self.low_mark = self.low_mark + low if self.low_mark == self.high_mark: self.set_empty() def clear_limits(self): """Clear any existing limits.""" self.low_mark, self.high_mark = 0, None @property def is_sliced(self): return self.low_mark != 0 or self.high_mark is not None def has_limit_one(self): return self.high_mark is not None and (self.high_mark - self.low_mark) == 1 def can_filter(self): """ Return True if adding filters to this instance is still possible. Typically, this means no limits or offsets have been put on the results. """ return not self.is_sliced def clear_select_clause(self): """Remove all fields from SELECT clause.""" self.select = () self.default_cols = False self.select_related = False self.set_extra_mask(()) self.set_annotation_mask(()) def clear_select_fields(self): """ Clear the list of fields to select (but not extra_select columns). Some queryset types completely replace any existing list of select columns. """ self.select = () self.values_select = () def add_select_col(self, col, name): self.select += col, self.values_select += name, def set_select(self, cols): self.default_cols = False self.select = tuple(cols) def add_distinct_fields(self, *field_names): """ Add and resolve the given fields to the query's "distinct on" clause. """ self.distinct_fields = field_names self.distinct = True def add_fields(self, field_names, allow_m2m=True): """ Add the given (model) fields to the select set. Add the field names in the order specified. """ alias = self.get_initial_alias() opts = self.get_meta() try: cols = [] for name in field_names: # Join promotion note - we must not remove any rows here, so # if there is no existing joins, use outer join. join_info = self.setup_joins(name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m) targets, final_alias, joins = self.trim_joins( join_info.targets, join_info.joins, join_info.path, ) for target in targets: cols.append(join_info.transform_function(target, final_alias)) if cols: self.set_select(cols) except MultiJoin: raise FieldError("Invalid field name: '%s'" % name) except FieldError: if LOOKUP_SEP in name: # For lookups spanning over relationships, show the error # from the model on which the lookup failed. raise elif name in self.annotations: raise FieldError( "Cannot select the '%s' alias. Use annotate() to promote " "it." % name ) else: names = sorted([ *get_field_names_from_opts(opts), *self.extra, *self.annotation_select, *self._filtered_relations ]) raise FieldError("Cannot resolve keyword %r into field. 
" "Choices are: %s" % (name, ", ".join(names))) def add_ordering(self, *ordering): """ Add items from the 'ordering' sequence to the query's "order by" clause. These items are either field names (not column names) -- possibly with a direction prefix ('-' or '?') -- or OrderBy expressions. If 'ordering' is empty, clear all ordering from the query. """ errors = [] for item in ordering: if isinstance(item, str): if item == '?': continue if item.startswith('-'): item = item[1:] if item in self.annotations: continue if self.extra and item in self.extra: continue # names_to_path() validates the lookup. A descriptive # FieldError will be raise if it's not. self.names_to_path(item.split(LOOKUP_SEP), self.model._meta) elif not hasattr(item, 'resolve_expression'): errors.append(item) if getattr(item, 'contains_aggregate', False): raise FieldError( 'Using an aggregate in order_by() without also including ' 'it in annotate() is not allowed: %s' % item ) if errors: raise FieldError('Invalid order_by arguments: %s' % errors) if ordering: self.order_by += ordering else: self.default_ordering = False def clear_ordering(self, force=False, clear_default=True): """ Remove any ordering settings if the current query allows it without side effects, set 'force' to True to clear the ordering regardless. If 'clear_default' is True, there will be no ordering in the resulting query (not even the model's default). """ if not force and (self.is_sliced or self.distinct_fields or self.select_for_update): return self.order_by = () self.extra_order_by = () if clear_default: self.default_ordering = False def set_group_by(self, allow_aliases=True): """ Expand the GROUP BY clause required by the query. This will usually be the set of all non-aggregate fields in the return data. If the database backend supports grouping by the primary key, and the query would be equivalent, the optimization will be made automatically. """ # Column names from JOINs to check collisions with aliases. if allow_aliases: column_names = set() seen_models = set() for join in list(self.alias_map.values())[1:]: # Skip base table. model = join.join_field.related_model if model not in seen_models: column_names.update({ field.column for field in model._meta.local_concrete_fields }) seen_models.add(model) group_by = list(self.select) if self.annotation_select: for alias, annotation in self.annotation_select.items(): if not allow_aliases or alias in column_names: alias = None group_by_cols = annotation.get_group_by_cols(alias=alias) group_by.extend(group_by_cols) self.group_by = tuple(group_by) def add_select_related(self, fields): """ Set up the select_related data structure so that we only select certain related models (as opposed to all models, when self.select_related=True). """ if isinstance(self.select_related, bool): field_dict = {} else: field_dict = self.select_related for field in fields: d = field_dict for part in field.split(LOOKUP_SEP): d = d.setdefault(part, {}) self.select_related = field_dict def add_extra(self, select, select_params, where, params, tables, order_by): """ Add data to the various extra_* attributes for user-created additions to the query. """ if select: # We need to pair any placeholder markers in the 'select' # dictionary with their parameters in 'select_params' so that # subsequent updates to the select dictionary also adjust the # parameters appropriately. 
select_pairs = {} if select_params: param_iter = iter(select_params) else: param_iter = iter([]) for name, entry in select.items(): entry = str(entry) entry_params = [] pos = entry.find("%s") while pos != -1: if pos == 0 or entry[pos - 1] != '%': entry_params.append(next(param_iter)) pos = entry.find("%s", pos + 2) select_pairs[name] = (entry, entry_params) self.extra.update(select_pairs) if where or params: self.where.add(ExtraWhere(where, params), AND) if tables: self.extra_tables += tuple(tables) if order_by: self.extra_order_by = order_by def clear_deferred_loading(self): """Remove any fields from the deferred loading set.""" self.deferred_loading = (frozenset(), True) def add_deferred_loading(self, field_names): """ Add the given list of model field names to the set of fields to exclude from loading from the database when automatic column selection is done. Add the new field names to any existing field names that are deferred (or removed from any existing field names that are marked as the only ones for immediate loading). """ # Fields on related models are stored in the literal double-underscore # format, so that we can use a set datastructure. We do the foo__bar # splitting and handling when computing the SQL column names (as part of # get_columns()). existing, defer = self.deferred_loading if defer: # Add to existing deferred names. self.deferred_loading = existing.union(field_names), True else: # Remove names from the set of any existing "immediate load" names. if new_existing := existing.difference(field_names): self.deferred_loading = new_existing, False else: self.clear_deferred_loading() if new_only := set(field_names).difference(existing): self.deferred_loading = new_only, True def add_immediate_loading(self, field_names): """ Add the given list of model field names to the set of fields to retrieve when the SQL is executed ("immediate loading" fields). The field names replace any existing immediate loading field names. If there are field names already specified for deferred loading, remove those names from the new field_names before storing the new names for immediate loading. (That is, immediate loading overrides any existing immediate values, but respects existing deferrals.) """ existing, defer = self.deferred_loading field_names = set(field_names) if 'pk' in field_names: field_names.remove('pk') field_names.add(self.get_meta().pk.name) if defer: # Remove any existing deferred names from the current set before # setting the new names. self.deferred_loading = field_names.difference(existing), False else: # Replace any existing "immediate load" field names. self.deferred_loading = frozenset(field_names), False def get_loaded_field_names(self): """ If any fields are marked to be deferred, return a dictionary mapping models to a set of names in those fields that will be loaded. If a model is not in the returned dictionary, none of its fields are deferred. If no fields are marked for deferral, return an empty dictionary. 
""" # We cache this because we call this function multiple times # (compiler.fill_related_selections, query.iterator) try: return self._loaded_field_names_cache except AttributeError: collection = {} self.deferred_to_data(collection, self.get_loaded_field_names_cb) self._loaded_field_names_cache = collection return collection def get_loaded_field_names_cb(self, target, model, fields): """Callback used by get_deferred_field_names().""" target[model] = {f.attname for f in fields} def set_annotation_mask(self, names): """Set the mask of annotations that will be returned by the SELECT.""" if names is None: self.annotation_select_mask = None else: self.annotation_select_mask = set(names) self._annotation_select_cache = None def append_annotation_mask(self, names): if self.annotation_select_mask is not None: self.set_annotation_mask(self.annotation_select_mask.union(names)) def set_extra_mask(self, names): """ Set the mask of extra select items that will be returned by SELECT. Don't remove them from the Query since they might be used later. """ if names is None: self.extra_select_mask = None else: self.extra_select_mask = set(names) self._extra_select_cache = None def set_values(self, fields): self.select_related = False self.clear_deferred_loading() self.clear_select_fields() if fields: field_names = [] extra_names = [] annotation_names = [] if not self.extra and not self.annotations: # Shortcut - if there are no extra or annotations, then # the values() clause must be just field names. field_names = list(fields) else: self.default_cols = False for f in fields: if f in self.extra_select: extra_names.append(f) elif f in self.annotation_select: annotation_names.append(f) else: field_names.append(f) self.set_extra_mask(extra_names) self.set_annotation_mask(annotation_names) selected = frozenset(field_names + extra_names + annotation_names) else: field_names = [f.attname for f in self.model._meta.concrete_fields] selected = frozenset(field_names) # Selected annotations must be known before setting the GROUP BY # clause. if self.group_by is True: self.add_fields((f.attname for f in self.model._meta.concrete_fields), False) # Disable GROUP BY aliases to avoid orphaning references to the # SELECT clause which is about to be cleared. self.set_group_by(allow_aliases=False) self.clear_select_fields() elif self.group_by: # Resolve GROUP BY annotation references if they are not part of # the selected fields anymore. group_by = [] for expr in self.group_by: if isinstance(expr, Ref) and expr.refs not in selected: expr = self.annotations[expr.refs] group_by.append(expr) self.group_by = tuple(group_by) self.values_select = tuple(field_names) self.add_fields(field_names, True) @property def annotation_select(self): """ Return the dictionary of aggregate columns that are not masked and should be used in the SELECT clause. Cache this result for performance. 
""" if self._annotation_select_cache is not None: return self._annotation_select_cache elif not self.annotations: return {} elif self.annotation_select_mask is not None: self._annotation_select_cache = { k: v for k, v in self.annotations.items() if k in self.annotation_select_mask } return self._annotation_select_cache else: return self.annotations @property def extra_select(self): if self._extra_select_cache is not None: return self._extra_select_cache if not self.extra: return {} elif self.extra_select_mask is not None: self._extra_select_cache = { k: v for k, v in self.extra.items() if k in self.extra_select_mask } return self._extra_select_cache else: return self.extra def trim_start(self, names_with_path): """ Trim joins from the start of the join path. The candidates for trim are the PathInfos in names_with_path structure that are m2m joins. Also set the select column so the start matches the join. This method is meant to be used for generating the subquery joins & cols in split_exclude(). Return a lookup usable for doing outerq.filter(lookup=self) and a boolean indicating if the joins in the prefix contain a LEFT OUTER join. _""" all_paths = [] for _, paths in names_with_path: all_paths.extend(paths) contains_louter = False # Trim and operate only on tables that were generated for # the lookup part of the query. That is, avoid trimming # joins generated for F() expressions. lookup_tables = [ t for t in self.alias_map if t in self._lookup_joins or t == self.base_table ] for trimmed_paths, path in enumerate(all_paths): if path.m2m: break if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER: contains_louter = True alias = lookup_tables[trimmed_paths] self.unref_alias(alias) # The path.join_field is a Rel, lets get the other side's field join_field = path.join_field.field # Build the filter prefix. paths_in_prefix = trimmed_paths trimmed_prefix = [] for name, path in names_with_path: if paths_in_prefix - len(path) < 0: break trimmed_prefix.append(name) paths_in_prefix -= len(path) trimmed_prefix.append( join_field.foreign_related_fields[0].name) trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix) # Lets still see if we can trim the first join from the inner query # (that is, self). We can't do this for: # - LEFT JOINs because we would miss those rows that have nothing on # the outer side, # - INNER JOINs from filtered relations because we would miss their # filters. first_join = self.alias_map[lookup_tables[trimmed_paths + 1]] if first_join.join_type != LOUTER and not first_join.filtered_relation: select_fields = [r[0] for r in join_field.related_fields] select_alias = lookup_tables[trimmed_paths + 1] self.unref_alias(lookup_tables[trimmed_paths]) extra_restriction = join_field.get_extra_restriction(None, lookup_tables[trimmed_paths + 1]) if extra_restriction: self.where.add(extra_restriction, AND) else: # TODO: It might be possible to trim more joins from the start of the # inner query if it happens to have a longer join chain containing the # values in select_fields. Lets punt this one for now. select_fields = [r[1] for r in join_field.related_fields] select_alias = lookup_tables[trimmed_paths] # The found starting point is likely a Join instead of a BaseTable reference. # But the first entry in the query's FROM clause must not be a JOIN. 
for table in self.alias_map: if self.alias_refcount[table] > 0: self.alias_map[table] = BaseTable(self.alias_map[table].table_name, table) break self.set_select([f.get_col(select_alias) for f in select_fields]) return trimmed_prefix, contains_louter def is_nullable(self, field): """ Check if the given field should be treated as nullable. Some backends treat '' as null and Django treats such fields as nullable for those backends. In such situations field.null can be False even if we should treat the field as nullable. """ # We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have # (nor should it have) knowledge of which connection is going to be # used. The proper fix would be to defer all decisions where # is_nullable() is needed to the compiler stage, but that is not easy # to do currently. return ( connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls and field.empty_strings_allowed ) or field.null def get_order_dir(field, default='ASC'): """ Return the field name and direction for an order specification. For example, '-foo' is returned as ('foo', 'DESC'). The 'default' param is used to indicate which way no prefix (or a '+' prefix) should sort. The '-' prefix always sorts the opposite way. """ dirn = ORDER_DIR[default] if field[0] == '-': return field[1:], dirn[1] return field, dirn[0] def add_to_dict(data, key, value): """ Add "value" to the set of values for "key", whether or not "key" already exists. """ if key in data: data[key].add(value) else: data[key] = {value} def is_reverse_o2o(field): """ Check if the given field is reverse-o2o. The field is expected to be some sort of relation field or related object. """ return field.is_relation and field.one_to_one and not field.concrete class JoinPromoter: """ A class to abstract away join promotion problems for complex filter conditions. """ def __init__(self, connector, num_children, negated): self.connector = connector self.negated = negated if self.negated: if connector == AND: self.effective_connector = OR else: self.effective_connector = AND else: self.effective_connector = self.connector self.num_children = num_children # Maps of table alias to how many times it is seen as required for # inner and/or outer joins. self.votes = Counter() def add_votes(self, votes): """ Add single vote per item to self.votes. Parameter can be any iterable. """ self.votes.update(votes) def update_join_types(self, query): """ Change join types so that the generated query is as efficient as possible, but still correct. So, change as many joins as possible to INNER, but don't make OUTER joins INNER if that could remove results from the query. """ to_promote = set() to_demote = set() # The effective_connector is used so that NOT (a AND b) is treated # similarly to (a OR b) for join promotion. for table, votes in self.votes.items(): # We must use outer joins in OR case when the join isn't contained # in all of the joins. Otherwise the INNER JOIN itself could remove # valid results. Consider the case where a model with rel_a and # rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now, # if rel_a join doesn't produce any results is null (for example # reverse foreign key or null value in direct foreign key), and # there is a matching row in rel_b with col=2, then an INNER join # to rel_a would remove a valid match from the query. So, we need # to promote any existing INNER to LOUTER (it is possible this # promotion in turn will be demoted later on). 
if self.effective_connector == 'OR' and votes < self.num_children: to_promote.add(table) # If connector is AND and there is a filter that can match only # when there is a joinable row, then use INNER. For example, in # rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL # as join output, then the col=1 or col=2 can't match (as # NULL=anything is always false). # For the OR case, if all children voted for a join to be inner, # then we can use INNER for the join. For example: # (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell) # then if rel_a doesn't produce any rows, the whole condition # can't match. Hence we can safely use INNER join. if self.effective_connector == 'AND' or ( self.effective_connector == 'OR' and votes == self.num_children): to_demote.add(table) # Finally, what happens in cases where we have: # (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0 # Now, we first generate the OR clause, and promote joins for it # in the first if branch above. Both rel_a and rel_b are promoted # to LOUTER joins. After that we do the AND case. The OR case # voted no inner joins but the rel_a__col__gte=0 votes inner join # for rel_a. We demote it back to INNER join (in AND case a single # vote is enough). The demotion is OK, if rel_a doesn't produce # rows, then the rel_a__col__gte=0 clause can't be true, and thus # the whole clause must be false. So, it is safe to use INNER # join. # Note that in this example we could just as well have the __gte # clause and the OR clause swapped. Or we could replace the __gte # clause with an OR clause containing rel_a__col=1|rel_a__col=2, # and again we could safely demote to INNER. query.promote_joins(to_promote) query.demote_joins(to_demote) return to_demote
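
# --- Illustrative sketch (not part of the original source) ---------------
# A standalone, runnable re-statement of the voting rule implemented in
# JoinPromoter.update_join_types() above. The table names and vote counts
# here are hypothetical; only the thresholds mirror the method: under an
# effective OR, a join is promoted to LOUTER unless every child voted for
# it, while under an effective AND (or a unanimous OR) it is demoted back
# to INNER.
if __name__ == '__main__':
    from collections import Counter

    def classify(effective_connector, num_children, votes):
        to_promote, to_demote = set(), set()
        for table, count in votes.items():
            if effective_connector == 'OR' and count < num_children:
                to_promote.add(table)
            if effective_connector == 'AND' or (
                    effective_connector == 'OR' and count == num_children):
                to_demote.add(table)
        return to_promote, to_demote

    # rel_a__col=1 | rel_b__col=2: each join is seen by only one child, so
    # both must be promoted to LEFT OUTER to avoid dropping valid rows.
    promote, demote = classify('OR', 2, Counter(rel_a=1, rel_b=1))
    print(sorted(promote), sorted(demote))  # ['rel_a', 'rel_b'] []

    # rel_a__col__icontains='x' | rel_a__col__icontains='y': every child
    # votes for rel_a, so the join can safely stay (or become) INNER.
    promote, demote = classify('OR', 2, Counter(rel_a=2))
    print(sorted(promote), sorted(demote))  # [] ['rel_a']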
# django/db/models/sql/compiler.py
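# Illustrative note (not part of the original source): SQLCompiler below is
# the machinery that renders a Query into SQL. A minimal sketch of reaching
# it from a queryset, assuming a configured 'default' database alias and a
# placeholder model named Author:
#
#     qs = Author.objects.filter(name__startswith='A')
#     compiler = qs.query.get_compiler(using='default')
#     sql, params = compiler.as_sql()
#
# str(qs.query) routes through the same as_sql() path for debugging output.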
import collections import json import re from functools import partial from itertools import chain from django.core.exceptions import EmptyResultSet, FieldError from django.db import DatabaseError, NotSupportedError from django.db.models.constants import LOOKUP_SEP from django.db.models.expressions import F, OrderBy, RawSQL, Ref, Value from django.db.models.functions import Cast, Random from django.db.models.query_utils import select_related_descend from django.db.models.sql.constants import ( CURSOR, GET_ITERATOR_CHUNK_SIZE, MULTI, NO_RESULTS, ORDER_DIR, SINGLE, ) from django.db.models.sql.query import Query, get_order_dir from django.db.transaction import TransactionManagementError from django.utils.functional import cached_property from django.utils.hashable import make_hashable from django.utils.regex_helper import _lazy_re_compile class SQLCompiler: # Multiline ordering SQL clause may appear from RawSQL. ordering_parts = _lazy_re_compile( r'^(.*)\s(?:ASC|DESC).*', re.MULTILINE | re.DOTALL, ) def __init__(self, query, connection, using, elide_empty=True): self.query = query self.connection = connection self.using = using # Some queries, e.g. coalesced aggregation, need to be executed even if # they would return an empty result set. self.elide_empty = elide_empty self.quote_cache = {'*': '*'} # The select, klass_info, and annotations are needed by QuerySet.iterator() # these are set as a side-effect of executing the query. Note that we calculate # separately a list of extra select columns needed for grammatical correctness # of the query, but these columns are not included in self.select. self.select = None self.annotation_col_map = None self.klass_info = None self._meta_ordering = None def setup_query(self): if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map): self.query.get_initial_alias() self.select, self.klass_info, self.annotation_col_map = self.get_select() self.col_count = len(self.select) def pre_sql_setup(self): """ Do any necessary class setup immediately prior to producing SQL. This is for things that can't necessarily be done in __init__ because we might not have all the pieces in place at that time. """ self.setup_query() order_by = self.get_order_by() self.where, self.having = self.query.where.split_having() extra_select = self.get_extra_select(order_by, self.select) self.has_extra_select = bool(extra_select) group_by = self.get_group_by(self.select + extra_select, order_by) return extra_select, order_by, group_by def get_group_by(self, select, order_by): """ Return a list of 2-tuples of form (sql, params). The logic of what exactly the GROUP BY clause contains is hard to describe in other words than "if it passes the test suite, then it is correct". """ # Some examples: # SomeModel.objects.annotate(Count('somecol')) # GROUP BY: all fields of the model # # SomeModel.objects.values('name').annotate(Count('somecol')) # GROUP BY: name # # SomeModel.objects.annotate(Count('somecol')).values('name') # GROUP BY: all cols of the model # # SomeModel.objects.values('name', 'pk').annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # SomeModel.objects.values('name').annotate(Count('somecol')).values('pk') # GROUP BY: name, pk # # In fact, the self.query.group_by is the minimal set to GROUP BY. It # can't be ever restricted to a smaller set, but additional columns in # HAVING, ORDER BY, and SELECT clauses are added to it. 
Unfortunately # the end result is that it is impossible to force the query to have # a chosen GROUP BY clause - you can almost do this by using the form: # .values(*wanted_cols).annotate(AnAggregate()) # but any later annotations, extra selects, values calls that # refer some column outside of the wanted_cols, order_by, or even # filter calls can alter the GROUP BY clause. # The query.group_by is either None (no GROUP BY at all), True # (group by select fields), or a list of expressions to be added # to the group by. if self.query.group_by is None: return [] expressions = [] if self.query.group_by is not True: # If the group by is set to a list (by .values() call most likely), # then we need to add everything in it to the GROUP BY clause. # Backwards compatibility hack for setting query.group_by. Remove # when we have public API way of forcing the GROUP BY clause. # Converts string references to expressions. for expr in self.query.group_by: if not hasattr(expr, 'as_sql'): expressions.append(self.query.resolve_ref(expr)) else: expressions.append(expr) # Note that even if the group_by is set, it is only the minimal # set to group by. So, we need to add cols in select, order_by, and # having into the select in any case. ref_sources = { expr.source for expr in expressions if isinstance(expr, Ref) } for expr, _, _ in select: # Skip members of the select clause that are already included # by reference. if expr in ref_sources: continue cols = expr.get_group_by_cols() for col in cols: expressions.append(col) if not self._meta_ordering: for expr, (sql, params, is_ref) in order_by: # Skip references to the SELECT clause, as all expressions in # the SELECT clause are already part of the GROUP BY. if not is_ref: expressions.extend(expr.get_group_by_cols()) having_group_by = self.having.get_group_by_cols() if self.having else () for expr in having_group_by: expressions.append(expr) result = [] seen = set() expressions = self.collapse_group_by(expressions, having_group_by) for expr in expressions: sql, params = self.compile(expr) sql, params = expr.select_format(self, sql, params) params_hash = make_hashable(params) if (sql, params_hash) not in seen: result.append((sql, params)) seen.add((sql, params_hash)) return result def collapse_group_by(self, expressions, having): # If the DB can group by primary key, then group by the primary key of # query's main model. Note that for PostgreSQL the GROUP BY clause must # include the primary key of every table, but for MySQL it is enough to # have the main table's primary key. if self.connection.features.allows_group_by_pk: # Determine if the main model's primary key is in the query. pk = None for expr in expressions: # Is this a reference to query's base table primary key? If the # expression isn't a Col-like, then skip the expression. if (getattr(expr, 'target', None) == self.query.model._meta.pk and getattr(expr, 'alias', None) == self.query.base_table): pk = expr break # If the main model's primary key is in the query, group by that # field, HAVING expressions, and expressions associated with tables # that don't have a primary key included in the grouped columns. 
if pk: pk_aliases = { expr.alias for expr in expressions if hasattr(expr, 'target') and expr.target.primary_key } expressions = [pk] + [ expr for expr in expressions if expr in having or ( getattr(expr, 'alias', None) is not None and expr.alias not in pk_aliases ) ] elif self.connection.features.allows_group_by_selected_pks: # Filter out all expressions associated with a table's primary key # present in the grouped columns. This is done by identifying all # tables that have their primary key included in the grouped # columns and removing non-primary key columns referring to them. # Unmanaged models are excluded because they could be representing # database views on which the optimization might not be allowed. pks = { expr for expr in expressions if ( hasattr(expr, 'target') and expr.target.primary_key and self.connection.features.allows_group_by_selected_pks_on_model(expr.target.model) ) } aliases = {expr.alias for expr in pks} expressions = [ expr for expr in expressions if expr in pks or getattr(expr, 'alias', None) not in aliases ] return expressions def get_select(self): """ Return three values: - a list of 3-tuples of (expression, (sql, params), alias) - a klass_info structure, - a dictionary of annotations The (sql, params) is what the expression will produce, and alias is the "AS alias" for the column (possibly None). The klass_info structure contains the following information: - The base model of the query. - Which columns for that model are present in the query (by position of the select clause). - related_klass_infos: [f, klass_info] to descent into The annotations is a dictionary of {'attname': column position} values. """ select = [] klass_info = None annotations = {} select_idx = 0 for alias, (sql, params) in self.query.extra_select.items(): annotations[alias] = select_idx select.append((RawSQL(sql, params), alias)) select_idx += 1 assert not (self.query.select and self.query.default_cols) if self.query.default_cols: cols = self.get_default_columns() else: # self.query.select is a special case. These columns never go to # any model. cols = self.query.select if cols: select_list = [] for col in cols: select_list.append(select_idx) select.append((col, None)) select_idx += 1 klass_info = { 'model': self.query.model, 'select_fields': select_list, } for alias, annotation in self.query.annotation_select.items(): annotations[alias] = select_idx select.append((annotation, alias)) select_idx += 1 if self.query.select_related: related_klass_infos = self.get_related_selections(select) klass_info['related_klass_infos'] = related_klass_infos def get_select_from_parent(klass_info): for ki in klass_info['related_klass_infos']: if ki['from_parent']: ki['select_fields'] = (klass_info['select_fields'] + ki['select_fields']) get_select_from_parent(ki) get_select_from_parent(klass_info) ret = [] for col, alias in select: try: sql, params = self.compile(col) except EmptyResultSet: # Select a predicate that's always False. 
sql, params = '0', () else: sql, params = col.select_format(self, sql, params) ret.append((col, (sql, params), alias)) return ret, klass_info, annotations def _order_by_pairs(self): if self.query.extra_order_by: ordering = self.query.extra_order_by elif not self.query.default_ordering: ordering = self.query.order_by elif self.query.order_by: ordering = self.query.order_by elif self.query.get_meta().ordering: ordering = self.query.get_meta().ordering self._meta_ordering = ordering else: ordering = [] if self.query.standard_ordering: default_order, _ = ORDER_DIR['ASC'] else: default_order, _ = ORDER_DIR['DESC'] for field in ordering: if hasattr(field, 'resolve_expression'): if isinstance(field, Value): # output_field must be resolved for constants. field = Cast(field, field.output_field) if not isinstance(field, OrderBy): field = field.asc() if not self.query.standard_ordering: field = field.copy() field.reverse_ordering() yield field, False continue if field == '?': # random yield OrderBy(Random()), False continue col, order = get_order_dir(field, default_order) descending = order == 'DESC' if col in self.query.annotation_select: # Reference to expression in SELECT clause yield ( OrderBy( Ref(col, self.query.annotation_select[col]), descending=descending, ), True, ) continue if col in self.query.annotations: # References to an expression which is masked out of the SELECT # clause. if self.query.combinator and self.select: # Don't use the resolved annotation because other # combinated queries might define it differently. expr = F(col) else: expr = self.query.annotations[col] if isinstance(expr, Value): # output_field must be resolved for constants. expr = Cast(expr, expr.output_field) yield OrderBy(expr, descending=descending), False continue if '.' in field: # This came in through an extra(order_by=...) addition. Pass it # on verbatim. table, col = col.split('.', 1) yield ( OrderBy( RawSQL('%s.%s' % (self.quote_name_unless_alias(table), col), []), descending=descending, ), False, ) continue if self.query.extra and col in self.query.extra: if col in self.query.extra_select: yield ( OrderBy(Ref(col, RawSQL(*self.query.extra[col])), descending=descending), True, ) else: yield ( OrderBy(RawSQL(*self.query.extra[col]), descending=descending), False, ) else: if self.query.combinator and self.select: # Don't use the first model's field because other # combinated queries might define it differently. yield OrderBy(F(col), descending=descending), False else: # 'col' is of the form 'field' or 'field1__field2' or # '-field1__field2__field', etc. yield from self.find_ordering_name( field, self.query.get_meta(), default_order=default_order, ) def get_order_by(self): """ Return a list of 2-tuples of the form (expr, (sql, params, is_ref)) for the ORDER BY clause. The order_by clause can alter the select clause (for example it can add aliases to clauses that do not yet have one, or it can add totally new select clauses). """ result = [] seen = set() for expr, is_ref in self._order_by_pairs(): resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None) if self.query.combinator and self.select: src = resolved.get_source_expressions()[0] expr_src = expr.get_source_expressions()[0] # Relabel order by columns to raw numbers if this is a combined # query; necessary since the columns can't be referenced by the # fully qualified name and the simple column names may collide. 
for idx, (sel_expr, _, col_alias) in enumerate(self.select): if is_ref and col_alias == src.refs: src = src.source elif col_alias and not ( isinstance(expr_src, F) and col_alias == expr_src.name ): continue if src == sel_expr: resolved.set_source_expressions([RawSQL('%d' % (idx + 1), ())]) break else: if col_alias: raise DatabaseError('ORDER BY term does not match any column in the result set.') # Add column used in ORDER BY clause to the selected # columns and to each combined query. order_by_idx = len(self.query.select) + 1 col_name = f'__orderbycol{order_by_idx}' for q in self.query.combined_queries: q.add_annotation(expr_src, col_name) self.query.add_select_col(resolved, col_name) resolved.set_source_expressions([RawSQL(f'{order_by_idx}', ())]) sql, params = self.compile(resolved) # Don't add the same column twice, but the order direction is # not taken into account so we strip it. When this entire method # is refactored into expressions, then we can check each part as we # generate it. without_ordering = self.ordering_parts.search(sql)[1] params_hash = make_hashable(params) if (without_ordering, params_hash) in seen: continue seen.add((without_ordering, params_hash)) result.append((resolved, (sql, params, is_ref))) return result def get_extra_select(self, order_by, select): extra_select = [] if self.query.distinct and not self.query.distinct_fields: select_sql = [t[1] for t in select] for expr, (sql, params, is_ref) in order_by: without_ordering = self.ordering_parts.search(sql)[1] if not is_ref and (without_ordering, params) not in select_sql: extra_select.append((expr, (without_ordering, params), None)) return extra_select def quote_name_unless_alias(self, name): """ A wrapper around connection.ops.quote_name that doesn't quote aliases for table names. This avoids problems with some SQL dialects that treat quoted strings specially (e.g. PostgreSQL). """ if name in self.quote_cache: return self.quote_cache[name] if ((name in self.query.alias_map and name not in self.query.table_map) or name in self.query.extra_select or ( self.query.external_aliases.get(name) and name not in self.query.table_map)): self.quote_cache[name] = name return name r = self.connection.ops.quote_name(name) self.quote_cache[name] = r return r def compile(self, node): vendor_impl = getattr(node, 'as_' + self.connection.vendor, None) if vendor_impl: sql, params = vendor_impl(self, self.connection) else: sql, params = node.as_sql(self, self.connection) return sql, params def get_combinator_sql(self, combinator, all): features = self.connection.features compilers = [ query.get_compiler(self.using, self.connection, self.elide_empty) for query in self.query.combined_queries if not query.is_empty() ] if not features.supports_slicing_ordering_in_compound: for query, compiler in zip(self.query.combined_queries, compilers): if query.low_mark or query.high_mark: raise DatabaseError('LIMIT/OFFSET not allowed in subqueries of compound statements.') if compiler.get_order_by(): raise DatabaseError('ORDER BY not allowed in subqueries of compound statements.') parts = () for compiler in compilers: try: # If the columns list is limited, then all combined queries # must have the same columns list. Set the selects defined on # the query on all combined queries, if not already set. 
if not compiler.query.values_select and self.query.values_select: compiler.query = compiler.query.clone() compiler.query.set_values(( *self.query.extra_select, *self.query.values_select, *self.query.annotation_select, )) part_sql, part_args = compiler.as_sql() if compiler.query.combinator: # Wrap in a subquery if wrapping in parentheses isn't # supported. if not features.supports_parentheses_in_compound: part_sql = 'SELECT * FROM ({})'.format(part_sql) # Add parentheses when combining with compound query if not # already added for all compound queries. elif not features.supports_slicing_ordering_in_compound: part_sql = '({})'.format(part_sql) parts += ((part_sql, part_args),) except EmptyResultSet: # Omit the empty queryset with UNION and with DIFFERENCE if the # first queryset is nonempty. if combinator == 'union' or (combinator == 'difference' and parts): continue raise if not parts: raise EmptyResultSet combinator_sql = self.connection.ops.set_operators[combinator] if all and combinator == 'union': combinator_sql += ' ALL' braces = '({})' if features.supports_slicing_ordering_in_compound else '{}' sql_parts, args_parts = zip(*((braces.format(sql), args) for sql, args in parts)) result = [' {} '.format(combinator_sql).join(sql_parts)] params = [] for part in args_parts: params.extend(part) return result, params def as_sql(self, with_limits=True, with_col_aliases=False): """ Create the SQL for this query. Return the SQL string and list of parameters. If 'with_limits' is False, any limit/offset information is not included in the query. """ refcounts_before = self.query.alias_refcount.copy() try: extra_select, order_by, group_by = self.pre_sql_setup() for_update_part = None # Is a LIMIT/OFFSET clause needed? with_limit_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark) combinator = self.query.combinator features = self.connection.features if combinator: if not getattr(features, 'supports_select_{}'.format(combinator)): raise NotSupportedError('{} is not supported on this database backend.'.format(combinator)) result, params = self.get_combinator_sql(combinator, self.query.combinator_all) else: distinct_fields, distinct_params = self.get_distinct() # This must come after 'select', 'ordering', and 'distinct' # (see docstring of get_from_clause() for details). from_, f_params = self.get_from_clause() try: where, w_params = self.compile(self.where) if self.where is not None else ('', []) except EmptyResultSet: if self.elide_empty: raise # Use a predicate that's always False. 
where, w_params = '0 = 1', [] having, h_params = self.compile(self.having) if self.having is not None else ("", []) result = ['SELECT'] params = [] if self.query.distinct: distinct_result, distinct_params = self.connection.ops.distinct_sql( distinct_fields, distinct_params, ) result += distinct_result params += distinct_params out_cols = [] col_idx = 1 for _, (s_sql, s_params), alias in self.select + extra_select: if alias: s_sql = '%s AS %s' % (s_sql, self.connection.ops.quote_name(alias)) elif with_col_aliases: s_sql = '%s AS %s' % ( s_sql, self.connection.ops.quote_name('col%d' % col_idx), ) col_idx += 1 params.extend(s_params) out_cols.append(s_sql) result += [', '.join(out_cols), 'FROM', *from_] params.extend(f_params) if self.query.select_for_update and self.connection.features.has_select_for_update: if self.connection.get_autocommit(): raise TransactionManagementError('select_for_update cannot be used outside of a transaction.') if with_limit_offset and not self.connection.features.supports_select_for_update_with_limit: raise NotSupportedError( 'LIMIT/OFFSET is not supported with ' 'select_for_update on this database backend.' ) nowait = self.query.select_for_update_nowait skip_locked = self.query.select_for_update_skip_locked of = self.query.select_for_update_of no_key = self.query.select_for_no_key_update # If it's a NOWAIT/SKIP LOCKED/OF/NO KEY query but the # backend doesn't support it, raise NotSupportedError to # prevent a possible deadlock. if nowait and not self.connection.features.has_select_for_update_nowait: raise NotSupportedError('NOWAIT is not supported on this database backend.') elif skip_locked and not self.connection.features.has_select_for_update_skip_locked: raise NotSupportedError('SKIP LOCKED is not supported on this database backend.') elif of and not self.connection.features.has_select_for_update_of: raise NotSupportedError('FOR UPDATE OF is not supported on this database backend.') elif no_key and not self.connection.features.has_select_for_no_key_update: raise NotSupportedError( 'FOR NO KEY UPDATE is not supported on this ' 'database backend.' 
) for_update_part = self.connection.ops.for_update_sql( nowait=nowait, skip_locked=skip_locked, of=self.get_select_for_update_of_arguments(), no_key=no_key, ) if for_update_part and self.connection.features.for_update_after_from: result.append(for_update_part) if where: result.append('WHERE %s' % where) params.extend(w_params) grouping = [] for g_sql, g_params in group_by: grouping.append(g_sql) params.extend(g_params) if grouping: if distinct_fields: raise NotImplementedError('annotate() + distinct(fields) is not implemented.') order_by = order_by or self.connection.ops.force_no_ordering() result.append('GROUP BY %s' % ', '.join(grouping)) if self._meta_ordering: order_by = None if having: result.append('HAVING %s' % having) params.extend(h_params) if self.query.explain_query: result.insert(0, self.connection.ops.explain_query_prefix( self.query.explain_format, **self.query.explain_options )) if order_by: ordering = [] for _, (o_sql, o_params, _) in order_by: ordering.append(o_sql) params.extend(o_params) result.append('ORDER BY %s' % ', '.join(ordering)) if with_limit_offset: result.append(self.connection.ops.limit_offset_sql(self.query.low_mark, self.query.high_mark)) if for_update_part and not self.connection.features.for_update_after_from: result.append(for_update_part) if self.query.subquery and extra_select: # If the query is used as a subquery, the extra selects would # result in more columns than the left-hand side expression is # expecting. This can happen when a subquery uses a combination # of order_by() and distinct(), forcing the ordering expressions # to be selected as well. Wrap the query in another subquery # to exclude extraneous selects. sub_selects = [] sub_params = [] for index, (select, _, alias) in enumerate(self.select, start=1): if not alias and with_col_aliases: alias = 'col%d' % index if alias: sub_selects.append("%s.%s" % ( self.connection.ops.quote_name('subquery'), self.connection.ops.quote_name(alias), )) else: select_clone = select.relabeled_clone({select.alias: 'subquery'}) subselect, subparams = select_clone.as_sql(self, self.connection) sub_selects.append(subselect) sub_params.extend(subparams) return 'SELECT %s FROM (%s) subquery' % ( ', '.join(sub_selects), ' '.join(result), ), tuple(sub_params + params) return ' '.join(result), tuple(params) finally: # Finally do cleanup - get rid of the joins we created above. self.query.reset_refcounts(refcounts_before) def get_default_columns(self, start_alias=None, opts=None, from_parent=None): """ Compute the default columns for selecting every field in the base model. Will sometimes be called to pull in related models (e.g. via select_related), in which case "opts" and "start_alias" will be given to provide a starting point for the traversal. Return a list of strings, quoted appropriately for use in SQL directly, as well as a set of aliases used in the select statement (if 'as_pairs' is True, return a list of (alias, col_name) pairs instead of strings as the first component and None as the second component). """ result = [] if opts is None: opts = self.query.get_meta() only_load = self.deferred_to_columns() start_alias = start_alias or self.query.get_initial_alias() # The 'seen_models' is used to optimize checking the needed parent # alias for a given field. This also includes None -> start_alias to # be used by local fields. seen_models = {None: start_alias} for field in opts.concrete_fields: model = field.model._meta.concrete_model # A proxy model will have a different model and concrete_model. 
We # will assign None if the field belongs to this model. if model == opts.model: model = None if from_parent and model is not None and issubclass( from_parent._meta.concrete_model, model._meta.concrete_model): # Avoid loading data for already loaded parents. # We end up here in the case select_related() resolution # proceeds from parent model to child model. In that case the # parent model data is already present in the SELECT clause, # and we want to avoid reloading the same data again. continue if field.model in only_load and field.attname not in only_load[field.model]: continue alias = self.query.join_parent_model(opts, model, start_alias, seen_models) column = field.get_col(alias) result.append(column) return result def get_distinct(self): """ Return a quoted list of fields to use in DISTINCT ON part of the query. This method can alter the tables in the query, and thus it must be called before get_from_clause(). """ result = [] params = [] opts = self.query.get_meta() for name in self.query.distinct_fields: parts = name.split(LOOKUP_SEP) _, targets, alias, joins, path, _, transform_function = self._setup_joins(parts, opts, None) targets, alias, _ = self.query.trim_joins(targets, joins, path) for target in targets: if name in self.query.annotation_select: result.append(name) else: r, p = self.compile(transform_function(target, alias)) result.append(r) params.append(p) return result, params def find_ordering_name(self, name, opts, alias=None, default_order='ASC', already_seen=None): """ Return the table alias (the name might be ambiguous, the alias will not be) and column name for ordering by the given 'name' parameter. The 'name' is of the form 'field1__field2__...__fieldN'. """ name, order = get_order_dir(name, default_order) descending = order == 'DESC' pieces = name.split(LOOKUP_SEP) field, targets, alias, joins, path, opts, transform_function = self._setup_joins(pieces, opts, alias) # If we get to this point and the field is a relation to another model, # append the default ordering for that model unless it is the pk # shortcut or the attribute name of the field that is specified. if ( field.is_relation and opts.ordering and getattr(field, 'attname', None) != pieces[-1] and name != 'pk' ): # Firstly, avoid infinite loops. already_seen = already_seen or set() join_tuple = tuple(getattr(self.query.alias_map[j], 'join_cols', None) for j in joins) if join_tuple in already_seen: raise FieldError('Infinite loop caused by ordering.') already_seen.add(join_tuple) results = [] for item in opts.ordering: if hasattr(item, 'resolve_expression') and not isinstance(item, OrderBy): item = item.desc() if descending else item.asc() if isinstance(item, OrderBy): results.append((item, False)) continue results.extend(self.find_ordering_name(item, opts, alias, order, already_seen)) return results targets, alias, _ = self.query.trim_joins(targets, joins, path) return [(OrderBy(transform_function(t, alias), descending=descending), False) for t in targets] def _setup_joins(self, pieces, opts, alias): """ Helper method for get_order_by() and get_distinct(). get_ordering() and get_distinct() must produce same target columns on same input, as the prefixes of get_ordering() and get_distinct() must match. Executing SQL where this is not true is an error. 
""" alias = alias or self.query.get_initial_alias() field, targets, opts, joins, path, transform_function = self.query.setup_joins(pieces, opts, alias) alias = joins[-1] return field, targets, alias, joins, path, opts, transform_function def get_from_clause(self): """ Return a list of strings that are joined together to go after the "FROM" part of the query, as well as a list any extra parameters that need to be included. Subclasses, can override this to create a from-clause via a "select". This should only be called after any SQL construction methods that might change the tables that are needed. This means the select columns, ordering, and distinct must be done first. """ result = [] params = [] for alias in tuple(self.query.alias_map): if not self.query.alias_refcount[alias]: continue try: from_clause = self.query.alias_map[alias] except KeyError: # Extra tables can end up in self.tables, but not in the # alias_map if they aren't in a join. That's OK. We skip them. continue clause_sql, clause_params = self.compile(from_clause) result.append(clause_sql) params.extend(clause_params) for t in self.query.extra_tables: alias, _ = self.query.table_alias(t) # Only add the alias if it's not already present (the table_alias() # call increments the refcount, so an alias refcount of one means # this is the only reference). if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1: result.append(', %s' % self.quote_name_unless_alias(alias)) return result, params def get_related_selections(self, select, opts=None, root_alias=None, cur_depth=1, requested=None, restricted=None): """ Fill in the information needed for a select_related query. The current depth is measured as the number of connections away from the root model (for example, cur_depth=1 means we are looking at models with direct connections to the root model). """ def _get_field_choices(): direct_choices = (f.name for f in opts.fields if f.is_relation) reverse_choices = ( f.field.related_query_name() for f in opts.related_objects if f.field.unique ) return chain(direct_choices, reverse_choices, self.query._filtered_relations) related_klass_infos = [] if not restricted and cur_depth > self.query.max_depth: # We've recursed far enough; bail out. return related_klass_infos if not opts: opts = self.query.get_meta() root_alias = self.query.get_initial_alias() only_load = self.query.get_loaded_field_names() # Setup for the case when only particular related fields should be # included in the related selection. fields_found = set() if requested is None: restricted = isinstance(self.query.select_related, dict) if restricted: requested = self.query.select_related def get_related_klass_infos(klass_info, related_klass_infos): klass_info['related_klass_infos'] = related_klass_infos for f in opts.fields: field_model = f.model._meta.concrete_model fields_found.add(f.name) if restricted: next = requested.get(f.name, {}) if not f.is_relation: # If a non-related field is used like a relation, # or if a single non-relational field is given. if next or f.name in requested: raise FieldError( "Non-relational field given in select_related: '%s'. 
" "Choices are: %s" % ( f.name, ", ".join(_get_field_choices()) or '(none)', ) ) else: next = False if not select_related_descend(f, restricted, requested, only_load.get(field_model)): continue klass_info = { 'model': f.remote_field.model, 'field': f, 'reverse': False, 'local_setter': f.set_cached_value, 'remote_setter': f.remote_field.set_cached_value if f.unique else lambda x, y: None, 'from_parent': False, } related_klass_infos.append(klass_info) select_fields = [] _, _, _, joins, _, _ = self.query.setup_joins( [f.name], opts, root_alias) alias = joins[-1] columns = self.get_default_columns(start_alias=alias, opts=f.remote_field.model._meta) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info['select_fields'] = select_fields next_klass_infos = self.get_related_selections( select, f.remote_field.model._meta, alias, cur_depth + 1, next, restricted) get_related_klass_infos(klass_info, next_klass_infos) if restricted: related_fields = [ (o.field, o.related_model) for o in opts.related_objects if o.field.unique and not o.many_to_many ] for f, model in related_fields: if not select_related_descend(f, restricted, requested, only_load.get(model), reverse=True): continue related_field_name = f.related_query_name() fields_found.add(related_field_name) join_info = self.query.setup_joins([related_field_name], opts, root_alias) alias = join_info.joins[-1] from_parent = issubclass(model, opts.model) and model is not opts.model klass_info = { 'model': model, 'field': f, 'reverse': True, 'local_setter': f.remote_field.set_cached_value, 'remote_setter': f.set_cached_value, 'from_parent': from_parent, } related_klass_infos.append(klass_info) select_fields = [] columns = self.get_default_columns( start_alias=alias, opts=model._meta, from_parent=opts.model) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info['select_fields'] = select_fields next = requested.get(f.related_query_name(), {}) next_klass_infos = self.get_related_selections( select, model._meta, alias, cur_depth + 1, next, restricted) get_related_klass_infos(klass_info, next_klass_infos) def local_setter(obj, from_obj): # Set a reverse fk object when relation is non-empty. if from_obj: f.remote_field.set_cached_value(from_obj, obj) def remote_setter(name, obj, from_obj): setattr(from_obj, name, obj) for name in list(requested): # Filtered relations work only on the topmost level. 
if cur_depth > 1: break if name in self.query._filtered_relations: fields_found.add(name) f, _, join_opts, joins, _, _ = self.query.setup_joins([name], opts, root_alias) model = join_opts.model alias = joins[-1] from_parent = issubclass(model, opts.model) and model is not opts.model klass_info = { 'model': model, 'field': f, 'reverse': True, 'local_setter': local_setter, 'remote_setter': partial(remote_setter, name), 'from_parent': from_parent, } related_klass_infos.append(klass_info) select_fields = [] columns = self.get_default_columns( start_alias=alias, opts=model._meta, from_parent=opts.model, ) for col in columns: select_fields.append(len(select)) select.append((col, None)) klass_info['select_fields'] = select_fields next_requested = requested.get(name, {}) next_klass_infos = self.get_related_selections( select, opts=model._meta, root_alias=alias, cur_depth=cur_depth + 1, requested=next_requested, restricted=restricted, ) get_related_klass_infos(klass_info, next_klass_infos) fields_not_found = set(requested).difference(fields_found) if fields_not_found: invalid_fields = ("'%s'" % s for s in fields_not_found) raise FieldError( 'Invalid field name(s) given in select_related: %s. ' 'Choices are: %s' % ( ', '.join(invalid_fields), ', '.join(_get_field_choices()) or '(none)', ) ) return related_klass_infos def get_select_for_update_of_arguments(self): """ Return a quoted list of arguments for the SELECT FOR UPDATE OF part of the query. """ def _get_parent_klass_info(klass_info): concrete_model = klass_info['model']._meta.concrete_model for parent_model, parent_link in concrete_model._meta.parents.items(): parent_list = parent_model._meta.get_parent_list() yield { 'model': parent_model, 'field': parent_link, 'reverse': False, 'select_fields': [ select_index for select_index in klass_info['select_fields'] # Selected columns from a model or its parents. if ( self.select[select_index][0].target.model == parent_model or self.select[select_index][0].target.model in parent_list ) ], } def _get_first_selected_col_from_model(klass_info): """ Find the first selected column from a model. If it doesn't exist, don't lock a model. select_fields is filled recursively, so it also contains fields from the parent models. 
""" concrete_model = klass_info['model']._meta.concrete_model for select_index in klass_info['select_fields']: if self.select[select_index][0].target.model == concrete_model: return self.select[select_index][0] def _get_field_choices(): """Yield all allowed field paths in breadth-first search order.""" queue = collections.deque([(None, self.klass_info)]) while queue: parent_path, klass_info = queue.popleft() if parent_path is None: path = [] yield 'self' else: field = klass_info['field'] if klass_info['reverse']: field = field.remote_field path = parent_path + [field.name] yield LOOKUP_SEP.join(path) queue.extend( (path, klass_info) for klass_info in _get_parent_klass_info(klass_info) ) queue.extend( (path, klass_info) for klass_info in klass_info.get('related_klass_infos', []) ) result = [] invalid_names = [] for name in self.query.select_for_update_of: klass_info = self.klass_info if name == 'self': col = _get_first_selected_col_from_model(klass_info) else: for part in name.split(LOOKUP_SEP): klass_infos = ( *klass_info.get('related_klass_infos', []), *_get_parent_klass_info(klass_info), ) for related_klass_info in klass_infos: field = related_klass_info['field'] if related_klass_info['reverse']: field = field.remote_field if field.name == part: klass_info = related_klass_info break else: klass_info = None break if klass_info is None: invalid_names.append(name) continue col = _get_first_selected_col_from_model(klass_info) if col is not None: if self.connection.features.select_for_update_of_column: result.append(self.compile(col)[0]) else: result.append(self.quote_name_unless_alias(col.alias)) if invalid_names: raise FieldError( 'Invalid field name(s) given in select_for_update(of=(...)): %s. ' 'Only relational fields followed in the query are allowed. ' 'Choices are: %s.' % ( ', '.join(invalid_names), ', '.join(_get_field_choices()), ) ) return result def deferred_to_columns(self): """ Convert the self.deferred_loading data structure to mapping of table names to sets of column names which are to be loaded. Return the dictionary. """ columns = {} self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb) return columns def get_converters(self, expressions): converters = {} for i, expression in enumerate(expressions): if expression: backend_converters = self.connection.ops.get_db_converters(expression) field_converters = expression.get_db_converters(self.connection) if backend_converters or field_converters: converters[i] = (backend_converters + field_converters, expression) return converters def apply_converters(self, rows, converters): connection = self.connection converters = list(converters.items()) for row in map(list, rows): for pos, (convs, expression) in converters: value = row[pos] for converter in convs: value = converter(value, expression, connection) row[pos] = value yield row def results_iter(self, results=None, tuple_expected=False, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE): """Return an iterator over the results from executing this query.""" if results is None: results = self.execute_sql(MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size) fields = [s[0] for s in self.select[0:self.col_count]] converters = self.get_converters(fields) rows = chain.from_iterable(results) if converters: rows = self.apply_converters(rows, converters) if tuple_expected: rows = map(tuple, rows) return rows def has_results(self): """ Backends (e.g. NoSQL) can override this in order to use optimized versions of "query has any results." 
""" return bool(self.execute_sql(SINGLE)) def execute_sql(self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE): """ Run the query against the database and return the result(s). The return value is a single data item if result_type is SINGLE, or an iterator over the results if the result_type is MULTI. result_type is either MULTI (use fetchmany() to retrieve all rows), SINGLE (only retrieve a single row), or None. In this last case, the cursor is returned if any query is executed, since it's used by subclasses such as InsertQuery). It's possible, however, that no query is needed, as the filters describe an empty set. In that case, None is returned, to avoid any unnecessary database interaction. """ result_type = result_type or NO_RESULTS try: sql, params = self.as_sql() if not sql: raise EmptyResultSet except EmptyResultSet: if result_type == MULTI: return iter([]) else: return if chunked_fetch: cursor = self.connection.chunked_cursor() else: cursor = self.connection.cursor() try: cursor.execute(sql, params) except Exception: # Might fail for server-side cursors (e.g. connection closed) cursor.close() raise if result_type == CURSOR: # Give the caller the cursor to process and close. return cursor if result_type == SINGLE: try: val = cursor.fetchone() if val: return val[0:self.col_count] return val finally: # done with the cursor cursor.close() if result_type == NO_RESULTS: cursor.close() return result = cursor_iter( cursor, self.connection.features.empty_fetchmany_value, self.col_count if self.has_extra_select else None, chunk_size, ) if not chunked_fetch or not self.connection.features.can_use_chunked_reads: # If we are using non-chunked reads, we return the same data # structure as normally, but ensure it is all read into memory # before going any further. Use chunked_fetch if requested, # unless the database doesn't support it. return list(result) return result def as_subquery_condition(self, alias, columns, compiler): qn = compiler.quote_name_unless_alias qn2 = self.connection.ops.quote_name for index, select_col in enumerate(self.query.select): lhs_sql, lhs_params = self.compile(select_col) rhs = '%s.%s' % (qn(alias), qn2(columns[index])) self.query.where.add( RawSQL('%s = %s' % (lhs_sql, rhs), lhs_params), 'AND') sql, params = self.as_sql() return 'EXISTS (%s)' % sql, params def explain_query(self): result = list(self.execute_sql()) # Some backends return 1 item tuples with strings, and others return # tuples with integers and strings. Flatten them out into strings. output_formatter = json.dumps if self.query.explain_format == 'json' else str for row in result[0]: if not isinstance(row, str): yield ' '.join(output_formatter(c) for c in row) else: yield row class SQLInsertCompiler(SQLCompiler): returning_fields = None returning_params = tuple() def field_as_sql(self, field, val): """ Take a field and a value intended to be saved on that field, and return placeholder SQL and accompanying params. Check for raw values, expressions, and fields with get_placeholder() defined in that order. When field is None, consider the value raw and use it as the placeholder, with no corresponding parameters returned. """ if field is None: # A field value of None means the value is raw. sql, params = val, [] elif hasattr(val, 'as_sql'): # This is an expression, let's compile it. sql, params = self.compile(val) elif hasattr(field, 'get_placeholder'): # Some fields (e.g. geo fields) need special munging before # they can be inserted. 
sql, params = field.get_placeholder(val, self, self.connection), [val] else: # Return the common case for the placeholder sql, params = '%s', [val] # The following hook is only used by Oracle Spatial, which sometimes # needs to yield 'NULL' and [] as its placeholder and params instead # of '%s' and [None]. The 'NULL' placeholder is produced earlier by # OracleOperations.get_geom_placeholder(). The following line removes # the corresponding None parameter. See ticket #10888. params = self.connection.ops.modify_insert_params(sql, params) return sql, params def prepare_value(self, field, value): """ Prepare a value to be used in a query by resolving it if it is an expression and otherwise calling the field's get_db_prep_save(). """ if hasattr(value, 'resolve_expression'): value = value.resolve_expression(self.query, allow_joins=False, for_save=True) # Don't allow values containing Col expressions. They refer to # existing columns on a row, but in the case of insert the row # doesn't exist yet. if value.contains_column_references: raise ValueError( 'Failed to insert expression "%s" on %s. F() expressions ' 'can only be used to update, not to insert.' % (value, field) ) if value.contains_aggregate: raise FieldError( 'Aggregate functions are not allowed in this query ' '(%s=%r).' % (field.name, value) ) if value.contains_over_clause: raise FieldError( 'Window expressions are not allowed in this query (%s=%r).' % (field.name, value) ) else: value = field.get_db_prep_save(value, connection=self.connection) return value def pre_save_val(self, field, obj): """ Get the given field's value off the given obj. pre_save() is used for things like auto_now on DateTimeField. Skip it if this is a raw query. """ if self.query.raw: return getattr(obj, field.attname) return field.pre_save(obj, add=True) def assemble_as_sql(self, fields, value_rows): """ Take a sequence of N fields and a sequence of M rows of values, and generate placeholder SQL and parameters for each field and value. Return a pair containing: * a sequence of M rows of N SQL placeholder strings, and * a sequence of M rows of corresponding parameter values. Each placeholder string may contain any number of '%s' interpolation strings, and each parameter row will contain exactly as many params as the total number of '%s's in the corresponding placeholder row. """ if not value_rows: return [], [] # list of (sql, [params]) tuples for each object to be saved # Shape: [n_objs][n_fields][2] rows_of_fields_as_sql = ( (self.field_as_sql(field, v) for field, v in zip(fields, row)) for row in value_rows ) # tuple like ([sqls], [[params]s]) for each object to be saved # Shape: [n_objs][2][n_fields] sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql) # Extract separate lists for placeholders and params. # Each of these has shape [n_objs][n_fields] placeholder_rows, param_rows = zip(*sql_and_param_pair_rows) # Params for each field are still lists, and need to be flattened. param_rows = [[p for ps in row for p in ps] for row in param_rows] return placeholder_rows, param_rows def as_sql(self): # We don't need quote_name_unless_alias() here, since these are all # going to be column names (so we can avoid the extra overhead). 
qn = self.connection.ops.quote_name opts = self.query.get_meta() insert_statement = self.connection.ops.insert_statement(ignore_conflicts=self.query.ignore_conflicts) result = ['%s %s' % (insert_statement, qn(opts.db_table))] fields = self.query.fields or [opts.pk] result.append('(%s)' % ', '.join(qn(f.column) for f in fields)) if self.query.fields: value_rows = [ [self.prepare_value(field, self.pre_save_val(field, obj)) for field in fields] for obj in self.query.objs ] else: # An empty object. value_rows = [[self.connection.ops.pk_default_value()] for _ in self.query.objs] fields = [None] # Currently the backends just accept values when generating bulk # queries and generate their own placeholders. Doing that isn't # necessary and it should be possible to use placeholders and # expressions in bulk inserts too. can_bulk = (not self.returning_fields and self.connection.features.has_bulk_insert) placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows) ignore_conflicts_suffix_sql = self.connection.ops.ignore_conflicts_suffix_sql( ignore_conflicts=self.query.ignore_conflicts ) if self.returning_fields and self.connection.features.can_return_columns_from_insert: if self.connection.features.can_return_rows_from_bulk_insert: result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows)) params = param_rows else: result.append("VALUES (%s)" % ", ".join(placeholder_rows[0])) params = [param_rows[0]] if ignore_conflicts_suffix_sql: result.append(ignore_conflicts_suffix_sql) # Skip empty r_sql to allow subclasses to customize behavior for # 3rd party backends. Refs #19096. r_sql, self.returning_params = self.connection.ops.return_insert_columns(self.returning_fields) if r_sql: result.append(r_sql) params += [self.returning_params] return [(" ".join(result), tuple(chain.from_iterable(params)))] if can_bulk: result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows)) if ignore_conflicts_suffix_sql: result.append(ignore_conflicts_suffix_sql) return [(" ".join(result), tuple(p for ps in param_rows for p in ps))] else: if ignore_conflicts_suffix_sql: result.append(ignore_conflicts_suffix_sql) return [ (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals) for p, vals in zip(placeholder_rows, param_rows) ] def execute_sql(self, returning_fields=None): assert not ( returning_fields and len(self.query.objs) != 1 and not self.connection.features.can_return_rows_from_bulk_insert ) opts = self.query.get_meta() self.returning_fields = returning_fields with self.connection.cursor() as cursor: for sql, params in self.as_sql(): cursor.execute(sql, params) if not self.returning_fields: return [] if self.connection.features.can_return_rows_from_bulk_insert and len(self.query.objs) > 1: rows = self.connection.ops.fetch_returned_insert_rows(cursor) elif self.connection.features.can_return_columns_from_insert: assert len(self.query.objs) == 1 rows = [self.connection.ops.fetch_returned_insert_columns( cursor, self.returning_params, )] else: rows = [(self.connection.ops.last_insert_id( cursor, opts.db_table, opts.pk.column, ),)] cols = [field.get_col(opts.db_table) for field in self.returning_fields] converters = self.get_converters(cols) if converters: rows = list(self.apply_converters(rows, converters)) return rows class SQLDeleteCompiler(SQLCompiler): @cached_property def single_alias(self): # Ensure base table is in aliases. 
self.query.get_initial_alias() return sum(self.query.alias_refcount[t] > 0 for t in self.query.alias_map) == 1 @classmethod def _expr_refs_base_model(cls, expr, base_model): if isinstance(expr, Query): return expr.model == base_model if not hasattr(expr, 'get_source_expressions'): return False return any( cls._expr_refs_base_model(source_expr, base_model) for source_expr in expr.get_source_expressions() ) @cached_property def contains_self_reference_subquery(self): return any( self._expr_refs_base_model(expr, self.query.model) for expr in chain(self.query.annotations.values(), self.query.where.children) ) def _as_sql(self, query): result = [ 'DELETE FROM %s' % self.quote_name_unless_alias(query.base_table) ] where, params = self.compile(query.where) if where: result.append('WHERE %s' % where) return ' '.join(result), tuple(params) def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ if self.single_alias and not self.contains_self_reference_subquery: return self._as_sql(self.query) innerq = self.query.clone() innerq.__class__ = Query innerq.clear_select_clause() pk = self.query.model._meta.pk innerq.select = [ pk.get_col(self.query.get_initial_alias()) ] outerq = Query(self.query.model) if not self.connection.features.update_can_self_select: # Force the materialization of the inner query to allow reference # to the target table on MySQL. sql, params = innerq.get_compiler(connection=self.connection).as_sql() innerq = RawSQL('SELECT * FROM (%s) subquery' % sql, params) outerq.add_filter('pk__in', innerq) return self._as_sql(outerq) class SQLUpdateCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ self.pre_sql_setup() if not self.query.values: return '', () qn = self.quote_name_unless_alias values, update_params = [], [] for field, model, val in self.query.values: if hasattr(val, 'resolve_expression'): val = val.resolve_expression(self.query, allow_joins=False, for_save=True) if val.contains_aggregate: raise FieldError( 'Aggregate functions are not allowed in this query ' '(%s=%r).' % (field.name, val) ) if val.contains_over_clause: raise FieldError( 'Window expressions are not allowed in this query ' '(%s=%r).' % (field.name, val) ) elif hasattr(val, 'prepare_database_save'): if field.remote_field: val = field.get_db_prep_save( val.prepare_database_save(field), connection=self.connection, ) else: raise TypeError( "Tried to update field %s with a model instance, %r. " "Use a value compatible with %s." % (field, val, field.__class__.__name__) ) else: val = field.get_db_prep_save(val, connection=self.connection) # Getting the placeholder for the field. if hasattr(field, 'get_placeholder'): placeholder = field.get_placeholder(val, self, self.connection) else: placeholder = '%s' name = field.column if hasattr(val, 'as_sql'): sql, params = self.compile(val) values.append('%s = %s' % (qn(name), placeholder % sql)) update_params.extend(params) elif val is not None: values.append('%s = %s' % (qn(name), placeholder)) update_params.append(val) else: values.append('%s = NULL' % qn(name)) table = self.query.base_table result = [ 'UPDATE %s SET' % qn(table), ', '.join(values), ] where, params = self.compile(self.query.where) if where: result.append('WHERE %s' % where) return ' '.join(result), tuple(update_params + params) def execute_sql(self, result_type): """ Execute the specified update. Return the number of rows affected by the primary update query. 
The "primary update query" is the first non-empty query that is executed. Row counts for any subsequent, related queries are not available. """ cursor = super().execute_sql(result_type) try: rows = cursor.rowcount if cursor else 0 is_empty = cursor is None finally: if cursor: cursor.close() for query in self.query.get_related_updates(): aux_rows = query.get_compiler(self.using).execute_sql(result_type) if is_empty and aux_rows: rows = aux_rows is_empty = False return rows def pre_sql_setup(self): """ If the update depends on results from other tables, munge the "where" conditions to match the format required for (portable) SQL updates. If multiple updates are required, pull out the id values to update at this point so that they don't change as a result of the progressive updates. """ refcounts_before = self.query.alias_refcount.copy() # Ensure base table is in the query self.query.get_initial_alias() count = self.query.count_active_tables() if not self.query.related_updates and count == 1: return query = self.query.chain(klass=Query) query.select_related = False query.clear_ordering(force=True) query.extra = {} query.select = [] query.add_fields([query.get_meta().pk.name]) super().pre_sql_setup() must_pre_select = count > 1 and not self.connection.features.update_can_self_select # Now we adjust the current query: reset the where clause and get rid # of all the tables we don't need (since they're in the sub-select). self.query.clear_where() if self.query.related_updates or must_pre_select: # Either we're using the idents in multiple update queries (so # don't want them to change), or the db backend doesn't support # selecting from the updating table (e.g. MySQL). idents = [] for rows in query.get_compiler(self.using).execute_sql(MULTI): idents.extend(r[0] for r in rows) self.query.add_filter('pk__in', idents) self.query.related_ids = idents else: # The fast path. Filters and updates in one query. self.query.add_filter('pk__in', query) self.query.reset_refcounts(refcounts_before) class SQLAggregateCompiler(SQLCompiler): def as_sql(self): """ Create the SQL for this query. Return the SQL string and list of parameters. """ sql, params = [], [] for annotation in self.query.annotation_select.values(): ann_sql, ann_params = self.compile(annotation) ann_sql, ann_params = annotation.select_format(self, ann_sql, ann_params) sql.append(ann_sql) params.extend(ann_params) self.col_count = len(self.query.annotation_select) sql = ', '.join(sql) params = tuple(params) inner_query_sql, inner_query_params = self.query.inner_query.get_compiler( self.using, elide_empty=self.elide_empty, ).as_sql(with_col_aliases=True) sql = 'SELECT %s FROM (%s) subquery' % (sql, inner_query_sql) params = params + inner_query_params return sql, params def cursor_iter(cursor, sentinel, col_count, itersize): """ Yield blocks of rows from a cursor and ensure the cursor is closed when done. """ try: for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel): yield rows if col_count is None else [r[:col_count] for r in rows] finally: cursor.close()
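# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only; not part of the compiler module
# above). It shows how SQLCompiler is typically reached from a QuerySet:
# get_compiler() builds the compiler for a connection alias, and as_sql()
# runs pre_sql_setup() and returns the SELECT statement with its parameters.
# The helper name below is an assumption for this sketch; get_compiler(),
# as_sql(), and execute_sql() are the real entry points used internally.
def _compiler_usage_sketch(queryset, using='default'):
    compiler = queryset.query.get_compiler(using=using)
    # as_sql() -> e.g. ('SELECT ... FROM "app_book" WHERE ...', (params,))
    sql, params = compiler.as_sql()
    # execute_sql(MULTI)/results_iter() would run this statement and apply
    # the converters collected by get_converters() to each returned row.
    return sql, params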
""" Code to manage the creation and SQL rendering of 'where' constraints. """ from django.core.exceptions import EmptyResultSet from django.utils import tree from django.utils.functional import cached_property # Connection types AND = 'AND' OR = 'OR' class WhereNode(tree.Node): """ An SQL WHERE clause. The class is tied to the Query class that created it (in order to create the correct SQL). A child is usually an expression producing boolean values. Most likely the expression is a Lookup instance. However, a child could also be any class with as_sql() and either relabeled_clone() method or relabel_aliases() and clone() methods and contains_aggregate attribute. """ default = AND resolved = False conditional = True def split_having(self, negated=False): """ Return two possibly None nodes: one for those parts of self that should be included in the WHERE clause and one for those parts of self that must be included in the HAVING clause. """ if not self.contains_aggregate: return self, None in_negated = negated ^ self.negated # If the effective connector is OR and this node contains an aggregate, # then we need to push the whole branch to HAVING clause. may_need_split = ( (in_negated and self.connector == AND) or (not in_negated and self.connector == OR)) if may_need_split and self.contains_aggregate: return None, self where_parts = [] having_parts = [] for c in self.children: if hasattr(c, 'split_having'): where_part, having_part = c.split_having(in_negated) if where_part is not None: where_parts.append(where_part) if having_part is not None: having_parts.append(having_part) elif c.contains_aggregate: having_parts.append(c) else: where_parts.append(c) having_node = self.__class__(having_parts, self.connector, self.negated) if having_parts else None where_node = self.__class__(where_parts, self.connector, self.negated) if where_parts else None return where_node, having_node def as_sql(self, compiler, connection): """ Return the SQL version of the where clause and the value to be substituted in. Return '', [] if this node matches everything, None, [] if this node is empty, and raise EmptyResultSet if this node can't match anything. """ result = [] result_params = [] if self.connector == AND: full_needed, empty_needed = len(self.children), 1 else: full_needed, empty_needed = 1, len(self.children) for child in self.children: try: sql, params = compiler.compile(child) except EmptyResultSet: empty_needed -= 1 else: if sql: result.append(sql) result_params.extend(params) else: full_needed -= 1 # Check if this node matches nothing or everything. # First check the amount of full nodes and empty nodes # to make this node empty/full. # Now, check if this node is full/empty using the # counts. if empty_needed == 0: if self.negated: return '', [] else: raise EmptyResultSet if full_needed == 0: if self.negated: raise EmptyResultSet else: return '', [] conn = ' %s ' % self.connector sql_string = conn.join(result) if sql_string: if self.negated: # Some backends (Oracle at least) need parentheses # around the inner SQL in the negated case, even if the # inner SQL contains just a single expression. 
sql_string = 'NOT (%s)' % sql_string elif len(result) > 1 or self.resolved: sql_string = '(%s)' % sql_string return sql_string, result_params def get_group_by_cols(self, alias=None): cols = [] for child in self.children: cols.extend(child.get_group_by_cols()) return cols def get_source_expressions(self): return self.children[:] def set_source_expressions(self, children): assert len(children) == len(self.children) self.children = children def relabel_aliases(self, change_map): """ Relabel the alias values of any children. 'change_map' is a dictionary mapping old (current) alias values to the new values. """ for pos, child in enumerate(self.children): if hasattr(child, 'relabel_aliases'): # For example another WhereNode child.relabel_aliases(change_map) elif hasattr(child, 'relabeled_clone'): self.children[pos] = child.relabeled_clone(change_map) def clone(self): """ Create a clone of the tree. Must only be called on root nodes (nodes with empty subtree_parents). Childs must be either (Constraint, lookup, value) tuples, or objects supporting .clone(). """ clone = self.__class__._new_instance( children=[], connector=self.connector, negated=self.negated) for child in self.children: if hasattr(child, 'clone'): clone.children.append(child.clone()) else: clone.children.append(child) return clone def relabeled_clone(self, change_map): clone = self.clone() clone.relabel_aliases(change_map) return clone def copy(self): return self.clone() @classmethod def _contains_aggregate(cls, obj): if isinstance(obj, tree.Node): return any(cls._contains_aggregate(c) for c in obj.children) return obj.contains_aggregate @cached_property def contains_aggregate(self): return self._contains_aggregate(self) @classmethod def _contains_over_clause(cls, obj): if isinstance(obj, tree.Node): return any(cls._contains_over_clause(c) for c in obj.children) return obj.contains_over_clause @cached_property def contains_over_clause(self): return self._contains_over_clause(self) @property def is_summary(self): return any(child.is_summary for child in self.children) @staticmethod def _resolve_leaf(expr, query, *args, **kwargs): if hasattr(expr, 'resolve_expression'): expr = expr.resolve_expression(query, *args, **kwargs) return expr @classmethod def _resolve_node(cls, node, query, *args, **kwargs): if hasattr(node, 'children'): for child in node.children: cls._resolve_node(child, query, *args, **kwargs) if hasattr(node, 'lhs'): node.lhs = cls._resolve_leaf(node.lhs, query, *args, **kwargs) if hasattr(node, 'rhs'): node.rhs = cls._resolve_leaf(node.rhs, query, *args, **kwargs) def resolve_expression(self, *args, **kwargs): clone = self.clone() clone._resolve_node(clone, *args, **kwargs) clone.resolved = True return clone @cached_property def output_field(self): from django.db.models import BooleanField return BooleanField() def select_format(self, compiler, sql, params): # Wrap filters with a CASE WHEN expression if a database backend # (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP # BY list. 
if not compiler.connection.features.supports_boolean_expr_in_select_clause: sql = f'CASE WHEN {sql} THEN 1 ELSE 0 END' return sql, params def get_db_converters(self, connection): return self.output_field.get_db_converters(connection) def get_lookup(self, lookup): return self.output_field.get_lookup(lookup) class NothingNode: """A node that matches nothing.""" contains_aggregate = False def as_sql(self, compiler=None, connection=None): raise EmptyResultSet class ExtraWhere: # The contents are a black box - assume no aggregates are used. contains_aggregate = False def __init__(self, sqls, params): self.sqls = sqls self.params = params def as_sql(self, compiler=None, connection=None): sqls = ["(%s)" % sql for sql in self.sqls] return " AND ".join(sqls), list(self.params or ()) class SubqueryConstraint: # Even if aggregates would be used in a subquery, the outer query isn't # interested about those. contains_aggregate = False def __init__(self, alias, columns, targets, query_object): self.alias = alias self.columns = columns self.targets = targets query_object.clear_ordering(clear_default=True) self.query_object = query_object def as_sql(self, compiler, connection): query = self.query_object query.set_values(self.targets) query_compiler = query.get_compiler(connection=connection) return query_compiler.as_subquery_condition(self.alias, self.columns, compiler)
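# ---------------------------------------------------------------------------
# Hedged sketch (illustration only; not part of the module above) of the
# WhereNode.as_sql() semantics documented earlier: each child is compiled
# separately and the results are joined with the node's connector, with an
# extra pair of parentheses once more than one child contributed SQL. The
# stub compiler is an assumption for this sketch; WhereNode, ExtraWhere, and
# OR are the real objects defined above.
def _where_tree_sketch():
    class _StubCompiler:
        def compile(self, node):
            return node.as_sql()

    node = WhereNode(connector=OR)
    node.add(ExtraWhere(['"price" > %s'], [10]), OR)
    node.add(ExtraWhere(['"stock" = %s'], [0]), OR)
    # Returns ('(("price" > %s) OR ("stock" = %s))', [10, 0]).
    return node.as_sql(_StubCompiler(), connection=None)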
""" Useful auxiliary data structures for query construction. Not useful outside the SQL domain. """ from django.db.models.sql.constants import INNER, LOUTER class MultiJoin(Exception): """ Used by join construction code to indicate the point at which a multi-valued join was attempted (if the caller wants to treat that exceptionally). """ def __init__(self, names_pos, path_with_names): self.level = names_pos # The path travelled, this includes the path to the multijoin. self.names_with_path = path_with_names class Empty: pass class Join: """ Used by sql.Query and sql.SQLCompiler to generate JOIN clauses into the FROM entry. For example, the SQL generated could be LEFT OUTER JOIN "sometable" T1 ON ("othertable"."sometable_id" = "sometable"."id") This class is primarily used in Query.alias_map. All entries in alias_map must be Join compatible by providing the following attributes and methods: - table_name (string) - table_alias (possible alias for the table, can be None) - join_type (can be None for those entries that aren't joined from anything) - parent_alias (which table is this join's parent, can be None similarly to join_type) - as_sql() - relabeled_clone() """ def __init__(self, table_name, parent_alias, table_alias, join_type, join_field, nullable, filtered_relation=None): # Join table self.table_name = table_name self.parent_alias = parent_alias # Note: table_alias is not necessarily known at instantiation time. self.table_alias = table_alias # LOUTER or INNER self.join_type = join_type # A list of 2-tuples to use in the ON clause of the JOIN. # Each 2-tuple will create one join condition in the ON clause. self.join_cols = join_field.get_joining_columns() # Along which field (or ForeignObjectRel in the reverse join case) self.join_field = join_field # Is this join nullabled? self.nullable = nullable self.filtered_relation = filtered_relation def as_sql(self, compiler, connection): """ Generate the full LEFT OUTER JOIN sometable ON sometable.somecol = othertable.othercol, params clause for this join. """ join_conditions = [] params = [] qn = compiler.quote_name_unless_alias qn2 = connection.ops.quote_name # Add a join condition for each pair of joining columns. for lhs_col, rhs_col in self.join_cols: join_conditions.append('%s.%s = %s.%s' % ( qn(self.parent_alias), qn2(lhs_col), qn(self.table_alias), qn2(rhs_col), )) # Add a single condition inside parentheses for whatever # get_extra_restriction() returns. extra_cond = self.join_field.get_extra_restriction(self.table_alias, self.parent_alias) if extra_cond: extra_sql, extra_params = compiler.compile(extra_cond) join_conditions.append('(%s)' % extra_sql) params.extend(extra_params) if self.filtered_relation: extra_sql, extra_params = compiler.compile(self.filtered_relation) if extra_sql: join_conditions.append('(%s)' % extra_sql) params.extend(extra_params) if not join_conditions: # This might be a rel on the other end of an actual declared field. declared_field = getattr(self.join_field, 'field', self.join_field) raise ValueError( "Join generated an empty ON clause. %s did not yield either " "joining columns or extra restrictions." 
% declared_field.__class__ ) on_clause_sql = ' AND '.join(join_conditions) alias_str = '' if self.table_alias == self.table_name else (' %s' % self.table_alias) sql = '%s %s%s ON (%s)' % (self.join_type, qn(self.table_name), alias_str, on_clause_sql) return sql, params def relabeled_clone(self, change_map): new_parent_alias = change_map.get(self.parent_alias, self.parent_alias) new_table_alias = change_map.get(self.table_alias, self.table_alias) if self.filtered_relation is not None: filtered_relation = self.filtered_relation.clone() filtered_relation.path = [change_map.get(p, p) for p in self.filtered_relation.path] else: filtered_relation = None return self.__class__( self.table_name, new_parent_alias, new_table_alias, self.join_type, self.join_field, self.nullable, filtered_relation=filtered_relation, ) @property def identity(self): return ( self.__class__, self.table_name, self.parent_alias, self.join_field, self.filtered_relation, ) def __eq__(self, other): if not isinstance(other, Join): return NotImplemented return self.identity == other.identity def __hash__(self): return hash(self.identity) def equals(self, other): # Ignore filtered_relation in equality check. return self.identity[:-1] == other.identity[:-1] def demote(self): new = self.relabeled_clone({}) new.join_type = INNER return new def promote(self): new = self.relabeled_clone({}) new.join_type = LOUTER return new class BaseTable: """ The BaseTable class is used for base table references in FROM clause. For example, the SQL "foo" in SELECT * FROM "foo" WHERE somecond could be generated by this class. """ join_type = None parent_alias = None filtered_relation = None def __init__(self, table_name, alias): self.table_name = table_name self.table_alias = alias def as_sql(self, compiler, connection): alias_str = '' if self.table_alias == self.table_name else (' %s' % self.table_alias) base_sql = compiler.quote_name_unless_alias(self.table_name) return base_sql + alias_str, [] def relabeled_clone(self, change_map): return self.__class__(self.table_name, change_map.get(self.table_alias, self.table_alias)) @property def identity(self): return self.__class__, self.table_name, self.table_alias def __eq__(self, other): if not isinstance(other, BaseTable): return NotImplemented return self.identity == other.identity def __hash__(self): return hash(self.identity) def equals(self, other): return self.identity == other.identity
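# ---------------------------------------------------------------------------
# Hedged sketch (illustration only; not part of the module above): BaseTable
# is the simplest alias_map entry. It renders the root FROM reference and
# appends the alias only when it differs from the table name. The stub
# compiler is an assumption for this sketch.
def _base_table_sketch():
    class _StubCompiler:
        def quote_name_unless_alias(self, name):
            return '"%s"' % name

    entry = BaseTable('app_book', 'T1')
    # Returns ('"app_book" T1', []); with alias == table name, no alias
    # string is emitted.
    return entry.as_sql(_StubCompiler(), connection=None)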
""" Query subclasses which provide extra functionality beyond simple data retrieval. """ from django.core.exceptions import FieldError from django.db.models.sql.constants import ( CURSOR, GET_ITERATOR_CHUNK_SIZE, NO_RESULTS, ) from django.db.models.sql.query import Query __all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'AggregateQuery'] class DeleteQuery(Query): """A DELETE SQL query.""" compiler = 'SQLDeleteCompiler' def do_query(self, table, where, using): self.alias_map = {table: self.alias_map[table]} self.where = where cursor = self.get_compiler(using).execute_sql(CURSOR) if cursor: with cursor: return cursor.rowcount return 0 def delete_batch(self, pk_list, using): """ Set up and execute delete queries for all the objects in pk_list. More than one physical query may be executed if there are a lot of values in pk_list. """ # number of objects deleted num_deleted = 0 field = self.get_meta().pk for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE): self.clear_where() self.add_filter( f'{field.attname}__in', pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE], ) num_deleted += self.do_query(self.get_meta().db_table, self.where, using=using) return num_deleted class UpdateQuery(Query): """An UPDATE SQL query.""" compiler = 'SQLUpdateCompiler' def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._setup_query() def _setup_query(self): """ Run on initialization and at the end of chaining. Any attributes that would normally be set in __init__() should go here instead. """ self.values = [] self.related_ids = None self.related_updates = {} def clone(self): obj = super().clone() obj.related_updates = self.related_updates.copy() return obj def update_batch(self, pk_list, values, using): self.add_update_values(values) for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE): self.clear_where() self.add_filter('pk__in', pk_list[offset: offset + GET_ITERATOR_CHUNK_SIZE]) self.get_compiler(using).execute_sql(NO_RESULTS) def add_update_values(self, values): """ Convert a dictionary of field name to value mappings into an update query. This is the entry point for the public update() method on querysets. """ values_seq = [] for name, val in values.items(): field = self.get_meta().get_field(name) direct = not (field.auto_created and not field.concrete) or not field.concrete model = field.model._meta.concrete_model if not direct or (field.is_relation and field.many_to_many): raise FieldError( 'Cannot update model field %r (only non-relations and ' 'foreign keys permitted).' % field ) if model is not self.get_meta().concrete_model: self.add_related_update(model, field, val) continue values_seq.append((field, model, val)) return self.add_update_fields(values_seq) def add_update_fields(self, values_seq): """ Append a sequence of (field, model, value) triples to the internal list that will be used to generate the UPDATE query. Might be more usefully called add_update_targets() to hint at the extra information here. """ for field, model, val in values_seq: if hasattr(val, 'resolve_expression'): # Resolve expressions here so that annotations are no longer needed val = val.resolve_expression(self, allow_joins=False, for_save=True) self.values.append((field, model, val)) def add_related_update(self, model, field, value): """ Add (name, value) to an update query for an ancestor model. Update are coalesced so that only one update query per ancestor is run. 
""" self.related_updates.setdefault(model, []).append((field, None, value)) def get_related_updates(self): """ Return a list of query objects: one for each update required to an ancestor model. Each query will have the same filtering conditions as the current query but will only update a single table. """ if not self.related_updates: return [] result = [] for model, values in self.related_updates.items(): query = UpdateQuery(model) query.values = values if self.related_ids is not None: query.add_filter('pk__in', self.related_ids) result.append(query) return result class InsertQuery(Query): compiler = 'SQLInsertCompiler' def __init__(self, *args, ignore_conflicts=False, **kwargs): super().__init__(*args, **kwargs) self.fields = [] self.objs = [] self.ignore_conflicts = ignore_conflicts def insert_values(self, fields, objs, raw=False): self.fields = fields self.objs = objs self.raw = raw class AggregateQuery(Query): """ Take another query as a parameter to the FROM clause and only select the elements in the provided list. """ compiler = 'SQLAggregateCompiler' def __init__(self, model, inner_query): self.inner_query = inner_query super().__init__(model)
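# ---------------------------------------------------------------------------
# Hedged sketch (illustration only; not part of the module above) of how the
# public QuerySet.update() path reaches UpdateQuery: the queryset's Query is
# re-chained as an UpdateQuery and the new values are attached with
# add_update_values(), which routes inherited fields to per-ancestor update
# queries. The helper name is an assumption for this sketch.
def _update_query_sketch(queryset, **new_values):
    query = queryset.query.chain(UpdateQuery)
    query.add_update_values(new_values)
    # The SQLUpdateCompiler then emits one UPDATE for this table, plus one
    # more per ancestor model returned by get_related_updates().
    return query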
from django.db import DatabaseError, InterfaceError from django.db.backends.base.features import BaseDatabaseFeatures from django.utils.functional import cached_property class DatabaseFeatures(BaseDatabaseFeatures): # Oracle crashes with "ORA-00932: inconsistent datatypes: expected - got # BLOB" when grouping by LOBs (#24096). allows_group_by_lob = False interprets_empty_strings_as_nulls = True has_select_for_update = True has_select_for_update_nowait = True has_select_for_update_skip_locked = True has_select_for_update_of = True select_for_update_of_column = True can_return_columns_from_insert = True supports_subqueries_in_group_by = False ignores_unnecessary_order_by_in_subqueries = False supports_transactions = True supports_timezones = False has_native_duration_field = True can_defer_constraint_checks = True supports_partially_nullable_unique_constraints = False supports_deferrable_unique_constraints = True truncates_names = True supports_tablespaces = True supports_sequence_reset = False can_introspect_materialized_views = True atomic_transactions = False supports_combined_alters = False nulls_order_largest = True requires_literal_defaults = True closed_cursor_error_class = InterfaceError bare_select_suffix = " FROM DUAL" # select for update with limit can be achieved on Oracle, but not with the current backend. supports_select_for_update_with_limit = False supports_temporal_subtraction = True # Oracle doesn't ignore quoted identifiers case but the current backend # does by uppercasing all identifiers. ignores_table_name_case = True supports_index_on_text_field = False has_case_insensitive_like = False create_test_procedure_without_params_sql = """ CREATE PROCEDURE "TEST_PROCEDURE" AS V_I INTEGER; BEGIN V_I := 1; END; """ create_test_procedure_with_int_param_sql = """ CREATE PROCEDURE "TEST_PROCEDURE" (P_I INTEGER) AS V_I INTEGER; BEGIN V_I := P_I; END; """ supports_callproc_kwargs = True supports_over_clause = True supports_frame_range_fixed_distance = True supports_ignore_conflicts = False max_query_params = 2**16 - 1 supports_partial_indexes = False supports_slicing_ordering_in_compound = True allows_multiple_constraints_on_same_fields = False supports_boolean_expr_in_select_clause = False supports_primitives_in_json_field = False supports_json_field_contains = False supports_collation_on_textfield = False test_collations = { 'ci': 'BINARY_CI', 'cs': 'BINARY', 'non_default': 'SWEDISH_CI', 'swedish_ci': 'SWEDISH_CI', } django_test_skips = { "Oracle doesn't support SHA224.": { 'db_functions.text.test_sha224.SHA224Tests.test_basic', 'db_functions.text.test_sha224.SHA224Tests.test_transform', }, "Oracle doesn't support bitwise XOR.": { 'expressions.tests.ExpressionOperatorTests.test_lefthand_bitwise_xor', 'expressions.tests.ExpressionOperatorTests.test_lefthand_bitwise_xor_null', }, "Oracle requires ORDER BY in row_number, ANSI:SQL doesn't.": { 'expressions_window.tests.WindowFunctionTests.test_row_number_no_ordering', }, 'Raises ORA-00600: internal error code.': { 'model_fields.test_jsonfield.TestQuerying.test_usage_in_subquery', }, } django_test_expected_failures = { # A bug in Django/cx_Oracle with respect to string handling (#23843). 
'annotations.tests.NonAggregateAnnotationTestCase.test_custom_functions', 'annotations.tests.NonAggregateAnnotationTestCase.test_custom_functions_can_ref_other_functions', } @cached_property def introspected_field_types(self): return { **super().introspected_field_types, 'GenericIPAddressField': 'CharField', 'PositiveBigIntegerField': 'BigIntegerField', 'PositiveIntegerField': 'IntegerField', 'PositiveSmallIntegerField': 'IntegerField', 'SmallIntegerField': 'IntegerField', 'TimeField': 'DateTimeField', } @cached_property def supports_collation_on_charfield(self): with self.connection.cursor() as cursor: try: cursor.execute("SELECT CAST('a' AS VARCHAR2(4001)) FROM dual") except DatabaseError as e: if e.args[0].code == 910: return False raise return True
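# ---------------------------------------------------------------------------
# Hedged sketch (illustration only; not part of the module above): feature
# flags such as those defined here are consulted at runtime through
# connection.features. For example, supports_boolean_expr_in_select_clause
# (False for Oracle) is exactly what WhereNode.select_format() checks before
# wrapping a boolean condition for the SELECT list.
def _boolean_select_sketch(condition_sql):
    from django.db import connection

    if not connection.features.supports_boolean_expr_in_select_clause:
        # Oracle path: boolean expressions aren't allowed in SELECT lists.
        return 'CASE WHEN %s THEN 1 ELSE 0 END' % condition_sql
    return condition_sql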
import datetime import uuid from functools import lru_cache from django.conf import settings from django.db import DatabaseError, NotSupportedError from django.db.backends.base.operations import BaseDatabaseOperations from django.db.backends.utils import strip_quotes, truncate_name from django.db.models import AutoField, Exists, ExpressionWrapper, Lookup from django.db.models.expressions import RawSQL from django.db.models.sql.where import WhereNode from django.utils import timezone from django.utils.encoding import force_bytes, force_str from django.utils.functional import cached_property from django.utils.regex_helper import _lazy_re_compile from .base import Database from .utils import BulkInsertMapper, InsertVar, Oracle_datetime class DatabaseOperations(BaseDatabaseOperations): # Oracle uses NUMBER(5), NUMBER(11), and NUMBER(19) for integer fields. # SmallIntegerField uses NUMBER(11) instead of NUMBER(5), which is used by # SmallAutoField, to preserve backward compatibility. integer_field_ranges = { 'SmallIntegerField': (-99999999999, 99999999999), 'IntegerField': (-99999999999, 99999999999), 'BigIntegerField': (-9999999999999999999, 9999999999999999999), 'PositiveBigIntegerField': (0, 9999999999999999999), 'PositiveSmallIntegerField': (0, 99999999999), 'PositiveIntegerField': (0, 99999999999), 'SmallAutoField': (-99999, 99999), 'AutoField': (-99999999999, 99999999999), 'BigAutoField': (-9999999999999999999, 9999999999999999999), } set_operators = {**BaseDatabaseOperations.set_operators, 'difference': 'MINUS'} # TODO: colorize this SQL code with style.SQL_KEYWORD(), etc. _sequence_reset_sql = """ DECLARE table_value integer; seq_value integer; seq_name user_tab_identity_cols.sequence_name%%TYPE; BEGIN BEGIN SELECT sequence_name INTO seq_name FROM user_tab_identity_cols WHERE table_name = '%(table_name)s' AND column_name = '%(column_name)s'; EXCEPTION WHEN NO_DATA_FOUND THEN seq_name := '%(no_autofield_sequence_name)s'; END; SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s; SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences WHERE sequence_name = seq_name; WHILE table_value > seq_value LOOP EXECUTE IMMEDIATE 'SELECT "'||seq_name||'".nextval FROM DUAL' INTO seq_value; END LOOP; END; /""" # Oracle doesn't support string without precision; use the max string size. cast_char_field_without_max_length = 'NVARCHAR2(2000)' cast_data_types = { 'AutoField': 'NUMBER(11)', 'BigAutoField': 'NUMBER(19)', 'SmallAutoField': 'NUMBER(5)', 'TextField': cast_char_field_without_max_length, } def cache_key_culling_sql(self): return 'SELECT cache_key FROM %s ORDER BY cache_key OFFSET %%s ROWS FETCH FIRST 1 ROWS ONLY' def date_extract_sql(self, lookup_type, field_name): if lookup_type == 'week_day': # TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday. 
return "TO_CHAR(%s, 'D')" % field_name elif lookup_type == 'iso_week_day': return "TO_CHAR(%s - 1, 'D')" % field_name elif lookup_type == 'week': # IW = ISO week number return "TO_CHAR(%s, 'IW')" % field_name elif lookup_type == 'quarter': return "TO_CHAR(%s, 'Q')" % field_name elif lookup_type == 'iso_year': return "TO_CHAR(%s, 'IYYY')" % field_name else: # https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/EXTRACT-datetime.html return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name) def date_trunc_sql(self, lookup_type, field_name, tzname=None): field_name = self._convert_field_to_tz(field_name, tzname) # https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/ROUND-and-TRUNC-Date-Functions.html if lookup_type in ('year', 'month'): return "TRUNC(%s, '%s')" % (field_name, lookup_type.upper()) elif lookup_type == 'quarter': return "TRUNC(%s, 'Q')" % field_name elif lookup_type == 'week': return "TRUNC(%s, 'IW')" % field_name else: return "TRUNC(%s)" % field_name # Oracle crashes with "ORA-03113: end-of-file on communication channel" # if the time zone name is passed in parameter. Use interpolation instead. # https://groups.google.com/forum/#!msg/django-developers/zwQju7hbG78/9l934yelwfsJ # This regexp matches all time zone names from the zoneinfo database. _tzname_re = _lazy_re_compile(r'^[\w/:+-]+$') def _prepare_tzname_delta(self, tzname): if '+' in tzname: return tzname[tzname.find('+'):] elif '-' in tzname: return tzname[tzname.find('-'):] return tzname def _convert_field_to_tz(self, field_name, tzname): if not (settings.USE_TZ and tzname): return field_name if not self._tzname_re.match(tzname): raise ValueError("Invalid time zone name: %s" % tzname) # Convert from connection timezone to the local time, returning # TIMESTAMP WITH TIME ZONE and cast it back to TIMESTAMP to strip the # TIME ZONE details. if self.connection.timezone_name != tzname: return "CAST((FROM_TZ(%s, '%s') AT TIME ZONE '%s') AS TIMESTAMP)" % ( field_name, self.connection.timezone_name, self._prepare_tzname_delta(tzname), ) return field_name def datetime_cast_date_sql(self, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return 'TRUNC(%s)' % field_name def datetime_cast_time_sql(self, field_name, tzname): # Since `TimeField` values are stored as TIMESTAMP change to the # default date and convert the field to the specified timezone. 
convert_datetime_sql = ( "TO_TIMESTAMP(CONCAT('1900-01-01 ', TO_CHAR(%s, 'HH24:MI:SS.FF')), " "'YYYY-MM-DD HH24:MI:SS.FF')" ) % self._convert_field_to_tz(field_name, tzname) return "CASE WHEN %s IS NOT NULL THEN %s ELSE NULL END" % ( field_name, convert_datetime_sql, ) def datetime_extract_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) return self.date_extract_sql(lookup_type, field_name) def datetime_trunc_sql(self, lookup_type, field_name, tzname): field_name = self._convert_field_to_tz(field_name, tzname) # https://docs.oracle.com/en/database/oracle/oracle-database/18/sqlrf/ROUND-and-TRUNC-Date-Functions.html if lookup_type in ('year', 'month'): sql = "TRUNC(%s, '%s')" % (field_name, lookup_type.upper()) elif lookup_type == 'quarter': sql = "TRUNC(%s, 'Q')" % field_name elif lookup_type == 'week': sql = "TRUNC(%s, 'IW')" % field_name elif lookup_type == 'day': sql = "TRUNC(%s)" % field_name elif lookup_type == 'hour': sql = "TRUNC(%s, 'HH24')" % field_name elif lookup_type == 'minute': sql = "TRUNC(%s, 'MI')" % field_name else: sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision. return sql def time_trunc_sql(self, lookup_type, field_name, tzname=None): # The implementation is similar to `datetime_trunc_sql` as both # `DateTimeField` and `TimeField` are stored as TIMESTAMP where # the date part of the later is ignored. field_name = self._convert_field_to_tz(field_name, tzname) if lookup_type == 'hour': sql = "TRUNC(%s, 'HH24')" % field_name elif lookup_type == 'minute': sql = "TRUNC(%s, 'MI')" % field_name elif lookup_type == 'second': sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision. return sql def get_db_converters(self, expression): converters = super().get_db_converters(expression) internal_type = expression.output_field.get_internal_type() if internal_type in ['JSONField', 'TextField']: converters.append(self.convert_textfield_value) elif internal_type == 'BinaryField': converters.append(self.convert_binaryfield_value) elif internal_type == 'BooleanField': converters.append(self.convert_booleanfield_value) elif internal_type == 'DateTimeField': if settings.USE_TZ: converters.append(self.convert_datetimefield_value) elif internal_type == 'DateField': converters.append(self.convert_datefield_value) elif internal_type == 'TimeField': converters.append(self.convert_timefield_value) elif internal_type == 'UUIDField': converters.append(self.convert_uuidfield_value) # Oracle stores empty strings as null. If the field accepts the empty # string, undo this to adhere to the Django convention of using # the empty string instead of null. if expression.output_field.empty_strings_allowed: converters.append( self.convert_empty_bytes if internal_type == 'BinaryField' else self.convert_empty_string ) return converters def convert_textfield_value(self, value, expression, connection): if isinstance(value, Database.LOB): value = value.read() return value def convert_binaryfield_value(self, value, expression, connection): if isinstance(value, Database.LOB): value = force_bytes(value.read()) return value def convert_booleanfield_value(self, value, expression, connection): if value in (0, 1): value = bool(value) return value # cx_Oracle always returns datetime.datetime objects for # DATE and TIMESTAMP columns, but Django wants to see a # python datetime.date, .time, or .datetime. 
def convert_datetimefield_value(self, value, expression, connection): if value is not None: value = timezone.make_aware(value, self.connection.timezone) return value def convert_datefield_value(self, value, expression, connection): if isinstance(value, Database.Timestamp): value = value.date() return value def convert_timefield_value(self, value, expression, connection): if isinstance(value, Database.Timestamp): value = value.time() return value def convert_uuidfield_value(self, value, expression, connection): if value is not None: value = uuid.UUID(value) return value @staticmethod def convert_empty_string(value, expression, connection): return '' if value is None else value @staticmethod def convert_empty_bytes(value, expression, connection): return b'' if value is None else value def deferrable_sql(self): return " DEFERRABLE INITIALLY DEFERRED" def fetch_returned_insert_columns(self, cursor, returning_params): columns = [] for param in returning_params: value = param.get_value() if value == []: raise DatabaseError( 'The database did not return a new row id. Probably ' '"ORA-1403: no data found" was raised internally but was ' 'hidden by the Oracle OCI library (see ' 'https://code.djangoproject.com/ticket/28859).' ) columns.append(value[0]) return tuple(columns) def field_cast_sql(self, db_type, internal_type): if db_type and db_type.endswith('LOB') and internal_type != 'JSONField': return "DBMS_LOB.SUBSTR(%s)" else: return "%s" def no_limit_value(self): return None def limit_offset_sql(self, low_mark, high_mark): fetch, offset = self._get_limit_offset_params(low_mark, high_mark) return ' '.join(sql for sql in ( ('OFFSET %d ROWS' % offset) if offset else None, ('FETCH FIRST %d ROWS ONLY' % fetch) if fetch else None, ) if sql) def last_executed_query(self, cursor, sql, params): # https://cx-oracle.readthedocs.io/en/latest/cursor.html#Cursor.statement # The DB API definition does not define this attribute. statement = cursor.statement # Unlike Psycopg's `query` and MySQLdb`'s `_executed`, cx_Oracle's # `statement` doesn't contain the query parameters. Substitute # parameters manually. if isinstance(params, (tuple, list)): for i, param in enumerate(params): statement = statement.replace(':arg%d' % i, force_str(param, errors='replace')) elif isinstance(params, dict): for key, param in params.items(): statement = statement.replace(':%s' % key, force_str(param, errors='replace')) return statement def last_insert_id(self, cursor, table_name, pk_name): sq_name = self._get_sequence_name(cursor, strip_quotes(table_name), pk_name) cursor.execute('"%s".currval' % sq_name) return cursor.fetchone()[0] def lookup_cast(self, lookup_type, internal_type=None): if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'): return "UPPER(%s)" if internal_type == 'JSONField' and lookup_type == 'exact': return 'DBMS_LOB.SUBSTR(%s)' return "%s" def max_in_list_size(self): return 1000 def max_name_length(self): return 30 def pk_default_value(self): return "NULL" def prep_for_iexact_query(self, x): return x def process_clob(self, value): if value is None: return '' return value.read() def quote_name(self, name): # SQL92 requires delimited (quoted) names to be case-sensitive. When # not quoted, Oracle has case-insensitive behavior for identifiers, but # always defaults to uppercase. # We simplify things by making Oracle identifiers always uppercase. 
if not name.startswith('"') and not name.endswith('"'): name = '"%s"' % truncate_name(name, self.max_name_length()) # Oracle puts the query text into a (query % args) construct, so % signs # in names need to be escaped. The '%%' will be collapsed back to '%' at # that stage so we aren't really making the name longer here. name = name.replace('%', '%%') return name.upper() def regex_lookup(self, lookup_type): if lookup_type == 'regex': match_option = "'c'" else: match_option = "'i'" return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option def return_insert_columns(self, fields): if not fields: return '', () field_names = [] params = [] for field in fields: field_names.append('%s.%s' % ( self.quote_name(field.model._meta.db_table), self.quote_name(field.column), )) params.append(InsertVar(field)) return 'RETURNING %s INTO %s' % ( ', '.join(field_names), ', '.join(['%s'] * len(params)), ), tuple(params) def __foreign_key_constraints(self, table_name, recursive): with self.connection.cursor() as cursor: if recursive: cursor.execute(""" SELECT user_tables.table_name, rcons.constraint_name FROM user_tables JOIN user_constraints cons ON (user_tables.table_name = cons.table_name AND cons.constraint_type = ANY('P', 'U')) LEFT JOIN user_constraints rcons ON (user_tables.table_name = rcons.table_name AND rcons.constraint_type = 'R') START WITH user_tables.table_name = UPPER(%s) CONNECT BY NOCYCLE PRIOR cons.constraint_name = rcons.r_constraint_name GROUP BY user_tables.table_name, rcons.constraint_name HAVING user_tables.table_name != UPPER(%s) ORDER BY MAX(level) DESC """, (table_name, table_name)) else: cursor.execute(""" SELECT cons.table_name, cons.constraint_name FROM user_constraints cons WHERE cons.constraint_type = 'R' AND cons.table_name = UPPER(%s) """, (table_name,)) return cursor.fetchall() @cached_property def _foreign_key_constraints(self): # 512 is large enough to fit the ~330 tables (as of this writing) in # Django's test suite. return lru_cache(maxsize=512)(self.__foreign_key_constraints) def sql_flush(self, style, tables, *, reset_sequences=False, allow_cascade=False): if not tables: return [] truncated_tables = {table.upper() for table in tables} constraints = set() # Oracle's TRUNCATE CASCADE only works with ON DELETE CASCADE foreign # keys which Django doesn't define. Emulate the PostgreSQL behavior # which truncates all dependent tables by manually retrieving all # foreign key constraints and resolving dependencies. 
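        # Roughly (hypothetical tables), flushing "A" with allow_cascade=True
        # where "B" has a foreign key to "A" produces:
        #   ALTER TABLE "B" DISABLE CONSTRAINT "B_A_ID_FK" KEEP INDEX;
        #   TRUNCATE TABLE "A"; TRUNCATE TABLE "B";
        #   ALTER TABLE "B" ENABLE CONSTRAINT "B_A_ID_FK";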
for table in tables: for foreign_table, constraint in self._foreign_key_constraints(table, recursive=allow_cascade): if allow_cascade: truncated_tables.add(foreign_table) constraints.add((foreign_table, constraint)) sql = [ '%s %s %s %s %s %s %s %s;' % ( style.SQL_KEYWORD('ALTER'), style.SQL_KEYWORD('TABLE'), style.SQL_FIELD(self.quote_name(table)), style.SQL_KEYWORD('DISABLE'), style.SQL_KEYWORD('CONSTRAINT'), style.SQL_FIELD(self.quote_name(constraint)), style.SQL_KEYWORD('KEEP'), style.SQL_KEYWORD('INDEX'), ) for table, constraint in constraints ] + [ '%s %s %s;' % ( style.SQL_KEYWORD('TRUNCATE'), style.SQL_KEYWORD('TABLE'), style.SQL_FIELD(self.quote_name(table)), ) for table in truncated_tables ] + [ '%s %s %s %s %s %s;' % ( style.SQL_KEYWORD('ALTER'), style.SQL_KEYWORD('TABLE'), style.SQL_FIELD(self.quote_name(table)), style.SQL_KEYWORD('ENABLE'), style.SQL_KEYWORD('CONSTRAINT'), style.SQL_FIELD(self.quote_name(constraint)), ) for table, constraint in constraints ] if reset_sequences: sequences = [ sequence for sequence in self.connection.introspection.sequence_list() if sequence['table'].upper() in truncated_tables ] # Since we've just deleted all the rows, running our sequence ALTER # code will reset the sequence to 0. sql.extend(self.sequence_reset_by_name_sql(style, sequences)) return sql def sequence_reset_by_name_sql(self, style, sequences): sql = [] for sequence_info in sequences: no_autofield_sequence_name = self._get_no_autofield_sequence_name(sequence_info['table']) table = self.quote_name(sequence_info['table']) column = self.quote_name(sequence_info['column'] or 'id') query = self._sequence_reset_sql % { 'no_autofield_sequence_name': no_autofield_sequence_name, 'table': table, 'column': column, 'table_name': strip_quotes(table), 'column_name': strip_quotes(column), } sql.append(query) return sql def sequence_reset_sql(self, style, model_list): output = [] query = self._sequence_reset_sql for model in model_list: for f in model._meta.local_fields: if isinstance(f, AutoField): no_autofield_sequence_name = self._get_no_autofield_sequence_name(model._meta.db_table) table = self.quote_name(model._meta.db_table) column = self.quote_name(f.column) output.append(query % { 'no_autofield_sequence_name': no_autofield_sequence_name, 'table': table, 'column': column, 'table_name': strip_quotes(table), 'column_name': strip_quotes(column), }) # Only one AutoField is allowed per model, so don't # continue to loop break return output def start_transaction_sql(self): return '' def tablespace_sql(self, tablespace, inline=False): if inline: return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace) else: return "TABLESPACE %s" % self.quote_name(tablespace) def adapt_datefield_value(self, value): """ Transform a date value to an object compatible with what is expected by the backend driver for date columns. The default implementation transforms the date to text, but that is not necessary for Oracle. """ return value def adapt_datetimefield_value(self, value): """ Transform a datetime value to an object compatible with what is expected by the backend driver for datetime columns. If naive datetime is passed assumes that is in UTC. Normally Django models.DateTimeField makes sure that if USE_TZ is True passed datetime is timezone aware. """ if value is None: return None # Expression values are adapted by the database. 
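        # (e.g. an F() or Func() expression passes through unchanged here and
        # is rendered to SQL later rather than converted in Python.)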
if hasattr(value, 'resolve_expression'): return value # cx_Oracle doesn't support tz-aware datetimes if timezone.is_aware(value): if settings.USE_TZ: value = timezone.make_naive(value, self.connection.timezone) else: raise ValueError("Oracle backend does not support timezone-aware datetimes when USE_TZ is False.") return Oracle_datetime.from_datetime(value) def adapt_timefield_value(self, value): if value is None: return None # Expression values are adapted by the database. if hasattr(value, 'resolve_expression'): return value if isinstance(value, str): return datetime.datetime.strptime(value, '%H:%M:%S') # Oracle doesn't support tz-aware times if timezone.is_aware(value): raise ValueError("Oracle backend does not support timezone-aware times.") return Oracle_datetime(1900, 1, 1, value.hour, value.minute, value.second, value.microsecond) def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None): return value def combine_expression(self, connector, sub_expressions): lhs, rhs = sub_expressions if connector == '%%': return 'MOD(%s)' % ','.join(sub_expressions) elif connector == '&': return 'BITAND(%s)' % ','.join(sub_expressions) elif connector == '|': return 'BITAND(-%(lhs)s-1,%(rhs)s)+%(lhs)s' % {'lhs': lhs, 'rhs': rhs} elif connector == '<<': return '(%(lhs)s * POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs} elif connector == '>>': return 'FLOOR(%(lhs)s / POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs} elif connector == '^': return 'POWER(%s)' % ','.join(sub_expressions) elif connector == '#': raise NotSupportedError('Bitwise XOR is not supported in Oracle.') return super().combine_expression(connector, sub_expressions) def _get_no_autofield_sequence_name(self, table): """ Manually created sequence name to keep backward compatibility for AutoFields that aren't Oracle identity columns. """ name_length = self.max_name_length() - 3 return '%s_SQ' % truncate_name(strip_quotes(table), name_length).upper() def _get_sequence_name(self, cursor, table, pk_name): cursor.execute(""" SELECT sequence_name FROM user_tab_identity_cols WHERE table_name = UPPER(%s) AND column_name = UPPER(%s)""", [table, pk_name]) row = cursor.fetchone() return self._get_no_autofield_sequence_name(table) if row is None else row[0] def bulk_insert_sql(self, fields, placeholder_rows): query = [] for row in placeholder_rows: select = [] for i, placeholder in enumerate(row): # A model without any fields has fields=[None]. if fields[i]: internal_type = getattr(fields[i], 'target_field', fields[i]).get_internal_type() placeholder = BulkInsertMapper.types.get(internal_type, '%s') % placeholder # Add columns aliases to the first select to avoid "ORA-00918: # column ambiguously defined" when two or more columns in the # first select have the same value. if not query: placeholder = '%s col_%s' % (placeholder, i) select.append(placeholder) query.append('SELECT %s FROM DUAL' % ', '.join(select)) # Bulk insert to tables with Oracle identity columns causes Oracle to # add sequence.nextval to it. Sequence.nextval cannot be used with the # UNION operator. To prevent incorrect SQL, move UNION to a subquery. 
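        # Illustrative shape for two rows of two columns (placeholders only):
        #   SELECT * FROM (SELECT %s col_0, %s col_1 FROM DUAL
        #                  UNION ALL SELECT %s, %s FROM DUAL)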
return 'SELECT * FROM (%s)' % ' UNION ALL '.join(query) def subtract_temporals(self, internal_type, lhs, rhs): if internal_type == 'DateField': lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs params = (*lhs_params, *rhs_params) return "NUMTODSINTERVAL(TO_NUMBER(%s - %s), 'DAY')" % (lhs_sql, rhs_sql), params return super().subtract_temporals(internal_type, lhs, rhs) def bulk_batch_size(self, fields, objs): """Oracle restricts the number of parameters in a query.""" if fields: return self.connection.features.max_query_params // len(fields) return len(objs) def conditional_expression_supported_in_where_clause(self, expression): """ Oracle supports only EXISTS(...) or filters in the WHERE clause, others must be compared with True. """ if isinstance(expression, (Exists, Lookup, WhereNode)): return True if isinstance(expression, ExpressionWrapper) and expression.conditional: return self.conditional_expression_supported_in_where_clause(expression.expression) if isinstance(expression, RawSQL) and expression.conditional: return True return False
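    # Illustrative arithmetic for bulk_batch_size() above, assuming a
    # hypothetical max_query_params of 65535: inserting objects with 10
    # fields each allows 65535 // 10 == 6553 rows per batch.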
from django.db import ProgrammingError
from django.utils.functional import cached_property


class BaseDatabaseFeatures:
    gis_enabled = False
    # Oracle can't group by LOB (large object) data types.
    allows_group_by_lob = True
    allows_group_by_pk = False
    allows_group_by_selected_pks = False
    empty_fetchmany_value = []
    update_can_self_select = True

    # Does the backend distinguish between '' and None?
    interprets_empty_strings_as_nulls = False

    # Does the backend allow inserting duplicate NULL rows in a nullable
    # unique field? All core backends implement this correctly, but other
    # databases such as SQL Server do not.
    supports_nullable_unique_constraints = True

    # Does the backend allow inserting duplicate rows when a unique_together
    # constraint exists and some fields are nullable but not all of them?
    supports_partially_nullable_unique_constraints = True
    # Does the backend support initially deferrable unique constraints?
    supports_deferrable_unique_constraints = False

    can_use_chunked_reads = True
    can_return_columns_from_insert = False
    can_return_rows_from_bulk_insert = False
    has_bulk_insert = True
    uses_savepoints = True
    can_release_savepoints = False

    # If True, don't use integer foreign keys referring to, e.g., positive
    # integer primary keys.
    related_fields_match_type = False
    allow_sliced_subqueries_with_in = True
    has_select_for_update = False
    has_select_for_update_nowait = False
    has_select_for_update_skip_locked = False
    has_select_for_update_of = False
    has_select_for_no_key_update = False
    # Does the database's SELECT FOR UPDATE OF syntax require a column rather
    # than a table?
    select_for_update_of_column = False

    # Does the default test database allow multiple connections?
    # Usually an indication that the test database is in-memory.
    test_db_allows_multiple_connections = True

    # Can an object be saved without an explicit primary key?
    supports_unspecified_pk = False

    # Can a fixture contain forward references? i.e., are
    # FK constraints checked at the end of transaction, or
    # at the end of each save operation?
    supports_forward_references = True

    # Does the backend truncate names properly when they are too long?
    truncates_names = False

    # Is there a REAL datatype in addition to floats/doubles?
    has_real_datatype = False
    supports_subqueries_in_group_by = True

    # Does the backend ignore unnecessary ORDER BY clauses in subqueries?
    ignores_unnecessary_order_by_in_subqueries = True

    # Is there a true datatype for uuid?
    has_native_uuid_field = False

    # Is there a true datatype for timedeltas?
    has_native_duration_field = False

    # Does the database driver support same-type temporal data subtraction
    # by returning the type used to store duration fields?
    supports_temporal_subtraction = False

    # Does the __regex lookup support backreferencing and grouping?
    supports_regex_backreferencing = True

    # Can date/datetime lookups be performed using a string?
    supports_date_lookup_using_string = True

    # Can datetimes with timezones be used?
    supports_timezones = True

    # Does the database have a copy of the zoneinfo database?
    has_zoneinfo_database = True

    # When performing a GROUP BY, is an ORDER BY NULL required
    # to remove any ordering?
    requires_explicit_null_ordering_when_grouping = False

    # Does the backend order NULL values as largest or smallest?
    nulls_order_largest = False

    # Does the backend support NULLS FIRST and NULLS LAST in ORDER BY?
    supports_order_by_nulls_modifier = True

    # Does the backend order NULLS FIRST by default?
    order_by_nulls_first = False

    # The database's limit on the number of query parameters.
max_query_params = None # Can an object have an autoincrement primary key of 0? allows_auto_pk_0 = True # Do we need to NULL a ForeignKey out, or can the constraint check be # deferred can_defer_constraint_checks = False # date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas supports_mixed_date_datetime_comparisons = True # Does the backend support tablespaces? Default to False because it isn't # in the SQL standard. supports_tablespaces = False # Does the backend reset sequences between tests? supports_sequence_reset = True # Can the backend introspect the default value of a column? can_introspect_default = True # Confirm support for introspected foreign keys # Every database can do this reliably, except MySQL, # which can't do it for MyISAM tables can_introspect_foreign_keys = True # Map fields which some backends may not be able to differentiate to the # field it's introspected as. introspected_field_types = { 'AutoField': 'AutoField', 'BigAutoField': 'BigAutoField', 'BigIntegerField': 'BigIntegerField', 'BinaryField': 'BinaryField', 'BooleanField': 'BooleanField', 'CharField': 'CharField', 'DurationField': 'DurationField', 'GenericIPAddressField': 'GenericIPAddressField', 'IntegerField': 'IntegerField', 'PositiveBigIntegerField': 'PositiveBigIntegerField', 'PositiveIntegerField': 'PositiveIntegerField', 'PositiveSmallIntegerField': 'PositiveSmallIntegerField', 'SmallAutoField': 'SmallAutoField', 'SmallIntegerField': 'SmallIntegerField', 'TimeField': 'TimeField', } # Can the backend introspect the column order (ASC/DESC) for indexes? supports_index_column_ordering = True # Does the backend support introspection of materialized views? can_introspect_materialized_views = False # Support for the DISTINCT ON clause can_distinct_on_fields = False # Does the backend prevent running SQL queries in broken transactions? atomic_transactions = True # Can we roll back DDL in a transaction? can_rollback_ddl = False # Does it support operations requiring references rename in a transaction? supports_atomic_references_rename = True # Can we issue more than one ALTER COLUMN clause in an ALTER TABLE? supports_combined_alters = False # Does it support foreign keys? supports_foreign_keys = True # Can it create foreign key constraints inline when adding columns? can_create_inline_fk = True # Does it automatically index foreign keys? indexes_foreign_keys = True # Does it support CHECK constraints? supports_column_check_constraints = True supports_table_check_constraints = True # Does the backend support introspection of CHECK constraints? can_introspect_check_constraints = True # Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value}) # parameter passing? Note this can be provided by the backend even if not # supported by the Python driver supports_paramstyle_pyformat = True # Does the backend require literal defaults, rather than parameterized ones? requires_literal_defaults = False # Does the backend require a connection reset after each material schema change? connection_persists_old_columns = False # What kind of error does the backend throw when accessing closed cursor? closed_cursor_error_class = ProgrammingError # Does 'a' LIKE 'A' match? has_case_insensitive_like = True # Suffix for backends that don't support "SELECT xxx;" queries. bare_select_suffix = '' # If NULL is implied on columns without needing to be explicitly specified implied_column_null = False # Does the backend support "select for update" queries with limit (and offset)? 
supports_select_for_update_with_limit = True # Does the backend ignore null expressions in GREATEST and LEAST queries unless # every expression is null? greatest_least_ignores_nulls = False # Can the backend clone databases for parallel test execution? # Defaults to False to allow third-party backends to opt-in. can_clone_databases = False # Does the backend consider table names with different casing to # be equal? ignores_table_name_case = False # Place FOR UPDATE right after FROM clause. Used on MSSQL. for_update_after_from = False # Combinatorial flags supports_select_union = True supports_select_intersection = True supports_select_difference = True supports_slicing_ordering_in_compound = False supports_parentheses_in_compound = True # Does the database support SQL 2003 FILTER (WHERE ...) in aggregate # expressions? supports_aggregate_filter_clause = False # Does the backend support indexing a TextField? supports_index_on_text_field = True # Does the backend support window expressions (expression OVER (...))? supports_over_clause = False supports_frame_range_fixed_distance = False only_supports_unbounded_with_preceding_and_following = False # Does the backend support CAST with precision? supports_cast_with_precision = True # How many second decimals does the database return when casting a value to # a type with time? time_cast_precision = 6 # SQL to create a procedure for use by the Django test suite. The # functionality of the procedure isn't important. create_test_procedure_without_params_sql = None create_test_procedure_with_int_param_sql = None # Does the backend support keyword parameters for cursor.callproc()? supports_callproc_kwargs = False # What formats does the backend EXPLAIN syntax support? supported_explain_formats = set() # Does DatabaseOperations.explain_query_prefix() raise ValueError if # unknown kwargs are passed to QuerySet.explain()? validates_explain_options = True # Does the backend support the default parameter in lead() and lag()? supports_default_in_lead_lag = True # Does the backend support ignoring constraint or uniqueness errors during # INSERT? supports_ignore_conflicts = True # Does this backend require casting the results of CASE expressions used # in UPDATE statements to ensure the expression has the correct type? requires_casted_case_in_updates = False # Does the backend support partial indexes (CREATE INDEX ... WHERE ...)? supports_partial_indexes = True supports_functions_in_partial_indexes = True # Does the backend support covering indexes (CREATE INDEX ... INCLUDE ...)? supports_covering_indexes = False # Does the backend support indexes on expressions? supports_expression_indexes = True # Does the backend treat COLLATE as an indexed expression? collate_as_index_expression = False # Does the database allow more than one constraint or index on the same # field(s)? allows_multiple_constraints_on_same_fields = True # Does the backend support boolean expressions in SELECT and GROUP BY # clauses? supports_boolean_expr_in_select_clause = True # Does the backend support JSONField? supports_json_field = True # Can the backend introspect a JSONField? can_introspect_json_field = True # Does the backend support primitives in JSONField? supports_primitives_in_json_field = True # Is there a true datatype for JSON? has_native_json_field = False # Does the backend use PostgreSQL-style JSON operators like '->'? has_json_operators = False # Does the backend support __contains and __contained_by lookups for # a JSONField? 
supports_json_field_contains = True # Does value__d__contains={'f': 'g'} (without a list around the dict) match # {'d': [{'f': 'g'}]}? json_key_contains_list_matching_requires_list = False # Does the backend support JSONObject() database function? has_json_object_function = True # Does the backend support column collations? supports_collation_on_charfield = True supports_collation_on_textfield = True # Does the backend support non-deterministic collations? supports_non_deterministic_collations = True # Collation names for use by the Django test suite. test_collations = { 'ci': None, # Case-insensitive. 'cs': None, # Case-sensitive. 'non_default': None, # Non-default. 'swedish_ci': None # Swedish case-insensitive. } # A set of dotted paths to tests in Django's test suite that are expected # to fail on this database. django_test_expected_failures = set() # A map of reasons to sets of dotted paths to tests in Django's test suite # that should be skipped for this database. django_test_skips = {} def __init__(self, connection): self.connection = connection @cached_property def supports_explaining_query_execution(self): """Does this backend support explaining query execution?""" return self.connection.ops.explain_prefix is not None @cached_property def supports_transactions(self): """Confirm support for transactions.""" with self.connection.cursor() as cursor: cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)') self.connection.set_autocommit(False) cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)') self.connection.rollback() self.connection.set_autocommit(True) cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST') count, = cursor.fetchone() cursor.execute('DROP TABLE ROLLBACK_TEST') return count == 0 def allows_group_by_selected_pks_on_model(self, model): if not self.allows_group_by_selected_pks: return False return model._meta.managed
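    # Illustrative note on supports_transactions above: the probe creates
    # ROLLBACK_TEST, inserts a row inside an explicit transaction, rolls it
    # back, and reports support only if the table is empty afterwards, i.e.
    # the rollback actually undid the INSERT.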
import logging from datetime import datetime from django.db.backends.ddl_references import ( Columns, Expressions, ForeignKeyName, IndexName, Statement, Table, ) from django.db.backends.utils import names_digest, split_identifier from django.db.models import Deferrable, Index from django.db.models.sql import Query from django.db.transaction import TransactionManagementError, atomic from django.utils import timezone logger = logging.getLogger('django.db.backends.schema') def _is_relevant_relation(relation, altered_field): """ When altering the given field, must constraints on its model from the given relation be temporarily dropped? """ field = relation.field if field.many_to_many: # M2M reverse field return False if altered_field.primary_key and field.to_fields == [None]: # Foreign key constraint on the primary key, which is being altered. return True # Is the constraint targeting the field being altered? return altered_field.name in field.to_fields def _all_related_fields(model): return model._meta._get_fields(forward=False, reverse=True, include_hidden=True) def _related_non_m2m_objects(old_field, new_field): # Filter out m2m objects from reverse relations. # Return (old_relation, new_relation) tuples. related_fields = zip( (obj for obj in _all_related_fields(old_field.model) if _is_relevant_relation(obj, old_field)), (obj for obj in _all_related_fields(new_field.model) if _is_relevant_relation(obj, new_field)), ) for old_rel, new_rel in related_fields: yield old_rel, new_rel yield from _related_non_m2m_objects( old_rel.remote_field, new_rel.remote_field, ) class BaseDatabaseSchemaEditor: """ This class and its subclasses are responsible for emitting schema-changing statements to the databases - model creation/removal/alteration, field renaming, index fiddling, and so on. 
""" # Overrideable SQL templates sql_create_table = "CREATE TABLE %(table)s (%(definition)s)" sql_rename_table = "ALTER TABLE %(old_table)s RENAME TO %(new_table)s" sql_retablespace_table = "ALTER TABLE %(table)s SET TABLESPACE %(new_tablespace)s" sql_delete_table = "DROP TABLE %(table)s CASCADE" sql_create_column = "ALTER TABLE %(table)s ADD COLUMN %(column)s %(definition)s" sql_alter_column = "ALTER TABLE %(table)s %(changes)s" sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s" sql_alter_column_null = "ALTER COLUMN %(column)s DROP NOT NULL" sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL" sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s" sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT" sql_alter_column_no_default_null = sql_alter_column_no_default sql_alter_column_collate = "ALTER COLUMN %(column)s TYPE %(type)s%(collation)s" sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s CASCADE" sql_rename_column = "ALTER TABLE %(table)s RENAME COLUMN %(old_column)s TO %(new_column)s" sql_update_with_default = "UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL" sql_unique_constraint = "UNIQUE (%(columns)s)%(deferrable)s" sql_check_constraint = "CHECK (%(check)s)" sql_delete_constraint = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s" sql_constraint = "CONSTRAINT %(name)s %(constraint)s" sql_create_check = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s CHECK (%(check)s)" sql_delete_check = sql_delete_constraint sql_create_unique = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s UNIQUE (%(columns)s)%(deferrable)s" sql_delete_unique = sql_delete_constraint sql_create_fk = ( "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) " "REFERENCES %(to_table)s (%(to_column)s)%(deferrable)s" ) sql_create_inline_fk = None sql_create_column_inline_fk = None sql_delete_fk = sql_delete_constraint sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(include)s%(extra)s%(condition)s" sql_create_unique_index = "CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)%(include)s%(condition)s" sql_delete_index = "DROP INDEX %(name)s" sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)" sql_delete_pk = sql_delete_constraint sql_delete_procedure = 'DROP PROCEDURE %(procedure)s' def __init__(self, connection, collect_sql=False, atomic=True): self.connection = connection self.collect_sql = collect_sql if self.collect_sql: self.collected_sql = [] self.atomic_migration = self.connection.features.can_rollback_ddl and atomic # State-managing methods def __enter__(self): self.deferred_sql = [] if self.atomic_migration: self.atomic = atomic(self.connection.alias) self.atomic.__enter__() return self def __exit__(self, exc_type, exc_value, traceback): if exc_type is None: for sql in self.deferred_sql: self.execute(sql) if self.atomic_migration: self.atomic.__exit__(exc_type, exc_value, traceback) # Core utility functions def execute(self, sql, params=()): """Execute the given SQL statement, with optional parameters.""" # Don't perform the transactional DDL check if SQL is being collected # as it's not going to be executed anyway. if not self.collect_sql and self.connection.in_atomic_block and not self.connection.features.can_rollback_ddl: raise TransactionManagementError( "Executing DDL statements while in a transaction on databases " "that can't perform a rollback is prohibited." ) # Account for non-string statement objects. 
sql = str(sql) # Log the command we're running, then run it logger.debug("%s; (params %r)", sql, params, extra={'params': params, 'sql': sql}) if self.collect_sql: ending = "" if sql.rstrip().endswith(";") else ";" if params is not None: self.collected_sql.append((sql % tuple(map(self.quote_value, params))) + ending) else: self.collected_sql.append(sql + ending) else: with self.connection.cursor() as cursor: cursor.execute(sql, params) def quote_name(self, name): return self.connection.ops.quote_name(name) def table_sql(self, model): """Take a model and return its table definition.""" # Add any unique_togethers (always deferred, as some fields might be # created afterward, like geometry fields with some backends). for field_names in model._meta.unique_together: fields = [model._meta.get_field(field) for field in field_names] self.deferred_sql.append(self._create_unique_sql(model, fields)) # Create column SQL, add FK deferreds if needed. column_sqls = [] params = [] for field in model._meta.local_fields: # SQL. definition, extra_params = self.column_sql(model, field) if definition is None: continue # Check constraints can go on the column SQL here. db_params = field.db_parameters(connection=self.connection) if db_params['check']: definition += ' ' + self.sql_check_constraint % db_params # Autoincrement SQL (for backends with inline variant). col_type_suffix = field.db_type_suffix(connection=self.connection) if col_type_suffix: definition += ' %s' % col_type_suffix params.extend(extra_params) # FK. if field.remote_field and field.db_constraint: to_table = field.remote_field.model._meta.db_table to_column = field.remote_field.model._meta.get_field(field.remote_field.field_name).column if self.sql_create_inline_fk: definition += ' ' + self.sql_create_inline_fk % { 'to_table': self.quote_name(to_table), 'to_column': self.quote_name(to_column), } elif self.connection.features.supports_foreign_keys: self.deferred_sql.append(self._create_fk_sql(model, field, '_fk_%(to_table)s_%(to_column)s')) # Add the SQL to our big list. column_sqls.append('%s %s' % ( self.quote_name(field.column), definition, )) # Autoincrement SQL (for backends with post table definition # variant). if field.get_internal_type() in ('AutoField', 'BigAutoField', 'SmallAutoField'): autoinc_sql = self.connection.ops.autoinc_sql(model._meta.db_table, field.column) if autoinc_sql: self.deferred_sql.extend(autoinc_sql) constraints = [constraint.constraint_sql(model, self) for constraint in model._meta.constraints] sql = self.sql_create_table % { 'table': self.quote_name(model._meta.db_table), 'definition': ', '.join(constraint for constraint in (*column_sqls, *constraints) if constraint), } if model._meta.db_tablespace: tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace) if tablespace_sql: sql += ' ' + tablespace_sql return sql, params # Field <-> database mapping functions def _iter_column_sql(self, column_db_type, params, model, field, include_default): yield column_db_type collation = getattr(field, 'db_collation', None) if collation: yield self._collate_sql(collation) # Work out nullability. null = field.null # Include a default value, if requested. include_default = ( include_default and not self.skip_default(field) and # Don't include a default value if it's a nullable field and the # default cannot be dropped in the ALTER COLUMN statement (e.g. # MySQL longtext and longblob). 
not (null and self.skip_default_on_alter(field)) ) if include_default: default_value = self.effective_default(field) if default_value is not None: column_default = 'DEFAULT ' + self._column_default_sql(field) if self.connection.features.requires_literal_defaults: # Some databases can't take defaults as a parameter (Oracle). # If this is the case, the individual schema backend should # implement prepare_default(). yield column_default % self.prepare_default(default_value) else: yield column_default params.append(default_value) # Oracle treats the empty string ('') as null, so coerce the null # option whenever '' is a possible value. if (field.empty_strings_allowed and not field.primary_key and self.connection.features.interprets_empty_strings_as_nulls): null = True if not null: yield 'NOT NULL' elif not self.connection.features.implied_column_null: yield 'NULL' if field.primary_key: yield 'PRIMARY KEY' elif field.unique: yield 'UNIQUE' # Optionally add the tablespace if it's an implicitly indexed column. tablespace = field.db_tablespace or model._meta.db_tablespace if tablespace and self.connection.features.supports_tablespaces and field.unique: yield self.connection.ops.tablespace_sql(tablespace, inline=True) def column_sql(self, model, field, include_default=False): """ Return the column definition for a field. The field must already have had set_attributes_from_name() called. """ # Get the column's type and use that as the basis of the SQL. db_params = field.db_parameters(connection=self.connection) column_db_type = db_params['type'] # Check for fields that aren't actually columns (e.g. M2M). if column_db_type is None: return None, None params = [] return ' '.join( # This appends to the params being returned. self._iter_column_sql(column_db_type, params, model, field, include_default) ), params def skip_default(self, field): """ Some backends don't accept default values for certain columns types (i.e. MySQL longtext and longblob). """ return False def skip_default_on_alter(self, field): """ Some backends don't accept default values for certain columns types (i.e. MySQL longtext and longblob) in the ALTER COLUMN statement. """ return False def prepare_default(self, value): """ Only used for backends which have requires_literal_defaults feature """ raise NotImplementedError( 'subclasses of BaseDatabaseSchemaEditor for backends which have ' 'requires_literal_defaults must provide a prepare_default() method' ) def _column_default_sql(self, field): """ Return the SQL to use in a DEFAULT clause. The resulting string should contain a '%s' placeholder for a default value. """ return '%s' @staticmethod def _effective_default(field): # This method allows testing its logic without a connection. 
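        # Illustrative outcomes (hypothetical fields): CharField(blank=True)
        # with no explicit default yields '', DateField(auto_now_add=True)
        # yields the current date, and a plain nullable field yields None.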
if field.has_default(): default = field.get_default() elif not field.null and field.blank and field.empty_strings_allowed: if field.get_internal_type() == "BinaryField": default = b'' else: default = '' elif getattr(field, 'auto_now', False) or getattr(field, 'auto_now_add', False): internal_type = field.get_internal_type() if internal_type == 'DateTimeField': default = timezone.now() else: default = datetime.now() if internal_type == 'DateField': default = default.date() elif internal_type == 'TimeField': default = default.time() else: default = None return default def effective_default(self, field): """Return a field's effective database default value.""" return field.get_db_prep_save(self._effective_default(field), self.connection) def quote_value(self, value): """ Return a quoted version of the value so it's safe to use in an SQL string. This is not safe against injection from user code; it is intended only for use in making SQL scripts or preparing default values for particularly tricky backends (defaults are not user-defined, though, so this is safe). """ raise NotImplementedError() # Actions def create_model(self, model): """ Create a table and any accompanying indexes or unique constraints for the given `model`. """ sql, params = self.table_sql(model) # Prevent using [] as params, in the case a literal '%' is used in the definition self.execute(sql, params or None) # Add any field index and index_together's (deferred as SQLite _remake_table needs it) self.deferred_sql.extend(self._model_indexes_sql(model)) # Make M2M tables for field in model._meta.local_many_to_many: if field.remote_field.through._meta.auto_created: self.create_model(field.remote_field.through) def delete_model(self, model): """Delete a model from the database.""" # Handle auto-created intermediary models for field in model._meta.local_many_to_many: if field.remote_field.through._meta.auto_created: self.delete_model(field.remote_field.through) # Delete the table self.execute(self.sql_delete_table % { "table": self.quote_name(model._meta.db_table), }) # Remove all deferred statements referencing the deleted table. for sql in list(self.deferred_sql): if isinstance(sql, Statement) and sql.references_table(model._meta.db_table): self.deferred_sql.remove(sql) def add_index(self, model, index): """Add an index on a model.""" if ( index.contains_expressions and not self.connection.features.supports_expression_indexes ): return None # Index.create_sql returns interpolated SQL which makes params=None a # necessity to avoid escaping attempts on execution. self.execute(index.create_sql(model, self), params=None) def remove_index(self, model, index): """Remove an index from a model.""" if ( index.contains_expressions and not self.connection.features.supports_expression_indexes ): return None self.execute(index.remove_sql(model, self)) def add_constraint(self, model, constraint): """Add a constraint to a model.""" sql = constraint.create_sql(model, self) if sql: # Constraint.create_sql returns interpolated SQL which makes # params=None a necessity to avoid escaping attempts on execution. self.execute(sql, params=None) def remove_constraint(self, model, constraint): """Remove a constraint from a model.""" sql = constraint.remove_sql(model, self) if sql: self.execute(sql) def alter_unique_together(self, model, old_unique_together, new_unique_together): """ Deal with a model changing its unique_together. The input unique_togethers must be doubly-nested, not the single-nested ["foo", "bar"] format. 
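        For example, use [["foo", "bar"]] rather than ["foo", "bar"].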
""" olds = {tuple(fields) for fields in old_unique_together} news = {tuple(fields) for fields in new_unique_together} # Deleted uniques for fields in olds.difference(news): self._delete_composed_index(model, fields, {'unique': True}, self.sql_delete_unique) # Created uniques for field_names in news.difference(olds): fields = [model._meta.get_field(field) for field in field_names] self.execute(self._create_unique_sql(model, fields)) def alter_index_together(self, model, old_index_together, new_index_together): """ Deal with a model changing its index_together. The input index_togethers must be doubly-nested, not the single-nested ["foo", "bar"] format. """ olds = {tuple(fields) for fields in old_index_together} news = {tuple(fields) for fields in new_index_together} # Deleted indexes for fields in olds.difference(news): self._delete_composed_index( model, fields, {'index': True, 'unique': False}, self.sql_delete_index, ) # Created indexes for field_names in news.difference(olds): fields = [model._meta.get_field(field) for field in field_names] self.execute(self._create_index_sql(model, fields=fields, suffix='_idx')) def _delete_composed_index(self, model, fields, constraint_kwargs, sql): meta_constraint_names = {constraint.name for constraint in model._meta.constraints} meta_index_names = {constraint.name for constraint in model._meta.indexes} columns = [model._meta.get_field(field).column for field in fields] constraint_names = self._constraint_names( model, columns, exclude=meta_constraint_names | meta_index_names, **constraint_kwargs ) if len(constraint_names) != 1: raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % ( len(constraint_names), model._meta.db_table, ", ".join(columns), )) self.execute(self._delete_constraint_sql(sql, model, constraint_names[0])) def alter_db_table(self, model, old_db_table, new_db_table): """Rename the table a model points to.""" if (old_db_table == new_db_table or (self.connection.features.ignores_table_name_case and old_db_table.lower() == new_db_table.lower())): return self.execute(self.sql_rename_table % { "old_table": self.quote_name(old_db_table), "new_table": self.quote_name(new_db_table), }) # Rename all references to the old table name. for sql in self.deferred_sql: if isinstance(sql, Statement): sql.rename_table_references(old_db_table, new_db_table) def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace): """Move a model's table between tablespaces.""" self.execute(self.sql_retablespace_table % { "table": self.quote_name(model._meta.db_table), "old_tablespace": self.quote_name(old_db_tablespace), "new_tablespace": self.quote_name(new_db_tablespace), }) def add_field(self, model, field): """ Create a field on a model. Usually involves adding a column, but may involve adding a table instead (for M2M fields). 
""" # Special-case implicit M2M tables if field.many_to_many and field.remote_field.through._meta.auto_created: return self.create_model(field.remote_field.through) # Get the column's definition definition, params = self.column_sql(model, field, include_default=True) # It might not actually have a column behind it if definition is None: return # Check constraints can go on the column SQL here db_params = field.db_parameters(connection=self.connection) if db_params['check']: definition += " " + self.sql_check_constraint % db_params if field.remote_field and self.connection.features.supports_foreign_keys and field.db_constraint: constraint_suffix = '_fk_%(to_table)s_%(to_column)s' # Add FK constraint inline, if supported. if self.sql_create_column_inline_fk: to_table = field.remote_field.model._meta.db_table to_column = field.remote_field.model._meta.get_field(field.remote_field.field_name).column namespace, _ = split_identifier(model._meta.db_table) definition += " " + self.sql_create_column_inline_fk % { 'name': self._fk_constraint_name(model, field, constraint_suffix), 'namespace': '%s.' % self.quote_name(namespace) if namespace else '', 'column': self.quote_name(field.column), 'to_table': self.quote_name(to_table), 'to_column': self.quote_name(to_column), 'deferrable': self.connection.ops.deferrable_sql() } # Otherwise, add FK constraints later. else: self.deferred_sql.append(self._create_fk_sql(model, field, constraint_suffix)) # Build the SQL and run it sql = self.sql_create_column % { "table": self.quote_name(model._meta.db_table), "column": self.quote_name(field.column), "definition": definition, } self.execute(sql, params) # Drop the default if we need to # (Django usually does not use in-database defaults) if not self.skip_default_on_alter(field) and self.effective_default(field) is not None: changes_sql, params = self._alter_column_default_sql(model, None, field, drop=True) sql = self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": changes_sql, } self.execute(sql, params) # Add an index, if required self.deferred_sql.extend(self._field_indexes_sql(model, field)) # Reset connection if required if self.connection.features.connection_persists_old_columns: self.connection.close() def remove_field(self, model, field): """ Remove a field from a model. Usually involves deleting a column, but for M2Ms may involve deleting a table. """ # Special-case implicit M2M tables if field.many_to_many and field.remote_field.through._meta.auto_created: return self.delete_model(field.remote_field.through) # It might not actually have a column behind it if field.db_parameters(connection=self.connection)['type'] is None: return # Drop any FK constraints, MySQL requires explicit deletion if field.remote_field: fk_names = self._constraint_names(model, [field.column], foreign_key=True) for fk_name in fk_names: self.execute(self._delete_fk_sql(model, fk_name)) # Delete the column sql = self.sql_delete_column % { "table": self.quote_name(model._meta.db_table), "column": self.quote_name(field.column), } self.execute(sql) # Reset connection if required if self.connection.features.connection_persists_old_columns: self.connection.close() # Remove all deferred statements referencing the deleted column. 
for sql in list(self.deferred_sql): if isinstance(sql, Statement) and sql.references_column(model._meta.db_table, field.column): self.deferred_sql.remove(sql) def alter_field(self, model, old_field, new_field, strict=False): """ Allow a field's type, uniqueness, nullability, default, column, constraints, etc. to be modified. `old_field` is required to compute the necessary changes. If `strict` is True, raise errors if the old column does not match `old_field` precisely. """ if not self._field_should_be_altered(old_field, new_field): return # Ensure this field is even column-based old_db_params = old_field.db_parameters(connection=self.connection) old_type = old_db_params['type'] new_db_params = new_field.db_parameters(connection=self.connection) new_type = new_db_params['type'] if ((old_type is None and old_field.remote_field is None) or (new_type is None and new_field.remote_field is None)): raise ValueError( "Cannot alter field %s into %s - they do not properly define " "db_type (are you using a badly-written custom field?)" % (old_field, new_field), ) elif old_type is None and new_type is None and ( old_field.remote_field.through and new_field.remote_field.through and old_field.remote_field.through._meta.auto_created and new_field.remote_field.through._meta.auto_created): return self._alter_many_to_many(model, old_field, new_field, strict) elif old_type is None and new_type is None and ( old_field.remote_field.through and new_field.remote_field.through and not old_field.remote_field.through._meta.auto_created and not new_field.remote_field.through._meta.auto_created): # Both sides have through models; this is a no-op. return elif old_type is None or new_type is None: raise ValueError( "Cannot alter field %s into %s - they are not compatible types " "(you cannot alter to or from M2M fields, or add or remove " "through= on M2M fields)" % (old_field, new_field) ) self._alter_field(model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict) def _alter_field(self, model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict=False): """Perform a "physical" (non-ManyToMany) field update.""" # Drop any FK constraints, we'll remake them later fks_dropped = set() if ( self.connection.features.supports_foreign_keys and old_field.remote_field and old_field.db_constraint ): fk_names = self._constraint_names(model, [old_field.column], foreign_key=True) if strict and len(fk_names) != 1: raise ValueError("Found wrong number (%s) of foreign key constraints for %s.%s" % ( len(fk_names), model._meta.db_table, old_field.column, )) for fk_name in fk_names: fks_dropped.add((old_field.column,)) self.execute(self._delete_fk_sql(model, fk_name)) # Has unique been removed? if old_field.unique and (not new_field.unique or self._field_became_primary_key(old_field, new_field)): # Find the unique constraint for this field meta_constraint_names = {constraint.name for constraint in model._meta.constraints} constraint_names = self._constraint_names( model, [old_field.column], unique=True, primary_key=False, exclude=meta_constraint_names, ) if strict and len(constraint_names) != 1: raise ValueError("Found wrong number (%s) of unique constraints for %s.%s" % ( len(constraint_names), model._meta.db_table, old_field.column, )) for constraint_name in constraint_names: self.execute(self._delete_unique_sql(model, constraint_name)) # Drop incoming FK constraints if the field is a primary key or unique, # which might be a to_field target, and things are going to change. 
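        # (e.g. another model's foreign key declared with to_field targeting
        # this unique column: its constraint is dropped here and recreated
        # after the type change via rels_to_update below.)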
drop_foreign_keys = ( self.connection.features.supports_foreign_keys and ( (old_field.primary_key and new_field.primary_key) or (old_field.unique and new_field.unique) ) and old_type != new_type ) if drop_foreign_keys: # '_meta.related_field' also contains M2M reverse fields, these # will be filtered out for _old_rel, new_rel in _related_non_m2m_objects(old_field, new_field): rel_fk_names = self._constraint_names( new_rel.related_model, [new_rel.field.column], foreign_key=True ) for fk_name in rel_fk_names: self.execute(self._delete_fk_sql(new_rel.related_model, fk_name)) # Removed an index? (no strict check, as multiple indexes are possible) # Remove indexes if db_index switched to False or a unique constraint # will now be used in lieu of an index. The following lines from the # truth table show all True cases; the rest are False: # # old_field.db_index | old_field.unique | new_field.db_index | new_field.unique # ------------------------------------------------------------------------------ # True | False | False | False # True | False | False | True # True | False | True | True if old_field.db_index and not old_field.unique and (not new_field.db_index or new_field.unique): # Find the index for this field meta_index_names = {index.name for index in model._meta.indexes} # Retrieve only BTREE indexes since this is what's created with # db_index=True. index_names = self._constraint_names( model, [old_field.column], index=True, type_=Index.suffix, exclude=meta_index_names, ) for index_name in index_names: # The only way to check if an index was created with # db_index=True or with Index(['field'], name='foo') # is to look at its name (refs #28053). self.execute(self._delete_index_sql(model, index_name)) # Change check constraints? if old_db_params['check'] != new_db_params['check'] and old_db_params['check']: meta_constraint_names = {constraint.name for constraint in model._meta.constraints} constraint_names = self._constraint_names( model, [old_field.column], check=True, exclude=meta_constraint_names, ) if strict and len(constraint_names) != 1: raise ValueError("Found wrong number (%s) of check constraints for %s.%s" % ( len(constraint_names), model._meta.db_table, old_field.column, )) for constraint_name in constraint_names: self.execute(self._delete_check_sql(model, constraint_name)) # Have they renamed the column? if old_field.column != new_field.column: self.execute(self._rename_field_sql(model._meta.db_table, old_field, new_field, new_type)) # Rename all references to the renamed column. for sql in self.deferred_sql: if isinstance(sql, Statement): sql.rename_column_references(model._meta.db_table, old_field.column, new_field.column) # Next, start accumulating actions to do actions = [] null_actions = [] post_actions = [] # Collation change? old_collation = getattr(old_field, 'db_collation', None) new_collation = getattr(new_field, 'db_collation', None) if old_collation != new_collation: # Collation change handles also a type change. fragment = self._alter_column_collation_sql(model, new_field, new_type, new_collation) actions.append(fragment) # Type change? elif old_type != new_type: fragment, other_actions = self._alter_column_type_sql(model, old_field, new_field, new_type) actions.append(fragment) post_actions.extend(other_actions) # When changing a column NULL constraint to NOT NULL with a given # default value, we need to perform 4 steps: # 1. Add a default for new incoming writes # 2. Update existing NULL rows with new default # 3. Replace NULL constraint with NOT NULL # 4. 
Drop the default again. # Default change? needs_database_default = False if old_field.null and not new_field.null: old_default = self.effective_default(old_field) new_default = self.effective_default(new_field) if ( not self.skip_default_on_alter(new_field) and old_default != new_default and new_default is not None ): needs_database_default = True actions.append(self._alter_column_default_sql(model, old_field, new_field)) # Nullability change? if old_field.null != new_field.null: fragment = self._alter_column_null_sql(model, old_field, new_field) if fragment: null_actions.append(fragment) # Only if we have a default and there is a change from NULL to NOT NULL four_way_default_alteration = ( new_field.has_default() and (old_field.null and not new_field.null) ) if actions or null_actions: if not four_way_default_alteration: # If we don't have to do a 4-way default alteration we can # directly run a (NOT) NULL alteration actions = actions + null_actions # Combine actions together if we can (e.g. postgres) if self.connection.features.supports_combined_alters and actions: sql, params = tuple(zip(*actions)) actions = [(", ".join(sql), sum(params, []))] # Apply those actions for sql, params in actions: self.execute( self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": sql, }, params, ) if four_way_default_alteration: # Update existing rows with default value self.execute( self.sql_update_with_default % { "table": self.quote_name(model._meta.db_table), "column": self.quote_name(new_field.column), "default": "%s", }, [new_default], ) # Since we didn't run a NOT NULL change before we need to do it # now for sql, params in null_actions: self.execute( self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": sql, }, params, ) if post_actions: for sql, params in post_actions: self.execute(sql, params) # If primary_key changed to False, delete the primary key constraint. if old_field.primary_key and not new_field.primary_key: self._delete_primary_key(model, strict) # Added a unique? if self._unique_should_be_added(old_field, new_field): self.execute(self._create_unique_sql(model, [new_field])) # Added an index? Add an index if db_index switched to True or a unique # constraint will no longer be used in lieu of an index. The following # lines from the truth table show all True cases; the rest are False: # # old_field.db_index | old_field.unique | new_field.db_index | new_field.unique # ------------------------------------------------------------------------------ # False | False | True | False # False | True | True | False # True | True | True | False if (not old_field.db_index or old_field.unique) and new_field.db_index and not new_field.unique: self.execute(self._create_index_sql(model, fields=[new_field])) # Type alteration on primary key? Then we need to alter the column # referring to us. rels_to_update = [] if drop_foreign_keys: rels_to_update.extend(_related_non_m2m_objects(old_field, new_field)) # Changed to become primary key? 
if self._field_became_primary_key(old_field, new_field): # Make the new one self.execute(self._create_primary_key_sql(model, new_field)) # Update all referencing columns rels_to_update.extend(_related_non_m2m_objects(old_field, new_field)) # Handle our type alters on the other end of rels from the PK stuff above for old_rel, new_rel in rels_to_update: rel_db_params = new_rel.field.db_parameters(connection=self.connection) rel_type = rel_db_params['type'] fragment, other_actions = self._alter_column_type_sql( new_rel.related_model, old_rel.field, new_rel.field, rel_type ) self.execute( self.sql_alter_column % { "table": self.quote_name(new_rel.related_model._meta.db_table), "changes": fragment[0], }, fragment[1], ) for sql, params in other_actions: self.execute(sql, params) # Does it have a foreign key? if (self.connection.features.supports_foreign_keys and new_field.remote_field and (fks_dropped or not old_field.remote_field or not old_field.db_constraint) and new_field.db_constraint): self.execute(self._create_fk_sql(model, new_field, "_fk_%(to_table)s_%(to_column)s")) # Rebuild FKs that pointed to us if we previously had to drop them if drop_foreign_keys: for _, rel in rels_to_update: if rel.field.db_constraint: self.execute(self._create_fk_sql(rel.related_model, rel.field, "_fk")) # Does it have check constraints we need to add? if old_db_params['check'] != new_db_params['check'] and new_db_params['check']: constraint_name = self._create_index_name(model._meta.db_table, [new_field.column], suffix='_check') self.execute(self._create_check_sql(model, constraint_name, new_db_params['check'])) # Drop the default if we need to # (Django usually does not use in-database defaults) if needs_database_default: changes_sql, params = self._alter_column_default_sql(model, old_field, new_field, drop=True) sql = self.sql_alter_column % { "table": self.quote_name(model._meta.db_table), "changes": changes_sql, } self.execute(sql, params) # Reset connection if required if self.connection.features.connection_persists_old_columns: self.connection.close() def _alter_column_null_sql(self, model, old_field, new_field): """ Hook to specialize column null alteration. Return a (sql, params) fragment to set a column to null or non-null as required by new_field, or None if no changes are required. """ if (self.connection.features.interprets_empty_strings_as_nulls and new_field.get_internal_type() in ("CharField", "TextField")): # The field is nullable in the database anyway, leave it alone. return else: new_db_params = new_field.db_parameters(connection=self.connection) sql = self.sql_alter_column_null if new_field.null else self.sql_alter_column_not_null return ( sql % { 'column': self.quote_name(new_field.column), 'type': new_db_params['type'], }, [], ) def _alter_column_default_sql(self, model, old_field, new_field, drop=False): """ Hook to specialize column default alteration. Return a (sql, params) fragment to add or drop (depending on the drop argument) a default to new_field's column. """ new_default = self.effective_default(new_field) default = self._column_default_sql(new_field) params = [new_default] if drop: params = [] elif self.connection.features.requires_literal_defaults: # Some databases (Oracle) can't take defaults as a parameter # If this is the case, the SchemaEditor for that database should # implement prepare_default(). 
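            # (For instance, an Oracle-style editor might quote the value
            # directly into the DDL string; the exact behavior is
            # backend-specific, this is only a sketch.)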
default = self.prepare_default(new_default) params = [] new_db_params = new_field.db_parameters(connection=self.connection) if drop: if new_field.null: sql = self.sql_alter_column_no_default_null else: sql = self.sql_alter_column_no_default else: sql = self.sql_alter_column_default return ( sql % { 'column': self.quote_name(new_field.column), 'type': new_db_params['type'], 'default': default, }, params, ) def _alter_column_type_sql(self, model, old_field, new_field, new_type): """ Hook to specialize column type alteration for different backends, for cases when a creation type is different to an alteration type (e.g. SERIAL in PostgreSQL, PostGIS fields). Return a two-tuple of: an SQL fragment of (sql, params) to insert into an ALTER TABLE statement and a list of extra (sql, params) tuples to run once the field is altered. """ return ( ( self.sql_alter_column_type % { "column": self.quote_name(new_field.column), "type": new_type, }, [], ), [], ) def _alter_column_collation_sql(self, model, new_field, new_type, new_collation): return ( self.sql_alter_column_collate % { 'column': self.quote_name(new_field.column), 'type': new_type, 'collation': ' ' + self._collate_sql(new_collation) if new_collation else '', }, [], ) def _alter_many_to_many(self, model, old_field, new_field, strict): """Alter M2Ms to repoint their to= endpoints.""" # Rename the through table if old_field.remote_field.through._meta.db_table != new_field.remote_field.through._meta.db_table: self.alter_db_table(old_field.remote_field.through, old_field.remote_field.through._meta.db_table, new_field.remote_field.through._meta.db_table) # Repoint the FK to the other side self.alter_field( new_field.remote_field.through, # We need the field that points to the target model, so we can tell alter_field to change it - # this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model) old_field.remote_field.through._meta.get_field(old_field.m2m_reverse_field_name()), new_field.remote_field.through._meta.get_field(new_field.m2m_reverse_field_name()), ) self.alter_field( new_field.remote_field.through, # for self-referential models we need to alter field from the other end too old_field.remote_field.through._meta.get_field(old_field.m2m_field_name()), new_field.remote_field.through._meta.get_field(new_field.m2m_field_name()), ) def _create_index_name(self, table_name, column_names, suffix=""): """ Generate a unique name for an index/unique constraint. The name is divided into 3 parts: the table name, the column names, and a unique digest and suffix. """ _, table_name = split_identifier(table_name) hash_suffix_part = '%s%s' % (names_digest(table_name, *column_names, length=8), suffix) max_length = self.connection.ops.max_name_length() or 200 # If everything fits into max_length, use that name. index_name = '%s_%s_%s' % (table_name, '_'.join(column_names), hash_suffix_part) if len(index_name) <= max_length: return index_name # Shorten a long suffix. if len(hash_suffix_part) > max_length / 3: hash_suffix_part = hash_suffix_part[:max_length // 3] other_length = (max_length - len(hash_suffix_part)) // 2 - 1 index_name = '%s_%s_%s' % ( table_name[:other_length], '_'.join(column_names)[:other_length], hash_suffix_part, ) # Prepend D if needed to prevent the name from starting with an # underscore or a number (not permitted on Oracle). 
if index_name[0] == "_" or index_name[0].isdigit(): index_name = "D%s" % index_name[:-1] return index_name def _get_index_tablespace_sql(self, model, fields, db_tablespace=None): if db_tablespace is None: if len(fields) == 1 and fields[0].db_tablespace: db_tablespace = fields[0].db_tablespace elif model._meta.db_tablespace: db_tablespace = model._meta.db_tablespace if db_tablespace is not None: return ' ' + self.connection.ops.tablespace_sql(db_tablespace) return '' def _index_condition_sql(self, condition): if condition: return ' WHERE ' + condition return '' def _index_include_sql(self, model, columns): if not columns or not self.connection.features.supports_covering_indexes: return '' return Statement( ' INCLUDE (%(columns)s)', columns=Columns(model._meta.db_table, columns, self.quote_name), ) def _create_index_sql(self, model, *, fields=None, name=None, suffix='', using='', db_tablespace=None, col_suffixes=(), sql=None, opclasses=(), condition=None, include=None, expressions=None): """ Return the SQL statement to create the index for one or several fields or expressions. `sql` can be specified if the syntax differs from the standard (GIS indexes, ...). """ fields = fields or [] expressions = expressions or [] compiler = Query(model, alias_cols=False).get_compiler( connection=self.connection, ) tablespace_sql = self._get_index_tablespace_sql(model, fields, db_tablespace=db_tablespace) columns = [field.column for field in fields] sql_create_index = sql or self.sql_create_index table = model._meta.db_table def create_index_name(*args, **kwargs): nonlocal name if name is None: name = self._create_index_name(*args, **kwargs) return self.quote_name(name) return Statement( sql_create_index, table=Table(table, self.quote_name), name=IndexName(table, columns, suffix, create_index_name), using=using, columns=( self._index_columns(table, columns, col_suffixes, opclasses) if columns else Expressions(table, expressions, compiler, self.quote_value) ), extra=tablespace_sql, condition=self._index_condition_sql(condition), include=self._index_include_sql(model, include), ) def _delete_index_sql(self, model, name, sql=None): return Statement( sql or self.sql_delete_index, table=Table(model._meta.db_table, self.quote_name), name=self.quote_name(name), ) def _index_columns(self, table, columns, col_suffixes, opclasses): return Columns(table, columns, self.quote_name, col_suffixes=col_suffixes) def _model_indexes_sql(self, model): """ Return a list of all index SQL statements (field indexes, index_together, Meta.indexes) for the specified model. """ if not model._meta.managed or model._meta.proxy or model._meta.swapped: return [] output = [] for field in model._meta.local_fields: output.extend(self._field_indexes_sql(model, field)) for field_names in model._meta.index_together: fields = [model._meta.get_field(field) for field in field_names] output.append(self._create_index_sql(model, fields=fields, suffix='_idx')) for index in model._meta.indexes: if ( not index.contains_expressions or self.connection.features.supports_expression_indexes ): output.append(index.create_sql(model, self)) return output def _field_indexes_sql(self, model, field): """ Return a list of all index SQL statements for the specified field. 
""" output = [] if self._field_should_be_indexed(model, field): output.append(self._create_index_sql(model, fields=[field])) return output def _field_should_be_altered(self, old_field, new_field): _, old_path, old_args, old_kwargs = old_field.deconstruct() _, new_path, new_args, new_kwargs = new_field.deconstruct() # Don't alter when: # - changing only a field name # - changing an attribute that doesn't affect the schema # - adding only a db_column and the column name is not changed non_database_attrs = [ 'blank', 'db_column', 'editable', 'error_messages', 'help_text', 'limit_choices_to', # Database-level options are not supported, see #21961. 'on_delete', 'related_name', 'related_query_name', 'validators', 'verbose_name', ] for attr in non_database_attrs: old_kwargs.pop(attr, None) new_kwargs.pop(attr, None) return ( self.quote_name(old_field.column) != self.quote_name(new_field.column) or (old_path, old_args, old_kwargs) != (new_path, new_args, new_kwargs) ) def _field_should_be_indexed(self, model, field): return field.db_index and not field.unique def _field_became_primary_key(self, old_field, new_field): return not old_field.primary_key and new_field.primary_key def _unique_should_be_added(self, old_field, new_field): return (not old_field.unique and new_field.unique) or ( old_field.primary_key and not new_field.primary_key and new_field.unique ) def _rename_field_sql(self, table, old_field, new_field, new_type): return self.sql_rename_column % { "table": self.quote_name(table), "old_column": self.quote_name(old_field.column), "new_column": self.quote_name(new_field.column), "type": new_type, } def _create_fk_sql(self, model, field, suffix): table = Table(model._meta.db_table, self.quote_name) name = self._fk_constraint_name(model, field, suffix) column = Columns(model._meta.db_table, [field.column], self.quote_name) to_table = Table(field.target_field.model._meta.db_table, self.quote_name) to_column = Columns(field.target_field.model._meta.db_table, [field.target_field.column], self.quote_name) deferrable = self.connection.ops.deferrable_sql() return Statement( self.sql_create_fk, table=table, name=name, column=column, to_table=to_table, to_column=to_column, deferrable=deferrable, ) def _fk_constraint_name(self, model, field, suffix): def create_fk_name(*args, **kwargs): return self.quote_name(self._create_index_name(*args, **kwargs)) return ForeignKeyName( model._meta.db_table, [field.column], split_identifier(field.target_field.model._meta.db_table)[1], [field.target_field.column], suffix, create_fk_name, ) def _delete_fk_sql(self, model, name): return self._delete_constraint_sql(self.sql_delete_fk, model, name) def _deferrable_constraint_sql(self, deferrable): if deferrable is None: return '' if deferrable == Deferrable.DEFERRED: return ' DEFERRABLE INITIALLY DEFERRED' if deferrable == Deferrable.IMMEDIATE: return ' DEFERRABLE INITIALLY IMMEDIATE' def _unique_sql( self, model, fields, name, condition=None, deferrable=None, include=None, opclasses=None, expressions=None, ): if ( deferrable and not self.connection.features.supports_deferrable_unique_constraints ): return None if condition or include or opclasses or expressions: # Databases support conditional, covering, and functional unique # constraints via a unique index. 
sql = self._create_unique_sql( model, fields, name=name, condition=condition, include=include, opclasses=opclasses, expressions=expressions, ) if sql: self.deferred_sql.append(sql) return None constraint = self.sql_unique_constraint % { 'columns': ', '.join([self.quote_name(field.column) for field in fields]), 'deferrable': self._deferrable_constraint_sql(deferrable), } return self.sql_constraint % { 'name': self.quote_name(name), 'constraint': constraint, } def _create_unique_sql( self, model, fields, name=None, condition=None, deferrable=None, include=None, opclasses=None, expressions=None, ): if ( ( deferrable and not self.connection.features.supports_deferrable_unique_constraints ) or (condition and not self.connection.features.supports_partial_indexes) or (include and not self.connection.features.supports_covering_indexes) or (expressions and not self.connection.features.supports_expression_indexes) ): return None def create_unique_name(*args, **kwargs): return self.quote_name(self._create_index_name(*args, **kwargs)) compiler = Query(model, alias_cols=False).get_compiler(connection=self.connection) table = model._meta.db_table columns = [field.column for field in fields] if name is None: name = IndexName(table, columns, '_uniq', create_unique_name) else: name = self.quote_name(name) if condition or include or opclasses or expressions: sql = self.sql_create_unique_index else: sql = self.sql_create_unique if columns: columns = self._index_columns(table, columns, col_suffixes=(), opclasses=opclasses) else: columns = Expressions(table, expressions, compiler, self.quote_value) return Statement( sql, table=Table(table, self.quote_name), name=name, columns=columns, condition=self._index_condition_sql(condition), deferrable=self._deferrable_constraint_sql(deferrable), include=self._index_include_sql(model, include), ) def _delete_unique_sql( self, model, name, condition=None, deferrable=None, include=None, opclasses=None, expressions=None, ): if ( ( deferrable and not self.connection.features.supports_deferrable_unique_constraints ) or (condition and not self.connection.features.supports_partial_indexes) or (include and not self.connection.features.supports_covering_indexes) or (expressions and not self.connection.features.supports_expression_indexes) ): return None if condition or include or opclasses or expressions: sql = self.sql_delete_index else: sql = self.sql_delete_unique return self._delete_constraint_sql(sql, model, name) def _check_sql(self, name, check): return self.sql_constraint % { 'name': self.quote_name(name), 'constraint': self.sql_check_constraint % {'check': check}, } def _create_check_sql(self, model, name, check): return Statement( self.sql_create_check, table=Table(model._meta.db_table, self.quote_name), name=self.quote_name(name), check=check, ) def _delete_check_sql(self, model, name): return self._delete_constraint_sql(self.sql_delete_check, model, name) def _delete_constraint_sql(self, template, model, name): return Statement( template, table=Table(model._meta.db_table, self.quote_name), name=self.quote_name(name), ) def _constraint_names(self, model, column_names=None, unique=None, primary_key=None, index=None, foreign_key=None, check=None, type_=None, exclude=None): """Return all constraint names matching the columns and conditions.""" if column_names is not None: column_names = [ self.connection.introspection.identifier_converter(name) for name in column_names ] with self.connection.cursor() as cursor: constraints = 
self.connection.introspection.get_constraints(cursor, model._meta.db_table) result = [] for name, infodict in constraints.items(): if column_names is None or column_names == infodict['columns']: if unique is not None and infodict['unique'] != unique: continue if primary_key is not None and infodict['primary_key'] != primary_key: continue if index is not None and infodict['index'] != index: continue if check is not None and infodict['check'] != check: continue if foreign_key is not None and not infodict['foreign_key']: continue if type_ is not None and infodict['type'] != type_: continue if not exclude or name not in exclude: result.append(name) return result def _delete_primary_key(self, model, strict=False): constraint_names = self._constraint_names(model, primary_key=True) if strict and len(constraint_names) != 1: raise ValueError('Found wrong number (%s) of PK constraints for %s' % ( len(constraint_names), model._meta.db_table, )) for constraint_name in constraint_names: self.execute(self._delete_primary_key_sql(model, constraint_name)) def _create_primary_key_sql(self, model, field): return Statement( self.sql_create_pk, table=Table(model._meta.db_table, self.quote_name), name=self.quote_name( self._create_index_name(model._meta.db_table, [field.column], suffix="_pk") ), columns=Columns(model._meta.db_table, [field.column], self.quote_name), ) def _delete_primary_key_sql(self, model, name): return self._delete_constraint_sql(self.sql_delete_pk, model, name) def _collate_sql(self, collation): return 'COLLATE ' + self.quote_name(collation) def remove_procedure(self, procedure_name, param_types=()): sql = self.sql_delete_procedure % { 'procedure': self.quote_name(procedure_name), 'param_types': ','.join(param_types), } self.execute(sql)
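
# Illustrative sketch, not part of Django: the docstring of
# _alter_column_type_sql() above describes a ((sql, params), post_actions)
# return value, where the post-actions are executed once the ALTER TABLE
# fragment has run (see the other_actions loop above). A minimal subclass
# showing that contract follows; "ExampleSchemaEditor" and the REINDEX
# statement are assumptions made for illustration only.
from django.db.backends.base.schema import BaseDatabaseSchemaEditor


class ExampleSchemaEditor(BaseDatabaseSchemaEditor):
    def _alter_column_type_sql(self, model, old_field, new_field, new_type):
        fragment, post_actions = super()._alter_column_type_sql(
            model, old_field, new_field, new_type,
        )
        # Queue an extra statement to run after the column has been altered.
        post_actions.append((
            'REINDEX TABLE %s' % self.quote_name(model._meta.db_table),
            [],
        ))
        return fragment, post_actions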
import os
import subprocess


class BaseDatabaseClient:
    """Encapsulate backend-specific methods for opening a client shell."""
    # This should be a string representing the name of the executable
    # (e.g., "psql"). Subclasses must override this.
    executable_name = None

    def __init__(self, connection):
        # connection is an instance of BaseDatabaseWrapper.
        self.connection = connection

    @classmethod
    def settings_to_cmd_args_env(cls, settings_dict, parameters):
        raise NotImplementedError(
            'subclasses of BaseDatabaseClient must provide a '
            'settings_to_cmd_args_env() method or override runshell().'
        )

    def runshell(self, parameters):
        args, env = self.settings_to_cmd_args_env(self.connection.settings_dict, parameters)
        env = {**os.environ, **env} if env else None
        subprocess.run(args, env=env, check=True)
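
# Illustrative sketch, not part of Django: a minimal concrete client for a
# hypothetical "toysql" command-line tool, showing the
# settings_to_cmd_args_env() contract consumed by runshell() above. Every
# name here except BaseDatabaseClient (the executable name, the environment
# variable, the settings keys used) is an assumption.
from django.db.backends.base.client import BaseDatabaseClient


class ToyDatabaseClient(BaseDatabaseClient):
    executable_name = 'toysql'

    @classmethod
    def settings_to_cmd_args_env(cls, settings_dict, parameters):
        args = [cls.executable_name, settings_dict['NAME'], *parameters]
        # Pass the secret via the environment rather than argv so it doesn't
        # show up in the process list; runshell() merges this dict with
        # os.environ when it is non-empty.
        env = {}
        if settings_dict.get('PASSWORD'):
            env['TOYSQL_PASSWORD'] = settings_dict['PASSWORD']
        return args, env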
import operator from django.db.backends.base.features import BaseDatabaseFeatures from django.utils.functional import cached_property class DatabaseFeatures(BaseDatabaseFeatures): empty_fetchmany_value = () allows_group_by_pk = True related_fields_match_type = True # MySQL doesn't support sliced subqueries with IN/ALL/ANY/SOME. allow_sliced_subqueries_with_in = False has_select_for_update = True supports_forward_references = False supports_regex_backreferencing = False supports_date_lookup_using_string = False supports_timezones = False requires_explicit_null_ordering_when_grouping = True can_release_savepoints = True atomic_transactions = False can_clone_databases = True supports_temporal_subtraction = True supports_select_intersection = False supports_select_difference = False supports_slicing_ordering_in_compound = True supports_index_on_text_field = False has_case_insensitive_like = False create_test_procedure_without_params_sql = """ CREATE PROCEDURE test_procedure () BEGIN DECLARE V_I INTEGER; SET V_I = 1; END; """ create_test_procedure_with_int_param_sql = """ CREATE PROCEDURE test_procedure (P_I INTEGER) BEGIN DECLARE V_I INTEGER; SET V_I = P_I; END; """ # Neither MySQL nor MariaDB support partial indexes. supports_partial_indexes = False # COLLATE must be wrapped in parentheses because MySQL treats COLLATE as an # indexed expression. collate_as_index_expression = True supports_order_by_nulls_modifier = False order_by_nulls_first = True @cached_property def test_collations(self): charset = 'utf8' if self.connection.mysql_is_mariadb and self.connection.mysql_version >= (10, 6): # utf8 is an alias for utf8mb3 in MariaDB 10.6+. charset = 'utf8mb3' return { 'ci': f'{charset}_general_ci', 'non_default': f'{charset}_esperanto_ci', 'swedish_ci': f'{charset}_swedish_ci', } @cached_property def django_test_skips(self): skips = { "This doesn't work on MySQL.": { 'db_functions.comparison.test_greatest.GreatestTests.test_coalesce_workaround', 'db_functions.comparison.test_least.LeastTests.test_coalesce_workaround', }, 'Running on MySQL requires utf8mb4 encoding (#18392).': { 'model_fields.test_textfield.TextFieldTests.test_emoji', 'model_fields.test_charfield.TestCharField.test_emoji', }, "MySQL doesn't support functional indexes on a function that " "returns JSON": { 'schema.tests.SchemaTests.test_func_index_json_key_transform', }, "MySQL supports multiplying and dividing DurationFields by a " "scalar value but it's not implemented (#25287).": { 'expressions.tests.FTimeDeltaTests.test_durationfield_multiply_divide', }, } if 'ONLY_FULL_GROUP_BY' in self.connection.sql_mode: skips.update({ 'GROUP BY optimization does not work properly when ' 'ONLY_FULL_GROUP_BY mode is enabled on MySQL, see #31331.': { 'aggregation.tests.AggregateTestCase.test_aggregation_subquery_annotation_multivalued', 'annotations.tests.NonAggregateAnnotationTestCase.test_annotation_aggregate_with_m2o', }, }) if not self.connection.mysql_is_mariadb and self.connection.mysql_version < (8,): skips.update({ 'Casting to datetime/time is not supported by MySQL < 8.0. (#30224)': { 'aggregation.tests.AggregateTestCase.test_aggregation_default_using_time_from_python', 'aggregation.tests.AggregateTestCase.test_aggregation_default_using_datetime_from_python', }, 'MySQL < 8.0 returns string type instead of datetime/time. 
(#30224)': { 'aggregation.tests.AggregateTestCase.test_aggregation_default_using_time_from_database', 'aggregation.tests.AggregateTestCase.test_aggregation_default_using_datetime_from_database', }, }) if ( self.connection.mysql_is_mariadb and (10, 4, 3) < self.connection.mysql_version < (10, 5, 2) ): skips.update({ 'https://jira.mariadb.org/browse/MDEV-19598': { 'schema.tests.SchemaTests.test_alter_not_unique_field_to_primary_key', }, }) if ( self.connection.mysql_is_mariadb and (10, 4, 12) < self.connection.mysql_version < (10, 5) ): skips.update({ 'https://jira.mariadb.org/browse/MDEV-22775': { 'schema.tests.SchemaTests.test_alter_pk_with_self_referential_field', }, }) if not self.supports_explain_analyze: skips.update({ 'MariaDB and MySQL >= 8.0.18 specific.': { 'queries.test_explain.ExplainTests.test_mysql_analyze', }, }) return skips @cached_property def _mysql_storage_engine(self): "Internal method used in Django tests. Don't rely on this from your code" return self.connection.mysql_server_data['default_storage_engine'] @cached_property def allows_auto_pk_0(self): """ Autoincrement primary key can be set to 0 if it doesn't generate new autoincrement values. """ return 'NO_AUTO_VALUE_ON_ZERO' in self.connection.sql_mode @cached_property def update_can_self_select(self): return self.connection.mysql_is_mariadb and self.connection.mysql_version >= (10, 3, 2) @cached_property def can_introspect_foreign_keys(self): "Confirm support for introspected foreign keys" return self._mysql_storage_engine != 'MyISAM' @cached_property def introspected_field_types(self): return { **super().introspected_field_types, 'BinaryField': 'TextField', 'BooleanField': 'IntegerField', 'DurationField': 'BigIntegerField', 'GenericIPAddressField': 'CharField', } @cached_property def can_return_columns_from_insert(self): return self.connection.mysql_is_mariadb and self.connection.mysql_version >= (10, 5, 0) can_return_rows_from_bulk_insert = property(operator.attrgetter('can_return_columns_from_insert')) @cached_property def has_zoneinfo_database(self): return self.connection.mysql_server_data['has_zoneinfo_database'] @cached_property def is_sql_auto_is_null_enabled(self): return self.connection.mysql_server_data['sql_auto_is_null'] @cached_property def supports_over_clause(self): if self.connection.mysql_is_mariadb: return True return self.connection.mysql_version >= (8, 0, 2) supports_frame_range_fixed_distance = property(operator.attrgetter('supports_over_clause')) @cached_property def supports_column_check_constraints(self): if self.connection.mysql_is_mariadb: return self.connection.mysql_version >= (10, 2, 1) return self.connection.mysql_version >= (8, 0, 16) supports_table_check_constraints = property(operator.attrgetter('supports_column_check_constraints')) @cached_property def can_introspect_check_constraints(self): if self.connection.mysql_is_mariadb: version = self.connection.mysql_version return (version >= (10, 2, 22) and version < (10, 3)) or version >= (10, 3, 10) return self.connection.mysql_version >= (8, 0, 16) @cached_property def has_select_for_update_skip_locked(self): if self.connection.mysql_is_mariadb: return self.connection.mysql_version >= (10, 6) return self.connection.mysql_version >= (8, 0, 1) @cached_property def has_select_for_update_nowait(self): if self.connection.mysql_is_mariadb: return self.connection.mysql_version >= (10, 3, 0) return self.connection.mysql_version >= (8, 0, 1) @cached_property def has_select_for_update_of(self): return not self.connection.mysql_is_mariadb and 
self.connection.mysql_version >= (8, 0, 1) @cached_property def supports_explain_analyze(self): return self.connection.mysql_is_mariadb or self.connection.mysql_version >= (8, 0, 18) @cached_property def supported_explain_formats(self): # Alias MySQL's TRADITIONAL to TEXT for consistency with other # backends. formats = {'JSON', 'TEXT', 'TRADITIONAL'} if not self.connection.mysql_is_mariadb and self.connection.mysql_version >= (8, 0, 16): formats.add('TREE') return formats @cached_property def supports_transactions(self): """ All storage engines except MyISAM support transactions. """ return self._mysql_storage_engine != 'MyISAM' @cached_property def ignores_table_name_case(self): return self.connection.mysql_server_data['lower_case_table_names'] @cached_property def supports_default_in_lead_lag(self): # To be added in https://jira.mariadb.org/browse/MDEV-12981. return not self.connection.mysql_is_mariadb @cached_property def supports_json_field(self): if self.connection.mysql_is_mariadb: return self.connection.mysql_version >= (10, 2, 7) return self.connection.mysql_version >= (5, 7, 8) @cached_property def can_introspect_json_field(self): if self.connection.mysql_is_mariadb: return self.supports_json_field and self.can_introspect_check_constraints return self.supports_json_field @cached_property def supports_index_column_ordering(self): return ( not self.connection.mysql_is_mariadb and self.connection.mysql_version >= (8, 0, 1) ) @cached_property def supports_expression_indexes(self): return ( not self.connection.mysql_is_mariadb and self.connection.mysql_version >= (8, 0, 13) )
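
# Illustrative sketch, not part of the module above: feature flags like
# supports_explain_analyze are consumed via connection.features, letting
# shared code branch per backend and server version. The helper below is
# hypothetical and assumes a MySQL/MariaDB connection, where that flag is
# defined; QuerySet.explain() and its analyze option are existing Django APIs.
from django.db import connection


def explain(queryset):
    # Prefer EXPLAIN ANALYZE where the server supports it (MariaDB, or
    # MySQL >= 8.0.18 per supports_explain_analyze above); otherwise fall
    # back to a plain EXPLAIN.
    if connection.features.supports_explain_analyze:
        return queryset.explain(analyze=True)
    return queryset.explain()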
from django.core.exceptions import FieldError from django.db.models.expressions import Col from django.db.models.sql import compiler class SQLCompiler(compiler.SQLCompiler): def as_subquery_condition(self, alias, columns, compiler): qn = compiler.quote_name_unless_alias qn2 = self.connection.ops.quote_name sql, params = self.as_sql() return '(%s) IN (%s)' % (', '.join('%s.%s' % (qn(alias), qn2(column)) for column in columns), sql), params class SQLInsertCompiler(compiler.SQLInsertCompiler, SQLCompiler): pass class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler): def as_sql(self): # Prefer the non-standard DELETE FROM syntax over the SQL generated by # the SQLDeleteCompiler's default implementation when multiple tables # are involved since MySQL/MariaDB will generate a more efficient query # plan than when using a subquery. where, having = self.query.where.split_having() if self.single_alias or having: # DELETE FROM cannot be used when filtering against aggregates # since it doesn't allow for GROUP BY and HAVING clauses. return super().as_sql() result = [ 'DELETE %s FROM' % self.quote_name_unless_alias( self.query.get_initial_alias() ) ] from_sql, from_params = self.get_from_clause() result.extend(from_sql) where_sql, where_params = self.compile(where) if where_sql: result.append('WHERE %s' % where_sql) return ' '.join(result), tuple(from_params) + tuple(where_params) class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler): def as_sql(self): update_query, update_params = super().as_sql() # MySQL and MariaDB support UPDATE ... ORDER BY syntax. if self.query.order_by: order_by_sql = [] order_by_params = [] db_table = self.query.get_meta().db_table try: for resolved, (sql, params, _) in self.get_order_by(): if ( isinstance(resolved.expression, Col) and resolved.expression.alias != db_table ): # Ignore ordering if it contains joined fields, because # they cannot be used in the ORDER BY clause. raise FieldError order_by_sql.append(sql) order_by_params.extend(params) update_query += ' ORDER BY ' + ', '.join(order_by_sql) update_params += tuple(order_by_params) except FieldError: # Ignore ordering if it contains annotations, because they're # removed in .update() and cannot be resolved. pass return update_query, update_params class SQLAggregateCompiler(compiler.SQLAggregateCompiler, SQLCompiler): pass
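
# Illustrative sketch, not part of the module above: SQLUpdateCompiler.as_sql()
# keeps the queryset ordering in the UPDATE statement, which on MySQL/MariaDB
# helps when shifting values in a unique column without hitting intermediate
# constraint violations. The "items" queryset and its position field are
# hypothetical.
from django.db.models import F


def shift_positions(items):
    # On MySQL/MariaDB this compiles to roughly:
    #   UPDATE `item` SET `position` = `position` + 1 ORDER BY `position` DESC
    # Orderings that reference joined fields or annotations are dropped by
    # the compiler above rather than producing invalid SQL.
    return items.order_by('-position').update(position=F('position') + 1)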