from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError
from django.db.models import Q
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from .models import (
AllowsNullGFK,
Animal,
Carrot,
Comparison,
ConcreteRelatedModel,
ForConcreteModelModel,
ForProxyModelModel,
Gecko,
ManualPK,
Mineral,
ProxyRelatedModel,
Rock,
TaggedItem,
ValuableRock,
ValuableTaggedItem,
Vegetable,
)
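# The imported models are defined in this app's models.py. A minimal sketch
# of the pattern these tests rely on (field names inferred from their usage
# below -- treat this as an illustration, not the actual file):
#
#     class TaggedItem(models.Model):
#         tag = models.SlugField()
#         content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
#         object_id = models.PositiveIntegerField()
#         content_object = GenericForeignKey("content_type", "object_id")
#
#     class Animal(models.Model):
#         common_name = models.CharField(max_length=150)
#         latin_name = models.CharField(max_length=150)
#         tags = GenericRelation(TaggedItem, related_query_name="animal")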
class GenericRelationsTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.lion = Animal.objects.create(common_name="Lion", latin_name="Panthera leo")
cls.platypus = Animal.objects.create(
common_name="Platypus",
latin_name="Ornithorhynchus anatinus",
)
Vegetable.objects.create(name="Eggplant", is_yucky=True)
cls.bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
cls.quartz = Mineral.objects.create(name="Quartz", hardness=7)
# Tagging stuff.
cls.fatty = cls.bacon.tags.create(tag="fatty")
cls.salty = cls.bacon.tags.create(tag="salty")
cls.yellow = cls.lion.tags.create(tag="yellow")
cls.hairy = cls.lion.tags.create(tag="hairy")
def comp_func(self, obj):
        # Reduce a TaggedItem to a comparable (tag, model class, object_id)
        # tuple for assertQuerySetEqual().
return obj.tag, obj.content_type.model_class(), obj.object_id
async def test_generic_async_acreate(self):
await self.bacon.tags.acreate(tag="orange")
self.assertEqual(await self.bacon.tags.acount(), 3)
def test_generic_update_or_create_when_created(self):
"""
Should be able to use update_or_create from the generic related manager
to create a tag. Refs #23611.
"""
count = self.bacon.tags.count()
tag, created = self.bacon.tags.update_or_create(tag="stinky")
self.assertTrue(created)
self.assertEqual(count + 1, self.bacon.tags.count())
def test_generic_update_or_create_when_updated(self):
"""
Should be able to use update_or_create from the generic related manager
to update a tag. Refs #23611.
"""
count = self.bacon.tags.count()
tag = self.bacon.tags.create(tag="stinky")
self.assertEqual(count + 1, self.bacon.tags.count())
tag, created = self.bacon.tags.update_or_create(
defaults={"tag": "juicy"}, id=tag.id
)
self.assertFalse(created)
self.assertEqual(count + 1, self.bacon.tags.count())
self.assertEqual(tag.tag, "juicy")
async def test_generic_async_aupdate_or_create(self):
tag, created = await self.bacon.tags.aupdate_or_create(
id=self.fatty.id, defaults={"tag": "orange"}
)
self.assertIs(created, False)
self.assertEqual(tag.tag, "orange")
self.assertEqual(await self.bacon.tags.acount(), 2)
tag, created = await self.bacon.tags.aupdate_or_create(tag="pink")
self.assertIs(created, True)
self.assertEqual(await self.bacon.tags.acount(), 3)
self.assertEqual(tag.tag, "pink")
def test_generic_get_or_create_when_created(self):
"""
Should be able to use get_or_create from the generic related manager
to create a tag. Refs #23611.
"""
count = self.bacon.tags.count()
tag, created = self.bacon.tags.get_or_create(tag="stinky")
self.assertTrue(created)
self.assertEqual(count + 1, self.bacon.tags.count())
def test_generic_get_or_create_when_exists(self):
"""
Should be able to use get_or_create from the generic related manager
to get a tag. Refs #23611.
"""
count = self.bacon.tags.count()
tag = self.bacon.tags.create(tag="stinky")
self.assertEqual(count + 1, self.bacon.tags.count())
tag, created = self.bacon.tags.get_or_create(
id=tag.id, defaults={"tag": "juicy"}
)
self.assertFalse(created)
self.assertEqual(count + 1, self.bacon.tags.count())
        # shouldn't have changed the tag
self.assertEqual(tag.tag, "stinky")
async def test_generic_async_aget_or_create(self):
tag, created = await self.bacon.tags.aget_or_create(
id=self.fatty.id, defaults={"tag": "orange"}
)
self.assertIs(created, False)
self.assertEqual(tag.tag, "fatty")
self.assertEqual(await self.bacon.tags.acount(), 2)
tag, created = await self.bacon.tags.aget_or_create(tag="orange")
self.assertIs(created, True)
self.assertEqual(await self.bacon.tags.acount(), 3)
self.assertEqual(tag.tag, "orange")
def test_generic_relations_m2m_mimic(self):
"""
Objects with declared GenericRelations can be tagged directly -- the
API mimics the many-to-many API.
"""
self.assertSequenceEqual(self.lion.tags.all(), [self.hairy, self.yellow])
self.assertSequenceEqual(self.bacon.tags.all(), [self.fatty, self.salty])
def test_access_content_object(self):
"""
Test accessing the content object like a foreign key.
"""
tagged_item = TaggedItem.objects.get(tag="salty")
self.assertEqual(tagged_item.content_object, self.bacon)
def test_query_content_object(self):
qs = TaggedItem.objects.filter(animal__isnull=False).order_by(
"animal__common_name", "tag"
)
self.assertSequenceEqual(qs, [self.hairy, self.yellow])
mpk = ManualPK.objects.create(id=1)
mpk.tags.create(tag="mpk")
qs = TaggedItem.objects.filter(
Q(animal__isnull=False) | Q(manualpk__id=1)
).order_by("tag")
self.assertQuerySetEqual(qs, ["hairy", "mpk", "yellow"], lambda x: x.tag)
def test_exclude_generic_relations(self):
"""
Test lookups over an object without GenericRelations.
"""
        # Recall that the Mineral class doesn't have an explicit
        # GenericRelation defined. That's OK, because you can create
        # TaggedItems explicitly. However, without the GenericRelation, the
        # lookups have to be a bit more explicit.
shiny = TaggedItem.objects.create(content_object=self.quartz, tag="shiny")
clearish = TaggedItem.objects.create(content_object=self.quartz, tag="clearish")
ctype = ContentType.objects.get_for_model(self.quartz)
q = TaggedItem.objects.filter(
content_type__pk=ctype.id, object_id=self.quartz.id
)
self.assertSequenceEqual(q, [clearish, shiny])
def test_access_via_content_type(self):
"""
Test lookups through content type.
"""
self.lion.delete()
self.platypus.tags.create(tag="fatty")
ctype = ContentType.objects.get_for_model(self.platypus)
self.assertSequenceEqual(
Animal.objects.filter(tags__content_type=ctype),
[self.platypus],
)
def test_set_foreign_key(self):
"""
You can set a generic foreign key in the way you'd expect.
"""
tag1 = TaggedItem.objects.create(content_object=self.quartz, tag="shiny")
tag1.content_object = self.platypus
tag1.save()
self.assertSequenceEqual(self.platypus.tags.all(), [tag1])
def test_queries_across_generic_relations(self):
"""
Queries across generic relations respect the content types. Even though
there are two TaggedItems with a tag of "fatty", this query only pulls
out the one with the content type related to Animals.
"""
self.assertSequenceEqual(
Animal.objects.order_by("common_name"),
[self.lion, self.platypus],
)
def test_queries_content_type_restriction(self):
"""
Create another fatty tagged instance with different PK to ensure there
is a content type restriction in the generated queries below.
"""
mpk = ManualPK.objects.create(id=self.lion.pk)
mpk.tags.create(tag="fatty")
self.platypus.tags.create(tag="fatty")
self.assertSequenceEqual(
Animal.objects.filter(tags__tag="fatty"),
[self.platypus],
)
self.assertSequenceEqual(
Animal.objects.exclude(tags__tag="fatty"),
[self.lion],
)
def test_object_deletion_with_generic_relation(self):
"""
        If you delete an object that has an explicit GenericRelation, the
        related objects are deleted along with it.
"""
self.assertQuerySetEqual(
TaggedItem.objects.all(),
[
("fatty", Vegetable, self.bacon.pk),
("hairy", Animal, self.lion.pk),
("salty", Vegetable, self.bacon.pk),
("yellow", Animal, self.lion.pk),
],
self.comp_func,
)
self.lion.delete()
self.assertQuerySetEqual(
TaggedItem.objects.all(),
[
("fatty", Vegetable, self.bacon.pk),
("salty", Vegetable, self.bacon.pk),
],
self.comp_func,
)
def test_object_deletion_without_generic_relation(self):
"""
        If a GenericRelation is not explicitly defined, related objects
        remain after deletion of the source object.
"""
TaggedItem.objects.create(content_object=self.quartz, tag="clearish")
quartz_pk = self.quartz.pk
self.quartz.delete()
self.assertQuerySetEqual(
TaggedItem.objects.all(),
[
("clearish", Mineral, quartz_pk),
("fatty", Vegetable, self.bacon.pk),
("hairy", Animal, self.lion.pk),
("salty", Vegetable, self.bacon.pk),
("yellow", Animal, self.lion.pk),
],
self.comp_func,
)
def test_tag_deletion_related_objects_unaffected(self):
"""
If you delete a tag, the objects using the tag are unaffected (other
than losing a tag).
"""
ctype = ContentType.objects.get_for_model(self.lion)
tag = TaggedItem.objects.get(
content_type__pk=ctype.id, object_id=self.lion.id, tag="hairy"
)
tag.delete()
self.assertSequenceEqual(self.lion.tags.all(), [self.yellow])
self.assertQuerySetEqual(
TaggedItem.objects.all(),
[
("fatty", Vegetable, self.bacon.pk),
("salty", Vegetable, self.bacon.pk),
("yellow", Animal, self.lion.pk),
],
self.comp_func,
)
def test_add_bulk(self):
bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
t1 = TaggedItem.objects.create(content_object=self.quartz, tag="shiny")
t2 = TaggedItem.objects.create(content_object=self.quartz, tag="clearish")
# One update() query.
with self.assertNumQueries(1):
bacon.tags.add(t1, t2)
self.assertEqual(t1.content_object, bacon)
self.assertEqual(t2.content_object, bacon)
def test_add_bulk_false(self):
bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
t1 = TaggedItem.objects.create(content_object=self.quartz, tag="shiny")
t2 = TaggedItem.objects.create(content_object=self.quartz, tag="clearish")
# One save() for each object.
with self.assertNumQueries(2):
bacon.tags.add(t1, t2, bulk=False)
self.assertEqual(t1.content_object, bacon)
self.assertEqual(t2.content_object, bacon)
def test_add_rejects_unsaved_objects(self):
t1 = TaggedItem(content_object=self.quartz, tag="shiny")
msg = (
"<TaggedItem: shiny> instance isn't saved. Use bulk=False or save the "
"object first."
)
with self.assertRaisesMessage(ValueError, msg):
self.bacon.tags.add(t1)
def test_add_rejects_wrong_instances(self):
msg = "'TaggedItem' instance expected, got <Animal: Lion>"
with self.assertRaisesMessage(TypeError, msg):
self.bacon.tags.add(self.lion)
async def test_aadd(self):
bacon = await Vegetable.objects.acreate(name="Bacon", is_yucky=False)
t1 = await TaggedItem.objects.acreate(content_object=self.quartz, tag="shiny")
t2 = await TaggedItem.objects.acreate(content_object=self.quartz, tag="fatty")
await bacon.tags.aadd(t1, t2, bulk=False)
self.assertEqual(await bacon.tags.acount(), 2)
def test_set(self):
bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
fatty = bacon.tags.create(tag="fatty")
salty = bacon.tags.create(tag="salty")
bacon.tags.set([fatty, salty])
self.assertSequenceEqual(bacon.tags.all(), [fatty, salty])
bacon.tags.set([fatty])
self.assertSequenceEqual(bacon.tags.all(), [fatty])
bacon.tags.set([])
self.assertSequenceEqual(bacon.tags.all(), [])
bacon.tags.set([fatty, salty], bulk=False, clear=True)
self.assertSequenceEqual(bacon.tags.all(), [fatty, salty])
bacon.tags.set([fatty], bulk=False, clear=True)
self.assertSequenceEqual(bacon.tags.all(), [fatty])
bacon.tags.set([], clear=True)
self.assertSequenceEqual(bacon.tags.all(), [])
async def test_aset(self):
bacon = await Vegetable.objects.acreate(name="Bacon", is_yucky=False)
fatty = await bacon.tags.acreate(tag="fatty")
await bacon.tags.aset([fatty])
self.assertEqual(await bacon.tags.acount(), 1)
await bacon.tags.aset([])
self.assertEqual(await bacon.tags.acount(), 0)
await bacon.tags.aset([fatty], bulk=False, clear=True)
self.assertEqual(await bacon.tags.acount(), 1)
def test_assign(self):
bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
fatty = bacon.tags.create(tag="fatty")
salty = bacon.tags.create(tag="salty")
bacon.tags.set([fatty, salty])
self.assertSequenceEqual(bacon.tags.all(), [fatty, salty])
bacon.tags.set([fatty])
self.assertSequenceEqual(bacon.tags.all(), [fatty])
bacon.tags.set([])
self.assertSequenceEqual(bacon.tags.all(), [])
def test_assign_with_queryset(self):
# Querysets used in reverse GFK assignments are pre-evaluated so their
# value isn't affected by the clearing operation
# in ManyRelatedManager.set() (#19816).
bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
bacon.tags.create(tag="fatty")
bacon.tags.create(tag="salty")
self.assertEqual(2, bacon.tags.count())
qs = bacon.tags.filter(tag="fatty")
bacon.tags.set(qs)
self.assertEqual(1, bacon.tags.count())
self.assertEqual(1, qs.count())
def test_clear(self):
self.assertSequenceEqual(
TaggedItem.objects.order_by("tag"),
[self.fatty, self.hairy, self.salty, self.yellow],
)
self.bacon.tags.clear()
self.assertSequenceEqual(self.bacon.tags.all(), [])
self.assertSequenceEqual(
TaggedItem.objects.order_by("tag"),
[self.hairy, self.yellow],
)
async def test_aclear(self):
await self.bacon.tags.aclear()
self.assertEqual(await self.bacon.tags.acount(), 0)
def test_remove(self):
self.assertSequenceEqual(
TaggedItem.objects.order_by("tag"),
[self.fatty, self.hairy, self.salty, self.yellow],
)
self.bacon.tags.remove(self.fatty)
self.assertSequenceEqual(self.bacon.tags.all(), [self.salty])
self.assertSequenceEqual(
TaggedItem.objects.order_by("tag"),
[self.hairy, self.salty, self.yellow],
)
async def test_aremove(self):
await self.bacon.tags.aremove(self.fatty)
self.assertEqual(await self.bacon.tags.acount(), 1)
await self.bacon.tags.aremove(self.salty)
self.assertEqual(await self.bacon.tags.acount(), 0)
def test_generic_relation_related_name_default(self):
# GenericRelation isn't usable from the reverse side by default.
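        # (Filtering from TaggedItem back to a tagged model only works when
        # the GenericRelation sets related_query_name; the "animal" and
        # "manualpk" choices listed below presumably come from relations
        # that do set it, while Vegetable's relation does not.)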
msg = (
"Cannot resolve keyword 'vegetable' into field. Choices are: "
"animal, content_object, content_type, content_type_id, id, "
"manualpk, object_id, tag, valuabletaggeditem"
)
with self.assertRaisesMessage(FieldError, msg):
TaggedItem.objects.filter(vegetable__isnull=True)
def test_multiple_gfk(self):
        # Simple tests for multiple GenericForeignKeys. These only use one
        # model, since the tests above should be sufficient.
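        # Comparison (see models.py) presumably carries two
        # GenericForeignKeys with non-default field names, roughly:
        #
        #     first_obj = GenericForeignKey("content_type1", "object_id1")
        #     other_obj = GenericForeignKey("content_type2", "object_id2")
        #
        # with Animal.comparisons a GenericRelation over the first pair.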
tiger = Animal.objects.create(common_name="tiger")
cheetah = Animal.objects.create(common_name="cheetah")
bear = Animal.objects.create(common_name="bear")
# Create directly
c1 = Comparison.objects.create(
first_obj=cheetah, other_obj=tiger, comparative="faster"
)
c2 = Comparison.objects.create(
first_obj=tiger, other_obj=cheetah, comparative="cooler"
)
# Create using GenericRelation
c3 = tiger.comparisons.create(other_obj=bear, comparative="cooler")
c4 = tiger.comparisons.create(other_obj=cheetah, comparative="stronger")
self.assertSequenceEqual(cheetah.comparisons.all(), [c1])
# Filtering works
self.assertCountEqual(
tiger.comparisons.filter(comparative="cooler"),
[c2, c3],
)
# Filtering and deleting works
subjective = ["cooler"]
tiger.comparisons.filter(comparative__in=subjective).delete()
self.assertCountEqual(Comparison.objects.all(), [c1, c4])
# If we delete cheetah, Comparisons with cheetah as 'first_obj' will be
# deleted since Animal has an explicit GenericRelation to Comparison
# through first_obj. Comparisons with cheetah as 'other_obj' will not
# be deleted.
cheetah.delete()
self.assertSequenceEqual(Comparison.objects.all(), [c4])
def test_gfk_subclasses(self):
# GenericForeignKey should work with subclasses (see #8309)
quartz = Mineral.objects.create(name="Quartz", hardness=7)
valuedtag = ValuableTaggedItem.objects.create(
content_object=quartz, tag="shiny", value=10
)
self.assertEqual(valuedtag.content_object, quartz)
def test_generic_relation_to_inherited_child(self):
# GenericRelations to models that use multi-table inheritance work.
granite = ValuableRock.objects.create(name="granite", hardness=5)
ValuableTaggedItem.objects.create(
content_object=granite, tag="countertop", value=1
)
self.assertEqual(ValuableRock.objects.filter(tags__value=1).count(), 1)
# We're generating a slightly inefficient query for tags__tag - we
# first join ValuableRock -> TaggedItem -> ValuableTaggedItem, and then
# we fetch tag by joining TaggedItem from ValuableTaggedItem. The last
# join isn't necessary, as TaggedItem <-> ValuableTaggedItem is a
# one-to-one join.
self.assertEqual(ValuableRock.objects.filter(tags__tag="countertop").count(), 1)
granite.delete() # deleting the rock should delete the related tag.
self.assertEqual(ValuableTaggedItem.objects.count(), 0)
def test_gfk_manager(self):
# GenericForeignKey should not use the default manager (which may
# filter objects).
tailless = Gecko.objects.create(has_tail=False)
tag = TaggedItem.objects.create(content_object=tailless, tag="lizard")
self.assertEqual(tag.content_object, tailless)
def test_subclasses_with_gen_rel(self):
"""
Concrete model subclasses with generic relations work
correctly (ticket 11263).
"""
granite = Rock.objects.create(name="granite", hardness=5)
TaggedItem.objects.create(content_object=granite, tag="countertop")
self.assertEqual(Rock.objects.get(tags__tag="countertop"), granite)
def test_subclasses_with_parent_gen_rel(self):
"""
Generic relations on a base class (Vegetable) work correctly in
subclasses (Carrot).
"""
        carrot = Carrot.objects.create(name="carrot")
        TaggedItem.objects.create(content_object=carrot, tag="orange")
        self.assertEqual(Carrot.objects.get(tags__tag="orange"), carrot)
def test_get_or_create(self):
# get_or_create should work with virtual fields (content_object)
quartz = Mineral.objects.create(name="Quartz", hardness=7)
tag, created = TaggedItem.objects.get_or_create(
tag="shiny", defaults={"content_object": quartz}
)
self.assertTrue(created)
self.assertEqual(tag.tag, "shiny")
self.assertEqual(tag.content_object.id, quartz.id)
def test_update_or_create_defaults(self):
# update_or_create should work with virtual fields (content_object)
quartz = Mineral.objects.create(name="Quartz", hardness=7)
diamond = Mineral.objects.create(name="Diamond", hardness=7)
tag, created = TaggedItem.objects.update_or_create(
tag="shiny", defaults={"content_object": quartz}
)
self.assertTrue(created)
self.assertEqual(tag.content_object.id, quartz.id)
tag, created = TaggedItem.objects.update_or_create(
tag="shiny", defaults={"content_object": diamond}
)
self.assertFalse(created)
self.assertEqual(tag.content_object.id, diamond.id)
def test_query_content_type(self):
msg = "Field 'content_object' does not generate an automatic reverse relation"
with self.assertRaisesMessage(FieldError, msg):
TaggedItem.objects.get(content_object="")
def test_unsaved_generic_foreign_key_parent_save(self):
quartz = Mineral(name="Quartz", hardness=7)
tagged_item = TaggedItem(tag="shiny", content_object=quartz)
msg = (
"save() prohibited to prevent data loss due to unsaved related object "
"'content_object'."
)
with self.assertRaisesMessage(ValueError, msg):
tagged_item.save()
@skipUnlessDBFeature("has_bulk_insert")
def test_unsaved_generic_foreign_key_parent_bulk_create(self):
quartz = Mineral(name="Quartz", hardness=7)
tagged_item = TaggedItem(tag="shiny", content_object=quartz)
msg = (
"bulk_create() prohibited to prevent data loss due to unsaved related "
"object 'content_object'."
)
with self.assertRaisesMessage(ValueError, msg):
TaggedItem.objects.bulk_create([tagged_item])
def test_cache_invalidation_for_content_type_id(self):
# Create a Vegetable and Mineral with the same id.
new_id = (
max(
Vegetable.objects.order_by("-id")[0].id,
Mineral.objects.order_by("-id")[0].id,
)
+ 1
)
broccoli = Vegetable.objects.create(id=new_id, name="Broccoli")
diamond = Mineral.objects.create(id=new_id, name="Diamond", hardness=7)
tag = TaggedItem.objects.create(content_object=broccoli, tag="yummy")
tag.content_type = ContentType.objects.get_for_model(diamond)
self.assertEqual(tag.content_object, diamond)
def test_cache_invalidation_for_object_id(self):
broccoli = Vegetable.objects.create(name="Broccoli")
cauliflower = Vegetable.objects.create(name="Cauliflower")
tag = TaggedItem.objects.create(content_object=broccoli, tag="yummy")
tag.object_id = cauliflower.id
self.assertEqual(tag.content_object, cauliflower)
def test_assign_content_object_in_init(self):
spinach = Vegetable(name="spinach")
tag = TaggedItem(content_object=spinach)
self.assertEqual(tag.content_object, spinach)
def test_create_after_prefetch(self):
platypus = Animal.objects.prefetch_related("tags").get(pk=self.platypus.pk)
self.assertSequenceEqual(platypus.tags.all(), [])
weird_tag = platypus.tags.create(tag="weird")
self.assertSequenceEqual(platypus.tags.all(), [weird_tag])
def test_add_after_prefetch(self):
platypus = Animal.objects.prefetch_related("tags").get(pk=self.platypus.pk)
self.assertSequenceEqual(platypus.tags.all(), [])
weird_tag = TaggedItem.objects.create(tag="weird", content_object=platypus)
platypus.tags.add(weird_tag)
self.assertSequenceEqual(platypus.tags.all(), [weird_tag])
def test_remove_after_prefetch(self):
weird_tag = self.platypus.tags.create(tag="weird")
platypus = Animal.objects.prefetch_related("tags").get(pk=self.platypus.pk)
self.assertSequenceEqual(platypus.tags.all(), [weird_tag])
platypus.tags.remove(weird_tag)
self.assertSequenceEqual(platypus.tags.all(), [])
def test_clear_after_prefetch(self):
weird_tag = self.platypus.tags.create(tag="weird")
platypus = Animal.objects.prefetch_related("tags").get(pk=self.platypus.pk)
self.assertSequenceEqual(platypus.tags.all(), [weird_tag])
platypus.tags.clear()
self.assertSequenceEqual(platypus.tags.all(), [])
def test_set_after_prefetch(self):
platypus = Animal.objects.prefetch_related("tags").get(pk=self.platypus.pk)
self.assertSequenceEqual(platypus.tags.all(), [])
furry_tag = TaggedItem.objects.create(tag="furry", content_object=platypus)
platypus.tags.set([furry_tag])
self.assertSequenceEqual(platypus.tags.all(), [furry_tag])
weird_tag = TaggedItem.objects.create(tag="weird", content_object=platypus)
platypus.tags.set([weird_tag])
self.assertSequenceEqual(platypus.tags.all(), [weird_tag])
def test_add_then_remove_after_prefetch(self):
furry_tag = self.platypus.tags.create(tag="furry")
platypus = Animal.objects.prefetch_related("tags").get(pk=self.platypus.pk)
self.assertSequenceEqual(platypus.tags.all(), [furry_tag])
weird_tag = self.platypus.tags.create(tag="weird")
platypus.tags.add(weird_tag)
self.assertSequenceEqual(platypus.tags.all(), [furry_tag, weird_tag])
platypus.tags.remove(weird_tag)
self.assertSequenceEqual(platypus.tags.all(), [furry_tag])
def test_prefetch_related_different_content_types(self):
TaggedItem.objects.create(content_object=self.platypus, tag="prefetch_tag_1")
TaggedItem.objects.create(
content_object=Vegetable.objects.create(name="Broccoli"),
tag="prefetch_tag_2",
)
TaggedItem.objects.create(
content_object=Animal.objects.create(common_name="Bear"),
tag="prefetch_tag_3",
)
qs = TaggedItem.objects.filter(
tag__startswith="prefetch_tag_",
).prefetch_related("content_object", "content_object__tags")
with self.assertNumQueries(4):
tags = list(qs)
for tag in tags:
self.assertSequenceEqual(tag.content_object.tags.all(), [tag])
def test_prefetch_related_custom_object_id(self):
tiger = Animal.objects.create(common_name="tiger")
cheetah = Animal.objects.create(common_name="cheetah")
Comparison.objects.create(
first_obj=cheetah,
other_obj=tiger,
comparative="faster",
)
Comparison.objects.create(
first_obj=tiger,
other_obj=cheetah,
comparative="cooler",
)
qs = Comparison.objects.prefetch_related("first_obj__comparisons")
for comparison in qs:
self.assertSequenceEqual(
comparison.first_obj.comparisons.all(), [comparison]
)
class ProxyRelatedModelTest(TestCase):
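    # These tests assume (per models.py) a concrete ConcreteRelatedModel, a
    # ProxyRelatedModel proxying it, and two holder models whose
    # GenericForeignKey differs only in the for_concrete_model flag:
    # ForConcreteModelModel uses the default (True, so the concrete model's
    # content type is stored) and ForProxyModelModel passes False (so the
    # proxy's own content type is stored).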
def test_default_behavior(self):
"""
        for_concrete_model defaults to True: assigning a proxy instance
        stores the concrete model's content type.
"""
base = ForConcreteModelModel()
base.obj = rel = ProxyRelatedModel.objects.create()
base.save()
base = ForConcreteModelModel.objects.get(pk=base.pk)
rel = ConcreteRelatedModel.objects.get(pk=rel.pk)
self.assertEqual(base.obj, rel)
def test_works_normally(self):
"""
When for_concrete_model is False, we should still be able to get
an instance of the concrete class.
"""
base = ForProxyModelModel()
base.obj = rel = ConcreteRelatedModel.objects.create()
base.save()
base = ForProxyModelModel.objects.get(pk=base.pk)
self.assertEqual(base.obj, rel)
def test_proxy_is_returned(self):
"""
Instances of the proxy should be returned when
for_concrete_model is False.
"""
base = ForProxyModelModel()
base.obj = ProxyRelatedModel.objects.create()
base.save()
base = ForProxyModelModel.objects.get(pk=base.pk)
self.assertIsInstance(base.obj, ProxyRelatedModel)
def test_query(self):
base = ForProxyModelModel()
base.obj = rel = ConcreteRelatedModel.objects.create()
base.save()
self.assertEqual(rel, ConcreteRelatedModel.objects.get(bases__id=base.id))
def test_query_proxy(self):
base = ForProxyModelModel()
base.obj = rel = ProxyRelatedModel.objects.create()
base.save()
self.assertEqual(rel, ProxyRelatedModel.objects.get(bases__id=base.id))
def test_generic_relation(self):
base = ForProxyModelModel()
base.obj = ProxyRelatedModel.objects.create()
base.save()
base = ForProxyModelModel.objects.get(pk=base.pk)
rel = ProxyRelatedModel.objects.get(pk=base.obj.pk)
self.assertEqual(base, rel.bases.get())
def test_generic_relation_set(self):
base = ForProxyModelModel()
base.obj = ConcreteRelatedModel.objects.create()
base.save()
newrel = ConcreteRelatedModel.objects.create()
newrel.bases.set([base])
newrel = ConcreteRelatedModel.objects.get(pk=newrel.pk)
self.assertEqual(base, newrel.bases.get())
class TestInitWithNoneArgument(SimpleTestCase):
def test_none_allowed(self):
# AllowsNullGFK doesn't require a content_type, so None argument should
# also be allowed.
AllowsNullGFK(content_object=None)
# TaggedItem requires a content_type but initializing with None should
# be allowed.
TaggedItem(content_object=None)

"""
Testing using the Test Client
The test client is a class that can act like a simple
browser for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
``Client`` objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the ``Client`` instance.
This is not intended as a replacement for Twill, Selenium, or
other browser automation frameworks - it is here to allow
testing against the contexts and templates produced by a view,
rather than the HTML rendered to the end-user.
"""
import itertools
import pickle
import tempfile
from unittest import mock
from django.contrib.auth.models import User
from django.core import mail
from django.http import HttpResponse, HttpResponseNotAllowed
from django.test import (
AsyncRequestFactory,
Client,
RequestFactory,
SimpleTestCase,
TestCase,
modify_settings,
override_settings,
)
from django.urls import reverse_lazy
from django.utils.decorators import async_only_middleware
from django.views.generic import RedirectView
from .views import TwoArgException, get_view, post_view, trace_view
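# The views imported above live in this app's views.py. As a rough sketch of
# the shape these tests assume, get_view renders a named template so that
# response.templates and response.context can be asserted on:
#
#     def get_view(request):
#         t = Template("This is a test. {{ var }} is the value.",
#                      name="GET Template")
#         c = Context({"var": request.GET.get("var")})
#         return HttpResponse(t.render(c))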
def middleware_urlconf(get_response):
def middleware(request):
request.urlconf = "test_client.urls_middleware_urlconf"
return get_response(request)
return middleware
@async_only_middleware
def async_middleware_urlconf(get_response):
async def middleware(request):
request.urlconf = "test_client.urls_middleware_urlconf"
return await get_response(request)
return middleware
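# Assigning request.urlconf in a middleware makes URL resolution for that
# request use the given module instead of ROOT_URLCONF; the async variant
# exists so the same override can also be exercised through an async
# middleware chain.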
@override_settings(ROOT_URLCONF="test_client.urls")
class ClientTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create_user(username="testclient", password="password")
cls.u2 = User.objects.create_user(
username="inactive", password="password", is_active=False
)
def test_get_view(self):
"GET a view"
# The data is ignored, but let's check it doesn't crash the system
# anyway.
data = {"var": "\xf2"}
response = self.client.get("/get_view/", data)
# Check some response details
self.assertContains(response, "This is a test")
self.assertEqual(response.context["var"], "\xf2")
self.assertEqual(response.templates[0].name, "GET Template")
def test_pickling_response(self):
tests = ["/cbv_view/", "/get_view/"]
for url in tests:
with self.subTest(url=url):
response = self.client.get(url)
dump = pickle.dumps(response)
response_from_pickle = pickle.loads(dump)
self.assertEqual(repr(response), repr(response_from_pickle))
async def test_pickling_response_async(self):
response = await self.async_client.get("/async_get_view/")
dump = pickle.dumps(response)
response_from_pickle = pickle.loads(dump)
self.assertEqual(repr(response), repr(response_from_pickle))
def test_query_string_encoding(self):
# WSGI requires latin-1 encoded strings.
response = self.client.get("/get_view/?var=1\ufffd")
self.assertEqual(response.context["var"], "1\ufffd")
def test_get_data_none(self):
msg = (
"Cannot encode None for key 'value' in a query string. Did you "
"mean to pass an empty string or omit the value?"
)
with self.assertRaisesMessage(TypeError, msg):
self.client.get("/get_view/", {"value": None})
def test_get_post_view(self):
"GET a view that normally expects POSTs"
response = self.client.get("/post_view/", {})
# Check some response details
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, "Empty GET Template")
self.assertTemplateUsed(response, "Empty GET Template")
self.assertTemplateNotUsed(response, "Empty POST Template")
def test_empty_post(self):
"POST an empty dictionary to a view"
response = self.client.post("/post_view/", {})
# Check some response details
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, "Empty POST Template")
self.assertTemplateNotUsed(response, "Empty GET Template")
self.assertTemplateUsed(response, "Empty POST Template")
def test_post(self):
"POST some data to a view"
post_data = {"value": 37}
response = self.client.post("/post_view/", post_data)
# Check some response details
self.assertContains(response, "Data received")
self.assertEqual(response.context["data"], "37")
self.assertEqual(response.templates[0].name, "POST Template")
def test_post_data_none(self):
msg = (
"Cannot encode None for key 'value' as POST data. Did you mean "
"to pass an empty string or omit the value?"
)
with self.assertRaisesMessage(TypeError, msg):
self.client.post("/post_view/", {"value": None})
def test_json_serialization(self):
"""The test client serializes JSON data."""
methods = ("post", "put", "patch", "delete")
tests = (
({"value": 37}, {"value": 37}),
([37, True], [37, True]),
((37, False), [37, False]),
)
for method in methods:
with self.subTest(method=method):
for data, expected in tests:
with self.subTest(data):
client_method = getattr(self.client, method)
method_name = method.upper()
response = client_method(
"/json_view/", data, content_type="application/json"
)
self.assertContains(response, "Viewing %s page." % method_name)
self.assertEqual(response.context["data"], expected)
def test_json_encoder_argument(self):
"""The test Client accepts a json_encoder."""
mock_encoder = mock.MagicMock()
mock_encoding = mock.MagicMock()
mock_encoder.return_value = mock_encoding
mock_encoding.encode.return_value = '{"value": 37}'
client = self.client_class(json_encoder=mock_encoder)
        # Vendor-tree JSON content types ("application/...+json") are accepted.
client.post(
"/json_view/", {"value": 37}, content_type="application/vnd.api+json"
)
self.assertTrue(mock_encoder.called)
self.assertTrue(mock_encoding.encode.called)
def test_put(self):
response = self.client.put("/put_view/", {"foo": "bar"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, "PUT Template")
self.assertEqual(response.context["data"], "{'foo': 'bar'}")
self.assertEqual(response.context["Content-Length"], "14")
def test_trace(self):
"""TRACE a view"""
response = self.client.trace("/trace_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["method"], "TRACE")
self.assertEqual(response.templates[0].name, "TRACE Template")
def test_response_headers(self):
"Check the value of HTTP headers returned in a response"
response = self.client.get("/header_view/")
self.assertEqual(response.headers["X-DJANGO-TEST"], "Slartibartfast")
def test_response_attached_request(self):
"""
The returned response has a ``request`` attribute with the originating
environ dict and a ``wsgi_request`` with the originating WSGIRequest.
"""
response = self.client.get("/header_view/")
self.assertTrue(hasattr(response, "request"))
self.assertTrue(hasattr(response, "wsgi_request"))
for key, value in response.request.items():
self.assertIn(key, response.wsgi_request.environ)
self.assertEqual(response.wsgi_request.environ[key], value)
def test_response_resolver_match(self):
"""
The response contains a ResolverMatch instance.
"""
response = self.client.get("/header_view/")
self.assertTrue(hasattr(response, "resolver_match"))
def test_response_resolver_match_redirect_follow(self):
"""
The response ResolverMatch instance contains the correct
information when following redirects.
"""
response = self.client.get("/redirect_view/", follow=True)
self.assertEqual(response.resolver_match.url_name, "get_view")
def test_response_resolver_match_regular_view(self):
"""
The response ResolverMatch instance contains the correct
information when accessing a regular view.
"""
response = self.client.get("/get_view/")
self.assertEqual(response.resolver_match.url_name, "get_view")
def test_response_resolver_match_class_based_view(self):
"""
The response ResolverMatch instance can be used to access the CBV view
class.
"""
response = self.client.get("/accounts/")
self.assertIs(response.resolver_match.func.view_class, RedirectView)
@modify_settings(MIDDLEWARE={"prepend": "test_client.tests.middleware_urlconf"})
def test_response_resolver_match_middleware_urlconf(self):
response = self.client.get("/middleware_urlconf_view/")
self.assertEqual(response.resolver_match.url_name, "middleware_urlconf_view")
def test_raw_post(self):
"POST raw data (with a content type) to a view"
test_doc = """<?xml version="1.0" encoding="utf-8"?>
<library><book><title>Blink</title><author>Malcolm Gladwell</author></book>
</library>
"""
response = self.client.post(
"/raw_post_view/", test_doc, content_type="text/xml"
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, "Book template")
self.assertEqual(response.content, b"Blink - Malcolm Gladwell")
def test_insecure(self):
"GET a URL through http"
response = self.client.get("/secure_view/", secure=False)
self.assertFalse(response.test_was_secure_request)
self.assertEqual(response.test_server_port, "80")
def test_secure(self):
"GET a URL through https"
response = self.client.get("/secure_view/", secure=True)
self.assertTrue(response.test_was_secure_request)
self.assertEqual(response.test_server_port, "443")
def test_redirect(self):
"GET a URL that redirects elsewhere"
response = self.client.get("/redirect_view/")
self.assertRedirects(response, "/get_view/")
def test_redirect_with_query(self):
"GET a URL that redirects with given GET parameters"
response = self.client.get("/redirect_view/", {"var": "value"})
self.assertRedirects(response, "/get_view/?var=value")
def test_redirect_with_query_ordering(self):
"""assertRedirects() ignores the order of query string parameters."""
response = self.client.get("/redirect_view/", {"var": "value", "foo": "bar"})
self.assertRedirects(response, "/get_view/?var=value&foo=bar")
self.assertRedirects(response, "/get_view/?foo=bar&var=value")
def test_permanent_redirect(self):
"GET a URL that redirects permanently elsewhere"
response = self.client.get("/permanent_redirect_view/")
self.assertRedirects(response, "/get_view/", status_code=301)
def test_temporary_redirect(self):
"GET a URL that does a non-permanent redirect"
response = self.client.get("/temporary_redirect_view/")
self.assertRedirects(response, "/get_view/", status_code=302)
def test_redirect_to_strange_location(self):
"GET a URL that redirects to a non-200 page"
response = self.client.get("/double_redirect_view/")
        # The response was a 302, and the attempt to fetch the redirect
        # target returned a 301.
self.assertRedirects(
response, "/permanent_redirect_view/", target_status_code=301
)
def test_follow_redirect(self):
"A URL that redirects can be followed to termination."
response = self.client.get("/double_redirect_view/", follow=True)
self.assertRedirects(
response, "/get_view/", status_code=302, target_status_code=200
)
self.assertEqual(len(response.redirect_chain), 2)
def test_follow_relative_redirect(self):
"A URL with a relative redirect can be followed."
response = self.client.get("/accounts/", follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.request["PATH_INFO"], "/accounts/login/")
def test_follow_relative_redirect_no_trailing_slash(self):
"A URL with a relative redirect with no trailing slash can be followed."
response = self.client.get("/accounts/no_trailing_slash", follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.request["PATH_INFO"], "/accounts/login/")
def test_redirect_to_querystring_only(self):
"""A URL that consists of a querystring only can be followed"""
response = self.client.post("/post_then_get_view/", follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.request["PATH_INFO"], "/post_then_get_view/")
self.assertEqual(response.content, b"The value of success is true.")
def test_follow_307_and_308_redirect(self):
"""
A 307 or 308 redirect preserves the request method after the redirect.
"""
methods = ("get", "post", "head", "options", "put", "patch", "delete", "trace")
codes = (307, 308)
for method, code in itertools.product(methods, codes):
with self.subTest(method=method, code=code):
req_method = getattr(self.client, method)
response = req_method(
"/redirect_view_%s/" % code, data={"value": "test"}, follow=True
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.request["PATH_INFO"], "/post_view/")
self.assertEqual(response.request["REQUEST_METHOD"], method.upper())
def test_follow_307_and_308_preserves_query_string(self):
methods = ("post", "options", "put", "patch", "delete", "trace")
codes = (307, 308)
for method, code in itertools.product(methods, codes):
with self.subTest(method=method, code=code):
req_method = getattr(self.client, method)
response = req_method(
"/redirect_view_%s_query_string/" % code,
data={"value": "test"},
follow=True,
)
self.assertRedirects(
response, "/post_view/?hello=world", status_code=code
)
self.assertEqual(response.request["QUERY_STRING"], "hello=world")
def test_follow_307_and_308_get_head_query_string(self):
methods = ("get", "head")
codes = (307, 308)
for method, code in itertools.product(methods, codes):
with self.subTest(method=method, code=code):
req_method = getattr(self.client, method)
response = req_method(
"/redirect_view_%s_query_string/" % code,
data={"value": "test"},
follow=True,
)
self.assertRedirects(
response, "/post_view/?hello=world", status_code=code
)
self.assertEqual(response.request["QUERY_STRING"], "value=test")
def test_follow_307_and_308_preserves_post_data(self):
for code in (307, 308):
with self.subTest(code=code):
response = self.client.post(
"/redirect_view_%s/" % code, data={"value": "test"}, follow=True
)
self.assertContains(response, "test is the value")
def test_follow_307_and_308_preserves_put_body(self):
for code in (307, 308):
with self.subTest(code=code):
response = self.client.put(
"/redirect_view_%s/?to=/put_view/" % code, data="a=b", follow=True
)
self.assertContains(response, "a=b is the body")
def test_follow_307_and_308_preserves_get_params(self):
data = {"var": 30, "to": "/get_view/"}
for code in (307, 308):
with self.subTest(code=code):
response = self.client.get(
"/redirect_view_%s/" % code, data=data, follow=True
)
self.assertContains(response, "30 is the value")
def test_redirect_http(self):
"""GET a URL that redirects to an HTTP URI."""
response = self.client.get("/http_redirect_view/", follow=True)
self.assertFalse(response.test_was_secure_request)
def test_redirect_https(self):
"""GET a URL that redirects to an HTTPS URI."""
response = self.client.get("/https_redirect_view/", follow=True)
self.assertTrue(response.test_was_secure_request)
def test_notfound_response(self):
"GET a URL that responds as '404:Not Found'"
response = self.client.get("/bad_view/")
self.assertContains(response, "MAGIC", status_code=404)
def test_valid_form(self):
"POST valid data to a form"
post_data = {
"text": "Hello World",
"email": "[email protected]",
"value": 37,
"single": "b",
"multi": ("b", "c", "e"),
}
response = self.client.post("/form_view/", post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Valid POST Template")
def test_valid_form_with_hints(self):
"GET a form, providing hints in the GET data"
hints = {"text": "Hello World", "multi": ("b", "c", "e")}
response = self.client.get("/form_view/", data=hints)
        # The multi-value data was encoded into the query string correctly,
        # so no "Select a valid choice." errors appear.
self.assertContains(response, "Select a valid choice.", 0)
self.assertTemplateUsed(response, "Form GET Template")
def test_incomplete_data_form(self):
"POST incomplete data to a form"
post_data = {"text": "Hello World", "value": 37}
response = self.client.post("/form_view/", post_data)
self.assertContains(response, "This field is required.", 3)
self.assertTemplateUsed(response, "Invalid POST Template")
form = response.context["form"]
self.assertFormError(form, "email", "This field is required.")
self.assertFormError(form, "single", "This field is required.")
self.assertFormError(form, "multi", "This field is required.")
def test_form_error(self):
"POST erroneous data to a form"
post_data = {
"text": "Hello World",
"email": "not an email address",
"value": 37,
"single": "b",
"multi": ("b", "c", "e"),
}
response = self.client.post("/form_view/", post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
self.assertFormError(
response.context["form"], "email", "Enter a valid email address."
)
def test_valid_form_with_template(self):
"POST valid data to a form using multiple templates"
post_data = {
"text": "Hello World",
"email": "[email protected]",
"value": 37,
"single": "b",
"multi": ("b", "c", "e"),
}
response = self.client.post("/form_view_with_template/", post_data)
self.assertContains(response, "POST data OK")
self.assertTemplateUsed(response, "form_view.html")
self.assertTemplateUsed(response, "base.html")
self.assertTemplateNotUsed(response, "Valid POST Template")
def test_incomplete_data_form_with_template(self):
"POST incomplete data to a form using multiple templates"
post_data = {"text": "Hello World", "value": 37}
response = self.client.post("/form_view_with_template/", post_data)
self.assertContains(response, "POST data has errors")
self.assertTemplateUsed(response, "form_view.html")
self.assertTemplateUsed(response, "base.html")
self.assertTemplateNotUsed(response, "Invalid POST Template")
form = response.context["form"]
self.assertFormError(form, "email", "This field is required.")
self.assertFormError(form, "single", "This field is required.")
self.assertFormError(form, "multi", "This field is required.")
def test_form_error_with_template(self):
"POST erroneous data to a form using multiple templates"
post_data = {
"text": "Hello World",
"email": "not an email address",
"value": 37,
"single": "b",
"multi": ("b", "c", "e"),
}
response = self.client.post("/form_view_with_template/", post_data)
self.assertContains(response, "POST data has errors")
self.assertTemplateUsed(response, "form_view.html")
self.assertTemplateUsed(response, "base.html")
self.assertTemplateNotUsed(response, "Invalid POST Template")
self.assertFormError(
response.context["form"], "email", "Enter a valid email address."
)
def test_unknown_page(self):
"GET an invalid URL"
response = self.client.get("/unknown_view/")
# The response was a 404
self.assertEqual(response.status_code, 404)
def test_url_parameters(self):
"Make sure that URL ;-parameters are not stripped."
response = self.client.get("/unknown_view/;some-parameter")
# The path in the response includes it (ignore that it's a 404)
self.assertEqual(response.request["PATH_INFO"], "/unknown_view/;some-parameter")
def test_view_with_login(self):
"Request a page that is protected with @login_required"
# Get the page without logging in. Should result in 302.
response = self.client.get("/login_protected_view/")
self.assertRedirects(response, "/accounts/login/?next=/login_protected_view/")
# Log in
login = self.client.login(username="testclient", password="password")
self.assertTrue(login, "Could not log in")
# Request a page that requires a login
response = self.client.get("/login_protected_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["user"].username, "testclient")
@override_settings(
INSTALLED_APPS=["django.contrib.auth"],
SESSION_ENGINE="django.contrib.sessions.backends.file",
)
def test_view_with_login_when_sessions_app_is_not_installed(self):
self.test_view_with_login()
def test_view_with_force_login(self):
"Request a page that is protected with @login_required"
# Get the page without logging in. Should result in 302.
response = self.client.get("/login_protected_view/")
self.assertRedirects(response, "/accounts/login/?next=/login_protected_view/")
# Log in
self.client.force_login(self.u1)
# Request a page that requires a login
response = self.client.get("/login_protected_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["user"].username, "testclient")
def test_view_with_method_login(self):
"Request a page that is protected with a @login_required method"
# Get the page without logging in. Should result in 302.
response = self.client.get("/login_protected_method_view/")
self.assertRedirects(
response, "/accounts/login/?next=/login_protected_method_view/"
)
# Log in
login = self.client.login(username="testclient", password="password")
self.assertTrue(login, "Could not log in")
# Request a page that requires a login
response = self.client.get("/login_protected_method_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["user"].username, "testclient")
def test_view_with_method_force_login(self):
"Request a page that is protected with a @login_required method"
# Get the page without logging in. Should result in 302.
response = self.client.get("/login_protected_method_view/")
self.assertRedirects(
response, "/accounts/login/?next=/login_protected_method_view/"
)
# Log in
self.client.force_login(self.u1)
# Request a page that requires a login
response = self.client.get("/login_protected_method_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["user"].username, "testclient")
def test_view_with_login_and_custom_redirect(self):
"""
Request a page that is protected with
@login_required(redirect_field_name='redirect_to')
"""
# Get the page without logging in. Should result in 302.
response = self.client.get("/login_protected_view_custom_redirect/")
self.assertRedirects(
response,
"/accounts/login/?redirect_to=/login_protected_view_custom_redirect/",
)
# Log in
login = self.client.login(username="testclient", password="password")
self.assertTrue(login, "Could not log in")
# Request a page that requires a login
response = self.client.get("/login_protected_view_custom_redirect/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["user"].username, "testclient")
def test_view_with_force_login_and_custom_redirect(self):
"""
Request a page that is protected with
@login_required(redirect_field_name='redirect_to')
"""
# Get the page without logging in. Should result in 302.
response = self.client.get("/login_protected_view_custom_redirect/")
self.assertRedirects(
response,
"/accounts/login/?redirect_to=/login_protected_view_custom_redirect/",
)
# Log in
self.client.force_login(self.u1)
# Request a page that requires a login
response = self.client.get("/login_protected_view_custom_redirect/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["user"].username, "testclient")
def test_view_with_bad_login(self):
"Request a page that is protected with @login, but use bad credentials"
login = self.client.login(username="otheruser", password="nopassword")
self.assertFalse(login)
def test_view_with_inactive_login(self):
"""
        An inactive user may log in if the authentication backend allows it.
"""
credentials = {"username": "inactive", "password": "password"}
self.assertFalse(self.client.login(**credentials))
with self.settings(
AUTHENTICATION_BACKENDS=[
"django.contrib.auth.backends.AllowAllUsersModelBackend"
]
):
self.assertTrue(self.client.login(**credentials))
@override_settings(
AUTHENTICATION_BACKENDS=[
"django.contrib.auth.backends.ModelBackend",
"django.contrib.auth.backends.AllowAllUsersModelBackend",
]
)
def test_view_with_inactive_force_login(self):
"Request a page that is protected with @login, but use an inactive login"
# Get the page without logging in. Should result in 302.
response = self.client.get("/login_protected_view/")
self.assertRedirects(response, "/accounts/login/?next=/login_protected_view/")
# Log in
self.client.force_login(
self.u2, backend="django.contrib.auth.backends.AllowAllUsersModelBackend"
)
# Request a page that requires a login
response = self.client.get("/login_protected_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["user"].username, "inactive")
def test_logout(self):
"Request a logout after logging in"
# Log in
self.client.login(username="testclient", password="password")
# Request a page that requires a login
response = self.client.get("/login_protected_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["user"].username, "testclient")
# Log out
self.client.logout()
# Request a page that requires a login
response = self.client.get("/login_protected_view/")
self.assertRedirects(response, "/accounts/login/?next=/login_protected_view/")
def test_logout_with_force_login(self):
"Request a logout after logging in"
# Log in
self.client.force_login(self.u1)
# Request a page that requires a login
response = self.client.get("/login_protected_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["user"].username, "testclient")
# Log out
self.client.logout()
# Request a page that requires a login
response = self.client.get("/login_protected_view/")
self.assertRedirects(response, "/accounts/login/?next=/login_protected_view/")
@override_settings(
AUTHENTICATION_BACKENDS=[
"django.contrib.auth.backends.ModelBackend",
"test_client.auth_backends.TestClientBackend",
],
)
def test_force_login_with_backend(self):
"""
Request a page that is protected with @login_required when using
force_login() and passing a backend.
"""
# Get the page without logging in. Should result in 302.
response = self.client.get("/login_protected_view/")
self.assertRedirects(response, "/accounts/login/?next=/login_protected_view/")
# Log in
self.client.force_login(
self.u1, backend="test_client.auth_backends.TestClientBackend"
)
self.assertEqual(self.u1.backend, "test_client.auth_backends.TestClientBackend")
# Request a page that requires a login
response = self.client.get("/login_protected_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["user"].username, "testclient")
@override_settings(
AUTHENTICATION_BACKENDS=[
"django.contrib.auth.backends.ModelBackend",
"test_client.auth_backends.TestClientBackend",
],
)
def test_force_login_without_backend(self):
"""
force_login() without passing a backend and with multiple backends
configured should automatically use the first backend.
"""
self.client.force_login(self.u1)
response = self.client.get("/login_protected_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["user"].username, "testclient")
self.assertEqual(self.u1.backend, "django.contrib.auth.backends.ModelBackend")
@override_settings(
AUTHENTICATION_BACKENDS=[
"test_client.auth_backends.BackendWithoutGetUserMethod",
"django.contrib.auth.backends.ModelBackend",
]
)
def test_force_login_with_backend_missing_get_user(self):
"""
force_login() skips auth backends without a get_user() method.
"""
self.client.force_login(self.u1)
self.assertEqual(self.u1.backend, "django.contrib.auth.backends.ModelBackend")
@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.signed_cookies")
def test_logout_cookie_sessions(self):
self.test_logout()
def test_view_with_permissions(self):
"Request a page that is protected with @permission_required"
# Get the page without logging in. Should result in 302.
response = self.client.get("/permission_protected_view/")
self.assertRedirects(
response, "/accounts/login/?next=/permission_protected_view/"
)
# Log in
login = self.client.login(username="testclient", password="password")
self.assertTrue(login, "Could not log in")
# Log in with wrong permissions. Should result in 302.
response = self.client.get("/permission_protected_view/")
self.assertRedirects(
response, "/accounts/login/?next=/permission_protected_view/"
)
# TODO: Log in with right permissions and request the page again
def test_view_with_permissions_exception(self):
"""
Request a page that is protected with @permission_required but raises
an exception.
"""
# Get the page without logging in. Should result in 403.
response = self.client.get("/permission_protected_view_exception/")
self.assertEqual(response.status_code, 403)
# Log in
login = self.client.login(username="testclient", password="password")
self.assertTrue(login, "Could not log in")
# Log in with wrong permissions. Should result in 403.
response = self.client.get("/permission_protected_view_exception/")
self.assertEqual(response.status_code, 403)
def test_view_with_method_permissions(self):
"Request a page that is protected with a @permission_required method"
# Get the page without logging in. Should result in 302.
response = self.client.get("/permission_protected_method_view/")
self.assertRedirects(
response, "/accounts/login/?next=/permission_protected_method_view/"
)
# Log in
login = self.client.login(username="testclient", password="password")
self.assertTrue(login, "Could not log in")
# Log in with wrong permissions. Should result in 302.
response = self.client.get("/permission_protected_method_view/")
self.assertRedirects(
response, "/accounts/login/?next=/permission_protected_method_view/"
)
# TODO: Log in with right permissions and request the page again
def test_external_redirect(self):
response = self.client.get("/django_project_redirect/")
self.assertRedirects(
response, "https://www.djangoproject.com/", fetch_redirect_response=False
)
def test_external_redirect_without_trailing_slash(self):
"""
Client._handle_redirects() with an empty path.
"""
response = self.client.get("/no_trailing_slash_external_redirect/", follow=True)
self.assertRedirects(response, "https://testserver")
def test_external_redirect_with_fetch_error_msg(self):
"""
assertRedirects without fetch_redirect_response=False raises
a relevant ValueError rather than a non-descript AssertionError.
"""
response = self.client.get("/django_project_redirect/")
msg = (
"The test client is unable to fetch remote URLs (got "
"https://www.djangoproject.com/). If the host is served by Django, "
"add 'www.djangoproject.com' to ALLOWED_HOSTS. "
"Otherwise, use assertRedirects(..., fetch_redirect_response=False)."
)
with self.assertRaisesMessage(ValueError, msg):
self.assertRedirects(response, "https://www.djangoproject.com/")
def test_session_modifying_view(self):
"Request a page that modifies the session"
# Session value isn't set initially
with self.assertRaises(KeyError):
self.client.session["tobacconist"]
self.client.post("/session_view/")
# The session was modified
self.assertEqual(self.client.session["tobacconist"], "hovercraft")
@override_settings(
INSTALLED_APPS=[],
SESSION_ENGINE="django.contrib.sessions.backends.file",
)
def test_sessions_app_is_not_installed(self):
self.test_session_modifying_view()
@override_settings(
INSTALLED_APPS=[],
SESSION_ENGINE="django.contrib.sessions.backends.nonexistent",
)
def test_session_engine_is_invalid(self):
with self.assertRaisesMessage(ImportError, "nonexistent"):
self.test_session_modifying_view()
def test_view_with_exception(self):
"Request a page that is known to throw an error"
with self.assertRaises(KeyError):
self.client.get("/broken_view/")
def test_exc_info(self):
client = Client(raise_request_exception=False)
response = client.get("/broken_view/")
self.assertEqual(response.status_code, 500)
exc_type, exc_value, exc_traceback = response.exc_info
self.assertIs(exc_type, KeyError)
self.assertIsInstance(exc_value, KeyError)
self.assertEqual(str(exc_value), "'Oops! Looks like you wrote some bad code.'")
self.assertIsNotNone(exc_traceback)
def test_exc_info_none(self):
response = self.client.get("/get_view/")
self.assertIsNone(response.exc_info)
def test_mail_sending(self):
"Mail is redirected to a dummy outbox during test setup"
response = self.client.get("/mail_sending_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, "Test message")
self.assertEqual(mail.outbox[0].body, "This is a test email")
self.assertEqual(mail.outbox[0].from_email, "[email protected]")
self.assertEqual(mail.outbox[0].to[0], "[email protected]")
self.assertEqual(mail.outbox[0].to[1], "[email protected]")
def test_reverse_lazy_decodes(self):
"reverse_lazy() works in the test client"
data = {"var": "data"}
response = self.client.get(reverse_lazy("get_view"), data)
# Check some response details
self.assertContains(response, "This is a test")
def test_relative_redirect(self):
response = self.client.get("/accounts/")
self.assertRedirects(response, "/accounts/login/")
def test_relative_redirect_no_trailing_slash(self):
response = self.client.get("/accounts/no_trailing_slash")
self.assertRedirects(response, "/accounts/login/")
def test_mass_mail_sending(self):
"Mass mail is redirected to a dummy outbox during test setup"
response = self.client.get("/mass_mail_sending_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[0].subject, "First Test message")
self.assertEqual(mail.outbox[0].body, "This is the first test email")
self.assertEqual(mail.outbox[0].from_email, "[email protected]")
self.assertEqual(mail.outbox[0].to[0], "[email protected]")
self.assertEqual(mail.outbox[0].to[1], "[email protected]")
self.assertEqual(mail.outbox[1].subject, "Second Test message")
self.assertEqual(mail.outbox[1].body, "This is the second test email")
self.assertEqual(mail.outbox[1].from_email, "[email protected]")
self.assertEqual(mail.outbox[1].to[0], "[email protected]")
self.assertEqual(mail.outbox[1].to[1], "[email protected]")
def test_exception_following_nested_client_request(self):
"""
A nested test client request shouldn't clobber exception signals from
the outer client request.
"""
with self.assertRaisesMessage(Exception, "exception message"):
self.client.get("/nesting_exception_view/")
def test_response_raises_multi_arg_exception(self):
"""A request may raise an exception with more than one required arg."""
with self.assertRaises(TwoArgException) as cm:
self.client.get("/two_arg_exception/")
self.assertEqual(cm.exception.args, ("one", "two"))
def test_uploading_temp_file(self):
with tempfile.TemporaryFile() as test_file:
response = self.client.post("/upload_view/", data={"temp_file": test_file})
self.assertEqual(response.content, b"temp_file")
def test_uploading_named_temp_file(self):
with tempfile.NamedTemporaryFile() as test_file:
response = self.client.post(
"/upload_view/",
data={"named_temp_file": test_file},
)
self.assertEqual(response.content, b"named_temp_file")
@override_settings(
MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"],
ROOT_URLCONF="test_client.urls",
)
class CSRFEnabledClientTests(SimpleTestCase):
def test_csrf_enabled_client(self):
"A client can be instantiated with CSRF checks enabled"
csrf_client = Client(enforce_csrf_checks=True)
# The normal client allows the post
response = self.client.post("/post_view/", {})
self.assertEqual(response.status_code, 200)
# The CSRF-enabled client rejects it
response = csrf_client.post("/post_view/", {})
self.assertEqual(response.status_code, 403)
class CustomTestClient(Client):
i_am_customized = "Yes"
class CustomTestClientTest(SimpleTestCase):
client_class = CustomTestClient
def test_custom_test_client(self):
"""A test case can specify a custom class for self.client."""
self.assertIs(hasattr(self.client, "i_am_customized"), True)
def _generic_view(request):
return HttpResponse(status=200)
@override_settings(ROOT_URLCONF="test_client.urls")
class RequestFactoryTest(SimpleTestCase):
"""Tests for the request factory."""
    # Pairs of HTTP/1.1 method names and the test views that handle them.
http_methods_and_views = (
("get", get_view),
("post", post_view),
("put", _generic_view),
("patch", _generic_view),
("delete", _generic_view),
("head", _generic_view),
("options", _generic_view),
("trace", trace_view),
)
request_factory = RequestFactory()
def test_request_factory(self):
"""The request factory implements all the HTTP/1.1 methods."""
for method_name, view in self.http_methods_and_views:
method = getattr(self.request_factory, method_name)
request = method("/somewhere/")
response = view(request)
self.assertEqual(response.status_code, 200)
def test_get_request_from_factory(self):
"""
        A request built by the factory produces a templated response when
        passed to get_view.
"""
request = self.request_factory.get("/somewhere/")
response = get_view(request)
self.assertContains(response, "This is a test")
def test_trace_request_from_factory(self):
"""The request factory returns an echo response for a TRACE request."""
url_path = "/somewhere/"
request = self.request_factory.trace(url_path)
response = trace_view(request)
protocol = request.META["SERVER_PROTOCOL"]
echoed_request_line = "TRACE {} {}".format(url_path, protocol)
self.assertContains(response, echoed_request_line)
@override_settings(ROOT_URLCONF="test_client.urls")
class AsyncClientTest(TestCase):
async def test_response_resolver_match(self):
response = await self.async_client.get("/async_get_view/")
self.assertTrue(hasattr(response, "resolver_match"))
self.assertEqual(response.resolver_match.url_name, "async_get_view")
@modify_settings(
MIDDLEWARE={"prepend": "test_client.tests.async_middleware_urlconf"},
)
async def test_response_resolver_match_middleware_urlconf(self):
response = await self.async_client.get("/middleware_urlconf_view/")
self.assertEqual(response.resolver_match.url_name, "middleware_urlconf_view")
async def test_follow_parameter_not_implemented(self):
msg = "AsyncClient request methods do not accept the follow parameter."
tests = (
"get",
"post",
"put",
"patch",
"delete",
"head",
"options",
"trace",
)
for method_name in tests:
with self.subTest(method=method_name):
method = getattr(self.async_client, method_name)
with self.assertRaisesMessage(NotImplementedError, msg):
await method("/redirect_view/", follow=True)
async def test_get_data(self):
response = await self.async_client.get("/get_view/", {"var": "val"})
self.assertContains(response, "This is a test. val is the value.")
async def test_post_data(self):
response = await self.async_client.post("/post_view/", {"value": 37})
self.assertContains(response, "Data received: 37 is the value.")
async def test_body_read_on_get_data(self):
response = await self.async_client.get("/post_view/")
self.assertContains(response, "Viewing GET page.")
@override_settings(ROOT_URLCONF="test_client.urls")
class AsyncRequestFactoryTest(SimpleTestCase):
request_factory = AsyncRequestFactory()
async def test_request_factory(self):
tests = (
"get",
"post",
"put",
"patch",
"delete",
"head",
"options",
"trace",
)
for method_name in tests:
with self.subTest(method=method_name):
async def async_generic_view(request):
if request.method.lower() != method_name:
return HttpResponseNotAllowed(method_name)
return HttpResponse(status=200)
method = getattr(self.request_factory, method_name)
request = method("/somewhere/")
response = await async_generic_view(request)
self.assertEqual(response.status_code, 200)
async def test_request_factory_data(self):
async def async_generic_view(request):
return HttpResponse(status=200, content=request.body)
request = self.request_factory.post(
"/somewhere/",
data={"example": "data"},
content_type="application/json",
)
self.assertEqual(request.headers["content-length"], "19")
self.assertEqual(request.headers["content-type"], "application/json")
response = await async_generic_view(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'{"example": "data"}')
async def test_request_limited_read(self):
tests = ["GET", "POST"]
for method in tests:
with self.subTest(method=method):
request = self.request_factory.generic(
method,
"/somewhere",
)
self.assertEqual(request.read(200), b"")
def test_request_factory_sets_headers(self):
request = self.request_factory.get(
"/somewhere/",
AUTHORIZATION="Bearer faketoken",
X_ANOTHER_HEADER="some other value",
)
self.assertEqual(request.headers["authorization"], "Bearer faketoken")
self.assertIn("HTTP_AUTHORIZATION", request.META)
self.assertEqual(request.headers["x-another-header"], "some other value")
self.assertIn("HTTP_X_ANOTHER_HEADER", request.META)
def test_request_factory_query_string(self):
request = self.request_factory.get("/somewhere/", {"example": "data"})
self.assertNotIn("Query-String", request.headers)
self.assertEqual(request.GET["example"], "data")
|
f7069b898d64cc3017ff2b3b174c45f724b1d6df103a350e7e033303dd6072d7 | import json
from urllib.parse import urlencode
from xml.dom.minidom import parseString
from django.contrib.auth.decorators import login_required, permission_required
from django.core import mail
from django.core.exceptions import ValidationError
from django.forms import fields
from django.forms.forms import Form
from django.http import (
HttpResponse,
HttpResponseBadRequest,
HttpResponseNotAllowed,
HttpResponseNotFound,
HttpResponseRedirect,
)
from django.shortcuts import render
from django.template import Context, Template
from django.test import Client
from django.utils.decorators import method_decorator
from django.views.generic import TemplateView
def get_view(request):
"A simple view that expects a GET request, and returns a rendered template"
t = Template("This is a test. {{ var }} is the value.", name="GET Template")
c = Context({"var": request.GET.get("var", 42)})
return HttpResponse(t.render(c))
async def async_get_view(request):
return HttpResponse(b"GET content.")
def trace_view(request):
"""
A simple view that expects a TRACE request and echoes its status line.
TRACE requests should not have an entity; the view will return a 400 status
response if it is present.
"""
if request.method.upper() != "TRACE":
return HttpResponseNotAllowed("TRACE")
elif request.body:
return HttpResponseBadRequest("TRACE requests MUST NOT include an entity")
else:
protocol = request.META["SERVER_PROTOCOL"]
t = Template(
"{{ method }} {{ uri }} {{ version }}",
name="TRACE Template",
)
c = Context(
{
"method": request.method,
"uri": request.path,
"version": protocol,
}
)
return HttpResponse(t.render(c))
def put_view(request):
if request.method == "PUT":
t = Template("Data received: {{ data }} is the body.", name="PUT Template")
c = Context(
{
"Content-Length": request.META["CONTENT_LENGTH"],
"data": request.body.decode(),
}
)
else:
t = Template("Viewing GET page.", name="Empty GET Template")
c = Context()
return HttpResponse(t.render(c))
def post_view(request):
"""A view that expects a POST, and returns a different template depending
on whether any POST data is available
"""
if request.method == "POST":
if request.POST:
t = Template(
"Data received: {{ data }} is the value.", name="POST Template"
)
c = Context({"data": request.POST["value"]})
else:
t = Template("Viewing POST page.", name="Empty POST Template")
c = Context()
else:
t = Template("Viewing GET page.", name="Empty GET Template")
# Used by test_body_read_on_get_data.
request.read(200)
c = Context()
return HttpResponse(t.render(c))
def post_then_get_view(request):
"""
    A view that expects a POST request and redirects to itself with only a
    ?success=true query string; the value of that query string is then
    rendered upon GET.
"""
if request.method == "POST":
return HttpResponseRedirect("?success=true")
t = Template("The value of success is {{ value }}.", name="GET Template")
c = Context({"value": request.GET.get("success", "false")})
return HttpResponse(t.render(c))
def json_view(request):
"""
    A view that expects a request with a Content-Type of 'application/json'
    and a JSON body, which is deserialized and included in the context.
"""
if request.META.get("CONTENT_TYPE") != "application/json":
return HttpResponse()
t = Template("Viewing {} page. With data {{ data }}.".format(request.method))
data = json.loads(request.body.decode("utf-8"))
c = Context({"data": data})
return HttpResponse(t.render(c))
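# A minimal usage sketch (illustrative, not part of the test suite; assumes
# the URLconf maps json_view to /json_view/): the test client serializes
# dicts, lists, and tuples to JSON when content_type is "application/json",
# so the view above can be exercised with:
#
#   response = self.client.post(
#       "/json_view/", {"how": "far"}, content_type="application/json"
#   )
#   self.assertContains(response, "Viewing POST page.")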
def view_with_header(request):
"A view that has a custom header"
response = HttpResponse()
response.headers["X-DJANGO-TEST"] = "Slartibartfast"
return response
def raw_post_view(request):
"""A view which expects raw XML to be posted and returns content extracted
from the XML"""
if request.method == "POST":
root = parseString(request.body)
first_book = root.firstChild.firstChild
title, author = [n.firstChild.nodeValue for n in first_book.childNodes]
t = Template("{{ title }} - {{ author }}", name="Book template")
c = Context({"title": title, "author": author})
else:
t = Template("GET request.", name="Book GET template")
c = Context()
return HttpResponse(t.render(c))
def redirect_view(request):
"A view that redirects all requests to the GET view"
if request.GET:
query = "?" + urlencode(request.GET, True)
else:
query = ""
return HttpResponseRedirect("/get_view/" + query)
def method_saving_307_redirect_query_string_view(request):
return HttpResponseRedirect("/post_view/?hello=world", status=307)
def method_saving_308_redirect_query_string_view(request):
return HttpResponseRedirect("/post_view/?hello=world", status=308)
def _post_view_redirect(request, status_code):
"""Redirect to /post_view/ using the status code."""
redirect_to = request.GET.get("to", "/post_view/")
return HttpResponseRedirect(redirect_to, status=status_code)
def method_saving_307_redirect_view(request):
return _post_view_redirect(request, 307)
def method_saving_308_redirect_view(request):
return _post_view_redirect(request, 308)
def view_with_secure(request):
"A view that indicates if the request was secure"
response = HttpResponse()
response.test_was_secure_request = request.is_secure()
response.test_server_port = request.META.get("SERVER_PORT", 80)
return response
def double_redirect_view(request):
"A view that redirects all requests to a redirection view"
return HttpResponseRedirect("/permanent_redirect_view/")
def bad_view(request):
"A view that returns a 404 with some error content"
return HttpResponseNotFound("Not found!. This page contains some MAGIC content")
TestChoices = (
("a", "First Choice"),
("b", "Second Choice"),
("c", "Third Choice"),
("d", "Fourth Choice"),
("e", "Fifth Choice"),
)
class TestForm(Form):
text = fields.CharField()
email = fields.EmailField()
value = fields.IntegerField()
single = fields.ChoiceField(choices=TestChoices)
multi = fields.MultipleChoiceField(choices=TestChoices)
def clean(self):
cleaned_data = self.cleaned_data
if cleaned_data.get("text") == "Raise non-field error":
raise ValidationError("Non-field error.")
return cleaned_data
def form_view(request):
"A view that tests a simple form"
if request.method == "POST":
form = TestForm(request.POST)
if form.is_valid():
t = Template("Valid POST data.", name="Valid POST Template")
c = Context()
else:
t = Template(
"Invalid POST data. {{ form.errors }}", name="Invalid POST Template"
)
c = Context({"form": form})
else:
form = TestForm(request.GET)
t = Template("Viewing base form. {{ form }}.", name="Form GET Template")
c = Context({"form": form})
return HttpResponse(t.render(c))
def form_view_with_template(request):
"A view that tests a simple form"
if request.method == "POST":
form = TestForm(request.POST)
if form.is_valid():
message = "POST data OK"
else:
message = "POST data has errors"
else:
form = TestForm()
message = "GET form page"
return render(
request,
"form_view.html",
{
"form": form,
"message": message,
},
)
@login_required
def login_protected_view(request):
"A simple view that is login protected."
t = Template(
"This is a login protected test. Username is {{ user.username }}.",
name="Login Template",
)
c = Context({"user": request.user})
return HttpResponse(t.render(c))
@login_required(redirect_field_name="redirect_to")
def login_protected_view_changed_redirect(request):
"A simple view that is login protected with a custom redirect field set"
t = Template(
"This is a login protected test. Username is {{ user.username }}.",
name="Login Template",
)
c = Context({"user": request.user})
return HttpResponse(t.render(c))
def _permission_protected_view(request):
"A simple view that is permission protected."
t = Template(
"This is a permission protected test. "
"Username is {{ user.username }}. "
"Permissions are {{ user.get_all_permissions }}.",
name="Permissions Template",
)
c = Context({"user": request.user})
return HttpResponse(t.render(c))
permission_protected_view = permission_required("permission_not_granted")(
_permission_protected_view
)
permission_protected_view_exception = permission_required(
"permission_not_granted", raise_exception=True
)(_permission_protected_view)
class _ViewManager:
@method_decorator(login_required)
def login_protected_view(self, request):
t = Template(
"This is a login protected test using a method. "
"Username is {{ user.username }}.",
name="Login Method Template",
)
c = Context({"user": request.user})
return HttpResponse(t.render(c))
@method_decorator(permission_required("permission_not_granted"))
def permission_protected_view(self, request):
t = Template(
"This is a permission protected test using a method. "
"Username is {{ user.username }}. "
"Permissions are {{ user.get_all_permissions }}.",
name="Permissions Template",
)
c = Context({"user": request.user})
return HttpResponse(t.render(c))
_view_manager = _ViewManager()
login_protected_method_view = _view_manager.login_protected_view
permission_protected_method_view = _view_manager.permission_protected_view
def session_view(request):
"A view that modifies the session"
request.session["tobacconist"] = "hovercraft"
t = Template(
"This is a view that modifies the session.",
name="Session Modifying View Template",
)
c = Context()
return HttpResponse(t.render(c))
def broken_view(request):
"""A view which just raises an exception, simulating a broken view."""
raise KeyError("Oops! Looks like you wrote some bad code.")
def mail_sending_view(request):
mail.EmailMessage(
"Test message",
"This is a test email",
"[email protected]",
["[email protected]", "[email protected]"],
).send()
return HttpResponse("Mail sent")
def mass_mail_sending_view(request):
m1 = mail.EmailMessage(
"First Test message",
"This is the first test email",
"[email protected]",
["[email protected]", "[email protected]"],
)
m2 = mail.EmailMessage(
"Second Test message",
"This is the second test email",
"[email protected]",
["[email protected]", "[email protected]"],
)
c = mail.get_connection()
c.send_messages([m1, m2])
return HttpResponse("Mail sent")
def nesting_exception_view(request):
"""
A view that uses a nested client to call another view and then raises an
exception.
"""
client = Client()
client.get("/get_view/")
raise Exception("exception message")
def django_project_redirect(request):
return HttpResponseRedirect("https://www.djangoproject.com/")
def no_trailing_slash_external_redirect(request):
"""
RFC 2616 3.2.2: A bare domain without any abs_path element should be
treated as having the trailing `/`.
Use https://testserver, rather than an external domain, in order to allow
use of follow=True, triggering Client._handle_redirects().
"""
return HttpResponseRedirect("https://testserver")
def index_view(request):
"""Target for no_trailing_slash_external_redirect with follow=True."""
return HttpResponse("Hello world")
def upload_view(request):
"""Prints keys of request.FILES to the response."""
return HttpResponse(", ".join(request.FILES))
class TwoArgException(Exception):
def __init__(self, one, two):
pass
def two_arg_exception(request):
raise TwoArgException("one", "two")
class CBView(TemplateView):
template_name = "base.html"
|
d780250295f0954ac4fc931cc8f6fa66e7823a8190cc626a3e1f270510dd9bd4 | from django.db import models
from django.utils import timezone
class RelatedModel(models.Model):
simple = models.ForeignKey("SimpleModel", models.CASCADE, null=True)
class SimpleModel(models.Model):
field = models.IntegerField()
created = models.DateTimeField(default=timezone.now)
class ManyToManyModel(models.Model):
simples = models.ManyToManyField("SimpleModel")
|
c0553c3cbbc99022c604e89d07e6eae7e78355659d3050efea2d4bcc84812f37 | from django.test import TestCase
from .models import ManyToManyModel, RelatedModel, SimpleModel
class AsyncRelatedManagersOperationTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.mtm1 = ManyToManyModel.objects.create()
cls.s1 = SimpleModel.objects.create(field=0)
cls.mtm2 = ManyToManyModel.objects.create()
cls.mtm2.simples.set([cls.s1])
async def test_acreate(self):
await self.mtm1.simples.acreate(field=2)
new_simple = await self.mtm1.simples.aget()
self.assertEqual(new_simple.field, 2)
async def test_acreate_reverse(self):
await self.s1.relatedmodel_set.acreate()
new_relatedmodel = await self.s1.relatedmodel_set.aget()
self.assertEqual(new_relatedmodel.simple, self.s1)
async def test_aget_or_create(self):
new_simple, created = await self.mtm1.simples.aget_or_create(field=2)
self.assertIs(created, True)
self.assertEqual(await self.mtm1.simples.acount(), 1)
self.assertEqual(new_simple.field, 2)
new_simple, created = await self.mtm1.simples.aget_or_create(
id=new_simple.id, through_defaults={"field": 3}
)
self.assertIs(created, False)
self.assertEqual(await self.mtm1.simples.acount(), 1)
self.assertEqual(new_simple.field, 2)
async def test_aget_or_create_reverse(self):
new_relatedmodel, created = await self.s1.relatedmodel_set.aget_or_create()
self.assertIs(created, True)
self.assertEqual(await self.s1.relatedmodel_set.acount(), 1)
self.assertEqual(new_relatedmodel.simple, self.s1)
async def test_aupdate_or_create(self):
new_simple, created = await self.mtm1.simples.aupdate_or_create(field=2)
self.assertIs(created, True)
self.assertEqual(await self.mtm1.simples.acount(), 1)
self.assertEqual(new_simple.field, 2)
new_simple, created = await self.mtm1.simples.aupdate_or_create(
id=new_simple.id, defaults={"field": 3}
)
self.assertIs(created, False)
self.assertEqual(await self.mtm1.simples.acount(), 1)
self.assertEqual(new_simple.field, 3)
async def test_aupdate_or_create_reverse(self):
new_relatedmodel, created = await self.s1.relatedmodel_set.aupdate_or_create()
self.assertIs(created, True)
self.assertEqual(await self.s1.relatedmodel_set.acount(), 1)
self.assertEqual(new_relatedmodel.simple, self.s1)
async def test_aadd(self):
await self.mtm1.simples.aadd(self.s1)
self.assertEqual(await self.mtm1.simples.aget(), self.s1)
async def test_aadd_reverse(self):
r1 = await RelatedModel.objects.acreate()
await self.s1.relatedmodel_set.aadd(r1, bulk=False)
self.assertEqual(await self.s1.relatedmodel_set.aget(), r1)
async def test_aremove(self):
self.assertEqual(await self.mtm2.simples.acount(), 1)
await self.mtm2.simples.aremove(self.s1)
self.assertEqual(await self.mtm2.simples.acount(), 0)
async def test_aremove_reverse(self):
r1 = await RelatedModel.objects.acreate(simple=self.s1)
self.assertEqual(await self.s1.relatedmodel_set.acount(), 1)
await self.s1.relatedmodel_set.aremove(r1)
self.assertEqual(await self.s1.relatedmodel_set.acount(), 0)
async def test_aset(self):
await self.mtm1.simples.aset([self.s1])
self.assertEqual(await self.mtm1.simples.aget(), self.s1)
await self.mtm1.simples.aset([])
self.assertEqual(await self.mtm1.simples.acount(), 0)
await self.mtm1.simples.aset([self.s1], clear=True)
self.assertEqual(await self.mtm1.simples.aget(), self.s1)
async def test_aset_reverse(self):
r1 = await RelatedModel.objects.acreate()
await self.s1.relatedmodel_set.aset([r1])
self.assertEqual(await self.s1.relatedmodel_set.aget(), r1)
await self.s1.relatedmodel_set.aset([])
self.assertEqual(await self.s1.relatedmodel_set.acount(), 0)
await self.s1.relatedmodel_set.aset([r1], bulk=False, clear=True)
self.assertEqual(await self.s1.relatedmodel_set.aget(), r1)
async def test_aclear(self):
self.assertEqual(await self.mtm2.simples.acount(), 1)
await self.mtm2.simples.aclear()
self.assertEqual(await self.mtm2.simples.acount(), 0)
async def test_aclear_reverse(self):
await RelatedModel.objects.acreate(simple=self.s1)
self.assertEqual(await self.s1.relatedmodel_set.acount(), 1)
await self.s1.relatedmodel_set.aclear(bulk=False)
self.assertEqual(await self.s1.relatedmodel_set.acount(), 0)
|
29ed3ee27952037ae655f27a2543d4434170501261143ea1e066984c291419d4 | import json
import mimetypes
import os
import sys
from copy import copy
from functools import partial
from http import HTTPStatus
from importlib import import_module
from io import BytesIO
from urllib.parse import unquote_to_bytes, urljoin, urlparse, urlsplit
from asgiref.sync import sync_to_async
from django.conf import settings
from django.core.handlers.asgi import ASGIRequest
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import LimitedStream, WSGIRequest
from django.core.serializers.json import DjangoJSONEncoder
from django.core.signals import got_request_exception, request_finished, request_started
from django.db import close_old_connections
from django.http import HttpHeaders, HttpRequest, QueryDict, SimpleCookie
from django.test import signals
from django.test.utils import ContextList
from django.urls import resolve
from django.utils.encoding import force_bytes
from django.utils.functional import SimpleLazyObject
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
from django.utils.regex_helper import _lazy_re_compile
__all__ = (
"AsyncClient",
"AsyncRequestFactory",
"Client",
"RedirectCycleError",
"RequestFactory",
"encode_file",
"encode_multipart",
)
BOUNDARY = "BoUnDaRyStRiNg"
MULTIPART_CONTENT = "multipart/form-data; boundary=%s" % BOUNDARY
CONTENT_TYPE_RE = _lazy_re_compile(r".*; charset=([\w-]+);?")
# Structured suffix spec: https://tools.ietf.org/html/rfc6838#section-4.2.8
JSON_CONTENT_TYPE_RE = _lazy_re_compile(r"^application\/(.+\+)?json")
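# For example (illustrative), the structured-suffix pattern accepts both the
# plain and the suffixed JSON content types:
#
#   JSON_CONTENT_TYPE_RE.match("application/json")          # matches
#   JSON_CONTENT_TYPE_RE.match("application/vnd.api+json")  # matches
#   JSON_CONTENT_TYPE_RE.match("text/html")                 # no match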
class RedirectCycleError(Exception):
"""The test client has been asked to follow a redirect loop."""
def __init__(self, message, last_response):
super().__init__(message)
self.last_response = last_response
self.redirect_chain = last_response.redirect_chain
class FakePayload:
"""
A wrapper around BytesIO that restricts what can be read since data from
the network can't be sought and cannot be read outside of its content
length. This makes sure that views can't do anything under the test client
that wouldn't work in real life.
"""
def __init__(self, content=None):
self.__content = BytesIO()
self.__len = 0
self.read_started = False
if content is not None:
self.write(content)
def __len__(self):
return self.__len
def read(self, num_bytes=None):
if not self.read_started:
self.__content.seek(0)
self.read_started = True
if num_bytes is None:
num_bytes = self.__len or 0
assert (
self.__len >= num_bytes
), "Cannot read more than the available bytes from the HTTP incoming data."
content = self.__content.read(num_bytes)
self.__len -= num_bytes
return content
def write(self, content):
if self.read_started:
raise ValueError("Unable to write a payload after it's been read")
content = force_bytes(content)
self.__content.write(content)
self.__len += len(content)
def close(self):
pass
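# A minimal sketch of the FakePayload contract (illustrative only): reads are
# capped at the declared length, and writing after a read has started fails.
#
#   payload = FakePayload(b"hello")
#   payload.read(5)      # b"hello"
#   payload.read(1)      # AssertionError: nothing left within content length
#   payload.write(b"x")  # ValueError: payload has already been read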
def closing_iterator_wrapper(iterable, close):
try:
yield from iterable
finally:
request_finished.disconnect(close_old_connections)
close() # will fire request_finished
request_finished.connect(close_old_connections)
def conditional_content_removal(request, response):
"""
Simulate the behavior of most web servers by removing the content of
responses for HEAD requests, 1xx, 204, and 304 responses. Ensure
compliance with RFC 9112 Section 6.3.
"""
if 100 <= response.status_code < 200 or response.status_code in (204, 304):
if response.streaming:
response.streaming_content = []
else:
response.content = b""
if request.method == "HEAD":
if response.streaming:
response.streaming_content = []
else:
response.content = b""
return response
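# For example (illustrative): a HEAD request made through the test client
# keeps the status code and headers but gets an empty body, mirroring what a
# real web server would send.
#
#   response = self.client.head("/get_view/")
#   assert response.status_code == 200 and response.content == b""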
class ClientHandler(BaseHandler):
"""
An HTTP Handler that can be used for testing purposes. Use the WSGI
interface to compose requests, but return the raw HttpResponse object with
the originating WSGIRequest attached to its ``wsgi_request`` attribute.
"""
def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
self.enforce_csrf_checks = enforce_csrf_checks
super().__init__(*args, **kwargs)
def __call__(self, environ):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._middleware_chain is None:
self.load_middleware()
request_started.disconnect(close_old_connections)
request_started.send(sender=self.__class__, environ=environ)
request_started.connect(close_old_connections)
request = WSGIRequest(environ)
# sneaky little hack so that we can easily get round
# CsrfViewMiddleware. This makes life easier, and is probably
# required for backwards compatibility with external tests against
# admin views.
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
# Request goes through middleware.
response = self.get_response(request)
# Simulate behaviors of most web servers.
conditional_content_removal(request, response)
        # Attach the originating request to the response so that it can be
        # retrieved later.
response.wsgi_request = request
# Emulate a WSGI server by calling the close method on completion.
if response.streaming:
response.streaming_content = closing_iterator_wrapper(
response.streaming_content, response.close
)
else:
request_finished.disconnect(close_old_connections)
response.close() # will fire request_finished
request_finished.connect(close_old_connections)
return response
class AsyncClientHandler(BaseHandler):
"""An async version of ClientHandler."""
def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
self.enforce_csrf_checks = enforce_csrf_checks
super().__init__(*args, **kwargs)
async def __call__(self, scope):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._middleware_chain is None:
self.load_middleware(is_async=True)
# Extract body file from the scope, if provided.
if "_body_file" in scope:
body_file = scope.pop("_body_file")
else:
body_file = FakePayload("")
request_started.disconnect(close_old_connections)
await sync_to_async(request_started.send, thread_sensitive=False)(
sender=self.__class__, scope=scope
)
request_started.connect(close_old_connections)
# Wrap FakePayload body_file to allow large read() in test environment.
request = ASGIRequest(scope, LimitedStream(body_file, len(body_file)))
# Sneaky little hack so that we can easily get round
# CsrfViewMiddleware. This makes life easier, and is probably required
# for backwards compatibility with external tests against admin views.
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
# Request goes through middleware.
response = await self.get_response_async(request)
# Simulate behaviors of most web servers.
conditional_content_removal(request, response)
        # Attach the originating ASGI request to the response so that it can
        # be retrieved later.
response.asgi_request = request
# Emulate a server by calling the close method on completion.
if response.streaming:
response.streaming_content = await sync_to_async(
closing_iterator_wrapper, thread_sensitive=False
)(
response.streaming_content,
response.close,
)
else:
request_finished.disconnect(close_old_connections)
# Will fire request_finished.
await sync_to_async(response.close, thread_sensitive=False)()
request_finished.connect(close_old_connections)
return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
"""
Store templates and contexts that are rendered.
The context is copied so that it is an accurate representation at the time
of rendering.
"""
store.setdefault("templates", []).append(template)
if "context" not in store:
store["context"] = ContextList()
store["context"].append(copy(context))
def encode_multipart(boundary, data):
"""
Encode multipart POST data from a dictionary of form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
"""
lines = []
def to_bytes(s):
return force_bytes(s, settings.DEFAULT_CHARSET)
# Not by any means perfect, but good enough for our purposes.
def is_file(thing):
return hasattr(thing, "read") and callable(thing.read)
# Each bit of the multipart form data could be either a form value or a
# file, or a *list* of form values and/or files. Remember that HTTP field
# names can be duplicated!
    for key, value in data.items():
if value is None:
raise TypeError(
"Cannot encode None for key '%s' as POST data. Did you mean "
"to pass an empty string or omit the value?" % key
)
elif is_file(value):
lines.extend(encode_file(boundary, key, value))
elif not isinstance(value, str) and is_iterable(value):
for item in value:
if is_file(item):
lines.extend(encode_file(boundary, key, item))
else:
lines.extend(
to_bytes(val)
for val in [
"--%s" % boundary,
'Content-Disposition: form-data; name="%s"' % key,
"",
item,
]
)
else:
lines.extend(
to_bytes(val)
for val in [
"--%s" % boundary,
'Content-Disposition: form-data; name="%s"' % key,
"",
value,
]
)
lines.extend(
[
to_bytes("--%s--" % boundary),
b"",
]
)
return b"\r\n".join(lines)
def encode_file(boundary, key, file):
def to_bytes(s):
return force_bytes(s, settings.DEFAULT_CHARSET)
# file.name might not be a string. For example, it's an int for
# tempfile.TemporaryFile().
file_has_string_name = hasattr(file, "name") and isinstance(file.name, str)
filename = os.path.basename(file.name) if file_has_string_name else ""
if hasattr(file, "content_type"):
content_type = file.content_type
elif filename:
content_type = mimetypes.guess_type(filename)[0]
else:
content_type = None
if content_type is None:
content_type = "application/octet-stream"
filename = filename or key
return [
to_bytes("--%s" % boundary),
to_bytes(
'Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename)
),
to_bytes("Content-Type: %s" % content_type),
b"",
to_bytes(file.read()),
]
class RequestFactory:
"""
Class that lets you create mock Request objects for use in testing.
Usage:
rf = RequestFactory()
get_request = rf.get('/hello/')
post_request = rf.post('/submit/', {'foo': 'bar'})
Once you have a request object you can pass it to any view function,
just as if that view had been hooked up using a URLconf.
"""
def __init__(self, *, json_encoder=DjangoJSONEncoder, headers=None, **defaults):
self.json_encoder = json_encoder
self.defaults = defaults
self.cookies = SimpleCookie()
self.errors = BytesIO()
if headers:
self.defaults.update(HttpHeaders.to_wsgi_names(headers))
def _base_environ(self, **request):
"""
The base environment for a request.
"""
# This is a minimal valid WSGI environ dictionary, plus:
# - HTTP_COOKIE: for cookie support,
# - REMOTE_ADDR: often useful, see #8551.
# See https://www.python.org/dev/peps/pep-3333/#environ-variables
return {
"HTTP_COOKIE": "; ".join(
sorted(
"%s=%s" % (morsel.key, morsel.coded_value)
for morsel in self.cookies.values()
)
),
"PATH_INFO": "/",
"REMOTE_ADDR": "127.0.0.1",
"REQUEST_METHOD": "GET",
"SCRIPT_NAME": "",
"SERVER_NAME": "testserver",
"SERVER_PORT": "80",
"SERVER_PROTOCOL": "HTTP/1.1",
"wsgi.version": (1, 0),
"wsgi.url_scheme": "http",
"wsgi.input": FakePayload(b""),
"wsgi.errors": self.errors,
"wsgi.multiprocess": True,
"wsgi.multithread": False,
"wsgi.run_once": False,
**self.defaults,
**request,
}
def request(self, **request):
"Construct a generic request object."
return WSGIRequest(self._base_environ(**request))
def _encode_data(self, data, content_type):
if content_type is MULTIPART_CONTENT:
return encode_multipart(BOUNDARY, data)
else:
# Encode the content so that the byte representation is correct.
match = CONTENT_TYPE_RE.match(content_type)
if match:
charset = match[1]
else:
charset = settings.DEFAULT_CHARSET
return force_bytes(data, encoding=charset)
def _encode_json(self, data, content_type):
"""
Return encoded JSON if data is a dict, list, or tuple and content_type
is application/json.
"""
should_encode = JSON_CONTENT_TYPE_RE.match(content_type) and isinstance(
data, (dict, list, tuple)
)
return json.dumps(data, cls=self.json_encoder) if should_encode else data
def _get_path(self, parsed):
path = parsed.path
# If there are parameters, add them
if parsed.params:
path += ";" + parsed.params
path = unquote_to_bytes(path)
# Replace the behavior where non-ASCII values in the WSGI environ are
# arbitrarily decoded with ISO-8859-1.
# Refs comment in `get_bytes_from_wsgi()`.
return path.decode("iso-8859-1")
def get(self, path, data=None, secure=False, *, headers=None, **extra):
"""Construct a GET request."""
data = {} if data is None else data
return self.generic(
"GET",
path,
secure=secure,
headers=headers,
**{
"QUERY_STRING": urlencode(data, doseq=True),
**extra,
},
)
def post(
self,
path,
data=None,
content_type=MULTIPART_CONTENT,
secure=False,
*,
headers=None,
**extra,
):
"""Construct a POST request."""
data = self._encode_json({} if data is None else data, content_type)
post_data = self._encode_data(data, content_type)
return self.generic(
"POST",
path,
post_data,
content_type,
secure=secure,
headers=headers,
**extra,
)
def head(self, path, data=None, secure=False, *, headers=None, **extra):
"""Construct a HEAD request."""
data = {} if data is None else data
return self.generic(
"HEAD",
path,
secure=secure,
headers=headers,
**{
"QUERY_STRING": urlencode(data, doseq=True),
**extra,
},
)
def trace(self, path, secure=False, *, headers=None, **extra):
"""Construct a TRACE request."""
return self.generic("TRACE", path, secure=secure, headers=headers, **extra)
def options(
self,
path,
data="",
content_type="application/octet-stream",
secure=False,
*,
headers=None,
**extra,
):
"Construct an OPTIONS request."
return self.generic(
"OPTIONS", path, data, content_type, secure=secure, headers=headers, **extra
)
def put(
self,
path,
data="",
content_type="application/octet-stream",
secure=False,
*,
headers=None,
**extra,
):
"""Construct a PUT request."""
data = self._encode_json(data, content_type)
return self.generic(
"PUT", path, data, content_type, secure=secure, headers=headers, **extra
)
def patch(
self,
path,
data="",
content_type="application/octet-stream",
secure=False,
*,
headers=None,
**extra,
):
"""Construct a PATCH request."""
data = self._encode_json(data, content_type)
return self.generic(
"PATCH", path, data, content_type, secure=secure, headers=headers, **extra
)
def delete(
self,
path,
data="",
content_type="application/octet-stream",
secure=False,
*,
headers=None,
**extra,
):
"""Construct a DELETE request."""
data = self._encode_json(data, content_type)
return self.generic(
"DELETE", path, data, content_type, secure=secure, headers=headers, **extra
)
def generic(
self,
method,
path,
data="",
content_type="application/octet-stream",
secure=False,
*,
headers=None,
**extra,
):
"""Construct an arbitrary HTTP request."""
parsed = urlparse(str(path)) # path can be lazy
data = force_bytes(data, settings.DEFAULT_CHARSET)
r = {
"PATH_INFO": self._get_path(parsed),
"REQUEST_METHOD": method,
"SERVER_PORT": "443" if secure else "80",
"wsgi.url_scheme": "https" if secure else "http",
}
if data:
r.update(
{
"CONTENT_LENGTH": str(len(data)),
"CONTENT_TYPE": content_type,
"wsgi.input": FakePayload(data),
}
)
if headers:
extra.update(HttpHeaders.to_wsgi_names(headers))
r.update(extra)
# If QUERY_STRING is absent or empty, we want to extract it from the URL.
if not r.get("QUERY_STRING"):
# WSGI requires latin-1 encoded strings. See get_path_info().
query_string = parsed[4].encode().decode("iso-8859-1")
r["QUERY_STRING"] = query_string
return self.request(**r)
class AsyncRequestFactory(RequestFactory):
"""
Class that lets you create mock ASGI-like Request objects for use in
testing. Usage:
    rf = AsyncRequestFactory()
    get_request = rf.get('/hello/')
    post_request = rf.post('/submit/', {'foo': 'bar'})
    Once you have a request object you can pass it to any view function,
    including synchronous ones. The reasons for a separate class are:
    a) it creates ASGIRequest rather than WSGIRequest instances, and
    b) AsyncClient can subclass it.
"""
def _base_scope(self, **request):
"""The base scope for a request."""
# This is a minimal valid ASGI scope, plus:
# - headers['cookie'] for cookie support,
# - 'client' often useful, see #8551.
scope = {
"asgi": {"version": "3.0"},
"type": "http",
"http_version": "1.1",
"client": ["127.0.0.1", 0],
"server": ("testserver", "80"),
"scheme": "http",
"method": "GET",
"headers": [],
**self.defaults,
**request,
}
scope["headers"].append(
(
b"cookie",
b"; ".join(
sorted(
("%s=%s" % (morsel.key, morsel.coded_value)).encode("ascii")
for morsel in self.cookies.values()
)
),
)
)
return scope
def request(self, **request):
"""Construct a generic request object."""
# This is synchronous, which means all methods on this class are.
# AsyncClient, however, has an async request function, which makes all
# its methods async.
if "_body_file" in request:
body_file = request.pop("_body_file")
else:
body_file = FakePayload("")
# Wrap FakePayload body_file to allow large read() in test environment.
return ASGIRequest(
self._base_scope(**request), LimitedStream(body_file, len(body_file))
)
def generic(
self,
method,
path,
data="",
content_type="application/octet-stream",
secure=False,
*,
headers=None,
**extra,
):
"""Construct an arbitrary HTTP request."""
parsed = urlparse(str(path)) # path can be lazy.
data = force_bytes(data, settings.DEFAULT_CHARSET)
s = {
"method": method,
"path": self._get_path(parsed),
"server": ("127.0.0.1", "443" if secure else "80"),
"scheme": "https" if secure else "http",
"headers": [(b"host", b"testserver")],
}
if data:
s["headers"].extend(
[
(b"content-length", str(len(data)).encode("ascii")),
(b"content-type", content_type.encode("ascii")),
]
)
s["_body_file"] = FakePayload(data)
follow = extra.pop("follow", None)
if follow is not None:
s["follow"] = follow
if query_string := extra.pop("QUERY_STRING", None):
s["query_string"] = query_string
if headers:
extra.update(HttpHeaders.to_asgi_names(headers))
s["headers"] += [
(key.lower().encode("ascii"), value.encode("latin1"))
for key, value in extra.items()
]
# If QUERY_STRING is absent or empty, we want to extract it from the
# URL.
if not s.get("query_string"):
s["query_string"] = parsed[4]
return self.request(**s)
class ClientMixin:
"""
Mixin with common methods between Client and AsyncClient.
"""
def store_exc_info(self, **kwargs):
"""Store exceptions when they are generated by a view."""
self.exc_info = sys.exc_info()
def check_exception(self, response):
"""
Look for a signaled exception, clear the current context exception
data, re-raise the signaled exception, and clear the signaled exception
from the local cache.
"""
response.exc_info = self.exc_info
if self.exc_info:
_, exc_value, _ = self.exc_info
self.exc_info = None
if self.raise_request_exception:
raise exc_value
@property
def session(self):
"""Return the current session variables."""
engine = import_module(settings.SESSION_ENGINE)
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)
if cookie:
return engine.SessionStore(cookie.value)
session = engine.SessionStore()
session.save()
self.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
return session
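    # Usage note (a sketch of the documented pattern): because this property
    # may build a fresh SessionStore, session changes must be made on a held
    # reference and saved explicitly, not via repeated attribute access.
    #
    #   session = self.client.session
    #   session["key"] = "value"
    #   session.save()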
def login(self, **credentials):
"""
        Set the client to appear as if it has successfully logged into a site.
Return True if login is possible or False if the provided credentials
are incorrect.
"""
from django.contrib.auth import authenticate
user = authenticate(**credentials)
if user:
self._login(user)
return True
return False
def force_login(self, user, backend=None):
def get_backend():
from django.contrib.auth import load_backend
for backend_path in settings.AUTHENTICATION_BACKENDS:
backend = load_backend(backend_path)
if hasattr(backend, "get_user"):
return backend_path
if backend is None:
backend = get_backend()
user.backend = backend
self._login(user, backend)
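    # A usage sketch (illustrative): force_login() skips the authentication
    # backends' credential checks, so it is faster than login() when a test
    # does not care how the user was authenticated.
    #
    #   user = User.objects.create_user("jacob", password="top_secret")
    #   self.client.force_login(user)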
def _login(self, user, backend=None):
from django.contrib.auth import login
# Create a fake request to store login details.
request = HttpRequest()
if self.session:
request.session = self.session
else:
engine = import_module(settings.SESSION_ENGINE)
request.session = engine.SessionStore()
login(request, user, backend)
# Save the session values.
request.session.save()
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
"max-age": None,
"path": "/",
"domain": settings.SESSION_COOKIE_DOMAIN,
"secure": settings.SESSION_COOKIE_SECURE or None,
"expires": None,
}
self.cookies[session_cookie].update(cookie_data)
def logout(self):
"""Log out the user by removing the cookies and session object."""
from django.contrib.auth import get_user, logout
request = HttpRequest()
if self.session:
request.session = self.session
request.user = get_user(request)
else:
engine = import_module(settings.SESSION_ENGINE)
request.session = engine.SessionStore()
logout(request)
self.cookies = SimpleCookie()
def _parse_json(self, response, **extra):
if not hasattr(response, "_json"):
if not JSON_CONTENT_TYPE_RE.match(response.get("Content-Type")):
raise ValueError(
'Content-Type header is "%s", not "application/json"'
% response.get("Content-Type")
)
response._json = json.loads(
response.content.decode(response.charset), **extra
)
return response._json
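# A usage sketch (illustrative; the URL name is hypothetical): responses get
# a json() callable wired to the parser above, so JSON bodies can be
# asserted directly.
#
#   response = self.client.get("/json_response_view/")
#   self.assertEqual(response.json(), {"key": "value"})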
class Client(ClientMixin, RequestFactory):
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(
self,
enforce_csrf_checks=False,
raise_request_exception=True,
*,
headers=None,
**defaults,
):
super().__init__(headers=headers, **defaults)
self.handler = ClientHandler(enforce_csrf_checks)
self.raise_request_exception = raise_request_exception
self.exc_info = None
self.extra = None
def request(self, **request):
"""
Make a generic request. Compose the environment dictionary and pass
to the handler, return the result of the handler. Assume defaults for
the query environment, which can be overridden using the arguments to
the request.
"""
environ = self._base_environ(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = partial(store_rendered_templates, data)
signal_uid = "template-render-%s" % id(request)
signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid)
# Capture exceptions created by the handler.
exception_uid = "request-exception-%s" % id(request)
got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid)
try:
response = self.handler(environ)
finally:
signals.template_rendered.disconnect(dispatch_uid=signal_uid)
got_request_exception.disconnect(dispatch_uid=exception_uid)
# Check for signaled exceptions.
self.check_exception(response)
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
response.json = partial(self._parse_json, response)
# Attach the ResolverMatch instance to the response.
urlconf = getattr(response.wsgi_request, "urlconf", None)
response.resolver_match = SimpleLazyObject(
lambda: resolve(request["PATH_INFO"], urlconf=urlconf),
)
# Flatten a single context. Not really necessary anymore thanks to the
# __getattr__ flattening in ContextList, but has some edge case
# backwards compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
def get(
self,
path,
data=None,
follow=False,
secure=False,
*,
headers=None,
**extra,
):
"""Request a response from the server using GET."""
self.extra = extra
response = super().get(path, data=data, secure=secure, headers=headers, **extra)
if follow:
response = self._handle_redirects(
response, data=data, headers=headers, **extra
)
return response
def post(
self,
path,
data=None,
content_type=MULTIPART_CONTENT,
follow=False,
secure=False,
*,
headers=None,
**extra,
):
"""Request a response from the server using POST."""
self.extra = extra
response = super().post(
path,
data=data,
content_type=content_type,
secure=secure,
headers=headers,
**extra,
)
if follow:
response = self._handle_redirects(
response, data=data, content_type=content_type, headers=headers, **extra
)
return response
def head(
self,
path,
data=None,
follow=False,
secure=False,
*,
headers=None,
**extra,
):
"""Request a response from the server using HEAD."""
self.extra = extra
response = super().head(
path, data=data, secure=secure, headers=headers, **extra
)
if follow:
response = self._handle_redirects(
response, data=data, headers=headers, **extra
)
return response
def options(
self,
path,
data="",
content_type="application/octet-stream",
follow=False,
secure=False,
*,
headers=None,
**extra,
):
"""Request a response from the server using OPTIONS."""
self.extra = extra
response = super().options(
path,
data=data,
content_type=content_type,
secure=secure,
headers=headers,
**extra,
)
if follow:
response = self._handle_redirects(
response, data=data, content_type=content_type, headers=headers, **extra
)
return response
def put(
self,
path,
data="",
content_type="application/octet-stream",
follow=False,
secure=False,
*,
headers=None,
**extra,
):
"""Send a resource to the server using PUT."""
self.extra = extra
response = super().put(
path,
data=data,
content_type=content_type,
secure=secure,
headers=headers,
**extra,
)
if follow:
response = self._handle_redirects(
response, data=data, content_type=content_type, headers=headers, **extra
)
return response
def patch(
self,
path,
data="",
content_type="application/octet-stream",
follow=False,
secure=False,
*,
headers=None,
**extra,
):
"""Send a resource to the server using PATCH."""
self.extra = extra
response = super().patch(
path,
data=data,
content_type=content_type,
secure=secure,
headers=headers,
**extra,
)
if follow:
response = self._handle_redirects(
response, data=data, content_type=content_type, headers=headers, **extra
)
return response
def delete(
self,
path,
data="",
content_type="application/octet-stream",
follow=False,
secure=False,
*,
headers=None,
**extra,
):
"""Send a DELETE request to the server."""
self.extra = extra
response = super().delete(
path,
data=data,
content_type=content_type,
secure=secure,
headers=headers,
**extra,
)
if follow:
response = self._handle_redirects(
response, data=data, content_type=content_type, headers=headers, **extra
)
return response
def trace(
self,
path,
data="",
follow=False,
secure=False,
*,
headers=None,
**extra,
):
"""Send a TRACE request to the server."""
self.extra = extra
response = super().trace(
path, data=data, secure=secure, headers=headers, **extra
)
if follow:
response = self._handle_redirects(
response, data=data, headers=headers, **extra
)
return response
def _handle_redirects(
self,
response,
data="",
content_type="",
headers=None,
**extra,
):
"""
Follow any redirects by requesting responses from the server using GET.
"""
response.redirect_chain = []
redirect_status_codes = (
HTTPStatus.MOVED_PERMANENTLY,
HTTPStatus.FOUND,
HTTPStatus.SEE_OTHER,
HTTPStatus.TEMPORARY_REDIRECT,
HTTPStatus.PERMANENT_REDIRECT,
)
while response.status_code in redirect_status_codes:
response_url = response.url
redirect_chain = response.redirect_chain
redirect_chain.append((response_url, response.status_code))
url = urlsplit(response_url)
if url.scheme:
extra["wsgi.url_scheme"] = url.scheme
if url.hostname:
extra["SERVER_NAME"] = url.hostname
if url.port:
extra["SERVER_PORT"] = str(url.port)
path = url.path
# RFC 3986 Section 6.2.3: Empty path should be normalized to "/".
if not path and url.netloc:
path = "/"
# Prepend the request path to handle relative path redirects
if not path.startswith("/"):
path = urljoin(response.request["PATH_INFO"], path)
if response.status_code in (
HTTPStatus.TEMPORARY_REDIRECT,
HTTPStatus.PERMANENT_REDIRECT,
):
# Preserve request method and query string (if needed)
# post-redirect for 307/308 responses.
request_method = response.request["REQUEST_METHOD"].lower()
if request_method not in ("get", "head"):
extra["QUERY_STRING"] = url.query
request_method = getattr(self, request_method)
else:
request_method = self.get
data = QueryDict(url.query)
content_type = None
response = request_method(
path,
data=data,
content_type=content_type,
follow=False,
headers=headers,
**extra,
)
response.redirect_chain = redirect_chain
if redirect_chain[-1] in redirect_chain[:-1]:
# Check that we're not redirecting to somewhere we've already
# been to, to prevent loops.
raise RedirectCycleError(
"Redirect loop detected.", last_response=response
)
if len(redirect_chain) > 20:
# Such a lengthy chain likely also means a loop, but one with
# a growing path, changing view, or changing query argument;
# 20 is the value of "network.http.redirection-limit" from Firefox.
raise RedirectCycleError("Too many redirects.", last_response=response)
return response
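# A usage sketch (illustrative; the exact chain depends on the URLconf): with
# follow=True, the intermediate hops are recorded on the final response.
#
#   response = self.client.get("/double_redirect_view/", follow=True)
#   # response.redirect_chain is a list of (url, status_code) pairs, e.g.
#   # [("/permanent_redirect_view/", 302), ("/get_view/", 301)]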
class AsyncClient(ClientMixin, AsyncRequestFactory):
"""
An async version of Client that creates ASGIRequests and calls through an
async request path.
Does not currently support "follow" on its methods.
"""
def __init__(
self,
enforce_csrf_checks=False,
raise_request_exception=True,
*,
headers=None,
**defaults,
):
super().__init__(headers=headers, **defaults)
self.handler = AsyncClientHandler(enforce_csrf_checks)
self.raise_request_exception = raise_request_exception
self.exc_info = None
self.extra = None
async def request(self, **request):
"""
Make a generic request. Compose the scope dictionary and pass to the
handler, return the result of the handler. Assume defaults for the
query environment, which can be overridden using the arguments to the
request.
"""
if "follow" in request:
raise NotImplementedError(
"AsyncClient request methods do not accept the follow parameter."
)
scope = self._base_scope(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = partial(store_rendered_templates, data)
signal_uid = "template-render-%s" % id(request)
signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid)
# Capture exceptions created by the handler.
exception_uid = "request-exception-%s" % id(request)
got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid)
try:
response = await self.handler(scope)
finally:
signals.template_rendered.disconnect(dispatch_uid=signal_uid)
got_request_exception.disconnect(dispatch_uid=exception_uid)
# Check for signaled exceptions.
self.check_exception(response)
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
response.json = partial(self._parse_json, response)
# Attach the ResolverMatch instance to the response.
urlconf = getattr(response.asgi_request, "urlconf", None)
response.resolver_match = SimpleLazyObject(
lambda: resolve(request["path"], urlconf=urlconf),
)
# Flatten a single context. Not really necessary anymore thanks to the
# __getattr__ flattening in ContextList, but has some edge case
# backwards compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
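# A usage sketch (illustrative): Django test cases expose an instance of this
# class as self.async_client, usable directly from async test methods.
#
#   async def test_async_view(self):
#       response = await self.async_client.get("/async_get_view/")
#       assert response.status_code == 200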
|
0f93ea8e4d0664a5ec45a03534aa2e35e304c961a80ff4dda78ed4e4b30a7ca2 | from urllib.parse import quote
from django.http import (
HttpResponseBadRequest,
HttpResponseForbidden,
HttpResponseNotFound,
HttpResponseServerError,
)
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.views.decorators.csrf import requires_csrf_token
ERROR_404_TEMPLATE_NAME = "404.html"
ERROR_403_TEMPLATE_NAME = "403.html"
ERROR_400_TEMPLATE_NAME = "400.html"
ERROR_500_TEMPLATE_NAME = "500.html"
ERROR_PAGE_TEMPLATE = """
<!doctype html>
<html lang="en">
<head>
<title>%(title)s</title>
</head>
<body>
<h1>%(title)s</h1><p>%(details)s</p>
</body>
</html>
"""
# These views can be called when CsrfViewMiddleware.process_view() has not
# run, therefore they need @requires_csrf_token in case the template uses
# {% csrf_token %}.
@requires_csrf_token
def page_not_found(request, exception, template_name=ERROR_404_TEMPLATE_NAME):
"""
Default 404 handler.
Templates: :template:`404.html`
Context:
request_path
The path of the requested URL (e.g., '/app/pages/bad_page/'). It's
quoted to prevent a content injection attack.
exception
The message from the exception which triggered the 404 (if one was
supplied), or the exception class name
"""
exception_repr = exception.__class__.__name__
# Try to get an "interesting" exception message, if any (and not the ugly
# Resolver404 dictionary)
try:
message = exception.args[0]
except (AttributeError, IndexError):
pass
else:
if isinstance(message, str):
exception_repr = message
context = {
"request_path": quote(request.path),
"exception": exception_repr,
}
try:
template = loader.get_template(template_name)
body = template.render(context, request)
except TemplateDoesNotExist:
if template_name != ERROR_404_TEMPLATE_NAME:
# Reraise if it's a missing custom template.
raise
# Render template (even though there are no substitutions) to allow
# inspecting the context in tests.
template = Engine().from_string(
ERROR_PAGE_TEMPLATE
% {
"title": "Not Found",
"details": "The requested resource was not found on this server.",
},
)
body = template.render(Context(context))
return HttpResponseNotFound(body)
@requires_csrf_token
def server_error(request, template_name=ERROR_500_TEMPLATE_NAME):
"""
500 error handler.
Templates: :template:`500.html`
Context: None
"""
try:
template = loader.get_template(template_name)
except TemplateDoesNotExist:
if template_name != ERROR_500_TEMPLATE_NAME:
# Reraise if it's a missing custom template.
raise
return HttpResponseServerError(
ERROR_PAGE_TEMPLATE % {"title": "Server Error (500)", "details": ""},
)
return HttpResponseServerError(template.render())
@requires_csrf_token
def bad_request(request, exception, template_name=ERROR_400_TEMPLATE_NAME):
"""
400 error handler.
Templates: :template:`400.html`
Context: None
"""
try:
template = loader.get_template(template_name)
except TemplateDoesNotExist:
if template_name != ERROR_400_TEMPLATE_NAME:
# Reraise if it's a missing custom template.
raise
return HttpResponseBadRequest(
ERROR_PAGE_TEMPLATE % {"title": "Bad Request (400)", "details": ""},
)
# No exception content is passed to the template, to not disclose any
# sensitive information.
return HttpResponseBadRequest(template.render())
@requires_csrf_token
def permission_denied(request, exception, template_name=ERROR_403_TEMPLATE_NAME):
"""
Permission denied (403) handler.
Templates: :template:`403.html`
Context:
exception
The message from the exception which triggered the 403 (if one was
supplied).
    If the template does not exist, an HttpResponseForbidden response
    containing the text "403 Forbidden" (as per RFC 9110 Section 15.5.4) will
    be returned.
"""
try:
template = loader.get_template(template_name)
except TemplateDoesNotExist:
if template_name != ERROR_403_TEMPLATE_NAME:
# Reraise if it's a missing custom template.
raise
return HttpResponseForbidden(
ERROR_PAGE_TEMPLATE % {"title": "403 Forbidden", "details": ""},
)
return HttpResponseForbidden(
template.render(request=request, context={"exception": str(exception)})
)
|
0c797ae14297761c9558c9c174bb32c6133ac5fe56568ee8f335f921515270ea | import codecs
import datetime
import locale
from decimal import Decimal
from urllib.parse import quote
from django.utils.functional import Promise
class DjangoUnicodeDecodeError(UnicodeDecodeError):
def __init__(self, obj, *args):
self.obj = obj
super().__init__(*args)
def __str__(self):
return "%s. You passed in %r (%s)" % (
super().__str__(),
self.obj,
type(self.obj),
)
def smart_str(s, encoding="utf-8", strings_only=False, errors="strict"):
"""
Return a string representing 's'. Treat bytestrings using the 'encoding'
codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_str(s, encoding, strings_only, errors)
_PROTECTED_TYPES = (
type(None),
int,
float,
Decimal,
datetime.datetime,
datetime.date,
datetime.time,
)
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_str(strings_only=True).
"""
return isinstance(obj, _PROTECTED_TYPES)
def force_str(s, encoding="utf-8", strings_only=False, errors="strict"):
"""
Similar to smart_str(), except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first for performance reasons.
if issubclass(type(s), str):
return s
if strings_only and is_protected_type(s):
return s
try:
if isinstance(s, bytes):
s = str(s, encoding, errors)
else:
s = str(s)
except UnicodeDecodeError as e:
raise DjangoUnicodeDecodeError(s, *e.args)
return s
def smart_bytes(s, encoding="utf-8", strings_only=False, errors="strict"):
"""
Return a bytestring version of 's', encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_bytes(s, encoding, strings_only, errors)
def force_bytes(s, encoding="utf-8", strings_only=False, errors="strict"):
"""
    Similar to smart_bytes, except that lazy instances are resolved to
    bytestrings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first for performance reasons.
if isinstance(s, bytes):
if encoding == "utf-8":
return s
else:
return s.decode("utf-8", errors).encode(encoding, errors)
if strings_only and is_protected_type(s):
return s
if isinstance(s, memoryview):
return bytes(s)
return str(s).encode(encoding, errors)
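# A minimal usage sketch of the helpers above (illustrative values; assumes
# the default utf-8 encoding):
#
#     >>> force_str(b"caf\xc3\xa9")
#     'café'
#     >>> force_bytes("café")
#     b'caf\xc3\xa9'
#     >>> force_bytes(b"data")  # utf-8 bytes pass through unchanged
#     b'data'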
def iri_to_uri(iri):
"""
Convert an Internationalized Resource Identifier (IRI) portion to a URI
portion that is suitable for inclusion in a URL.
This is the algorithm from RFC 3987 Section 3.1, slightly simplified since
the input is assumed to be a string rather than an arbitrary byte stream.
Take an IRI (string or UTF-8 bytes, e.g. '/I ♥ Django/' or
b'/I \xe2\x99\xa5 Django/') and return a string containing the encoded
result with ASCII chars only (e.g. '/I%20%E2%99%A5%20Django/').
"""
# The list of safe characters here is constructed from the "reserved" and
# "unreserved" characters specified in RFC 3986 Sections 2.2 and 2.3:
# reserved = gen-delims / sub-delims
# gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
# Of the unreserved characters, urllib.parse.quote() already considers all
# but the ~ safe.
# The % character is also added to the list of safe characters here, as the
# end of RFC 3987 Section 3.1 specifically mentions that % must not be
# converted.
if iri is None:
return iri
elif isinstance(iri, Promise):
iri = str(iri)
return quote(iri, safe="/#%[]=:;$&()+,!?*@'~")
# List of byte values that uri_to_iri() decodes from percent encoding.
# First, the unreserved characters from RFC 3986:
_ascii_ranges = [[45, 46, 95, 126], range(65, 91), range(97, 123)]
_hextobyte = {
(fmt % char).encode(): bytes((char,))
for ascii_range in _ascii_ranges
for char in ascii_range
for fmt in ["%02x", "%02X"]
}
# And then everything above 128, because bytes ≥ 128 are part of multibyte
# Unicode characters.
_hexdig = "0123456789ABCDEFabcdef"
_hextobyte.update(
{(a + b).encode(): bytes.fromhex(a + b) for a in _hexdig[8:] for b in _hexdig}
)
def uri_to_iri(uri):
"""
    Convert a Uniform Resource Identifier (URI) into an Internationalized
    Resource Identifier (IRI).
    This is the algorithm from RFC 3987 Section 3.2, excluding step 4.
    Take a URI in ASCII bytes (e.g. '/I%20%E2%99%A5%20Django/') and return
    a string containing the encoded result (e.g. '/I%20♥%20Django/').
"""
if uri is None:
return uri
uri = force_bytes(uri)
# Fast selective unquote: First, split on '%' and then starting with the
# second block, decode the first 2 bytes if they represent a hex code to
# decode. The rest of the block is the part after '%AB', not containing
# any '%'. Add that to the output without further processing.
bits = uri.split(b"%")
if len(bits) == 1:
iri = uri
else:
parts = [bits[0]]
append = parts.append
hextobyte = _hextobyte
for item in bits[1:]:
hex = item[:2]
if hex in hextobyte:
append(hextobyte[item[:2]])
append(item[2:])
else:
append(b"%")
append(item)
iri = b"".join(parts)
return repercent_broken_unicode(iri).decode()
def escape_uri_path(path):
"""
Escape the unsafe characters from the path portion of a Uniform Resource
Identifier (URI).
"""
# These are the "reserved" and "unreserved" characters specified in RFC
# 3986 Sections 2.2 and 2.3:
# reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" | "$" | ","
# unreserved = alphanum | mark
# mark = "-" | "_" | "." | "!" | "~" | "*" | "'" | "(" | ")"
# The list of safe characters here is constructed subtracting ";", "=",
# and "?" according to RFC 3986 Section 3.3.
# The reason for not subtracting and escaping "/" is that we are escaping
# the entire path, not a path segment.
return quote(path, safe="/:@&+$,-_.!~*'()")
def punycode(domain):
"""Return the Punycode of the given domain if it's non-ASCII."""
return domain.encode("idna").decode("ascii")
def repercent_broken_unicode(path):
"""
As per RFC 3987 Section 3.2, step three of converting a URI into an IRI,
repercent-encode any octet produced that is not part of a strictly legal
UTF-8 octet sequence.
"""
while True:
try:
path.decode()
except UnicodeDecodeError as e:
# CVE-2019-14235: A recursion shouldn't be used since the exception
# handling uses massive amounts of memory
repercent = quote(path[e.start : e.end], safe=b"/#%[]=:;$&()+,!?*@'~")
path = path[: e.start] + repercent.encode() + path[e.end :]
else:
return path
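# A short sketch of the repercent behavior (illustrative input: a lone 0xE9
# byte, which is not a valid UTF-8 sequence, so it gets re-percent-encoded):
#
#     >>> repercent_broken_unicode(b"/caf\xe9/")
#     b'/caf%E9/'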
def filepath_to_uri(path):
"""Convert a file system path to a URI portion that is suitable for
inclusion in a URL.
Encode certain chars that would normally be recognized as special chars
for URIs. Do not encode the ' character, as it is a valid character
within URIs. See the encodeURIComponent() JavaScript function for details.
"""
if path is None:
return path
# I know about `os.sep` and `os.altsep` but I want to leave
# some flexibility for hardcoding separators.
return quote(str(path).replace("\\", "/"), safe="/~!*()'")
def get_system_encoding():
"""
    The encoding for the character type functions. Fall back to 'ascii' if the
    encoding is unsupported by Python or could not be determined. See tickets
    #10335 and #5846.
"""
try:
encoding = locale.getlocale()[1] or "ascii"
codecs.lookup(encoding)
except Exception:
encoding = "ascii"
return encoding
DEFAULT_LOCALE_ENCODING = get_system_encoding()
|
74cb675be70f6b3fae3d2921d86046502590d3be8ead0b8aaf690e9e2ebfe5b9 | """HTML utilities suitable for global use."""
import html
import json
import re
from html.parser import HTMLParser
from urllib.parse import parse_qsl, quote, unquote, urlencode, urlsplit, urlunsplit
from django.utils.encoding import punycode
from django.utils.functional import Promise, keep_lazy, keep_lazy_text
from django.utils.http import RFC3986_GENDELIMS, RFC3986_SUBDELIMS
from django.utils.regex_helper import _lazy_re_compile
from django.utils.safestring import SafeData, SafeString, mark_safe
from django.utils.text import normalize_newlines
@keep_lazy(SafeString)
def escape(text):
"""
Return the given text with ampersands, quotes and angle brackets encoded
for use in HTML.
Always escape input, even if it's already escaped and marked as such.
This may result in double-escaping. If this is a concern, use
conditional_escape() instead.
"""
return SafeString(html.escape(str(text)))
_js_escapes = {
ord("\\"): "\\u005C",
ord("'"): "\\u0027",
ord('"'): "\\u0022",
ord(">"): "\\u003E",
ord("<"): "\\u003C",
ord("&"): "\\u0026",
ord("="): "\\u003D",
ord("-"): "\\u002D",
ord(";"): "\\u003B",
ord("`"): "\\u0060",
ord("\u2028"): "\\u2028",
ord("\u2029"): "\\u2029",
}
# Escape every ASCII character with a value less than 32.
_js_escapes.update((ord("%c" % z), "\\u%04X" % z) for z in range(32))
@keep_lazy(SafeString)
def escapejs(value):
"""Hex encode characters for use in JavaScript strings."""
return mark_safe(str(value).translate(_js_escapes))
_json_script_escapes = {
ord(">"): "\\u003E",
ord("<"): "\\u003C",
ord("&"): "\\u0026",
}
def json_script(value, element_id=None, encoder=None):
"""
Escape all the HTML/XML special characters with their unicode escapes, so
value is safe to be output anywhere except for inside a tag attribute. Wrap
the escaped JSON in a script tag.
"""
from django.core.serializers.json import DjangoJSONEncoder
json_str = json.dumps(value, cls=encoder or DjangoJSONEncoder).translate(
_json_script_escapes
)
if element_id:
template = '<script id="{}" type="application/json">{}</script>'
args = (element_id, mark_safe(json_str))
else:
template = '<script type="application/json">{}</script>'
args = (mark_safe(json_str),)
return format_html(template, *args)
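# A sketch of json_script() output (illustrative element_id; assumes Django
# is importable so DjangoJSONEncoder can be loaded):
#
#     >>> json_script({"key": "<value>"}, element_id="data")
#     '<script id="data" type="application/json">{"key": "\\u003Cvalue\\u003E"}</script>'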
def conditional_escape(text):
"""
Similar to escape(), except that it doesn't operate on pre-escaped strings.
This function relies on the __html__ convention used both by Django's
SafeData class and by third-party libraries like markupsafe.
"""
if isinstance(text, Promise):
text = str(text)
if hasattr(text, "__html__"):
return text.__html__()
else:
return escape(text)
def format_html(format_string, *args, **kwargs):
"""
Similar to str.format, but pass all arguments through conditional_escape(),
and call mark_safe() on the result. This function should be used instead
of str.format or % interpolation to build up small HTML fragments.
"""
args_safe = map(conditional_escape, args)
kwargs_safe = {k: conditional_escape(v) for (k, v) in kwargs.items()}
return mark_safe(format_string.format(*args_safe, **kwargs_safe))
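# A behavior sketch for format_html() (illustrative strings; the arguments
# are escaped, the format string itself is trusted):
#
#     >>> format_html("<b>{}</b>", "<script>alert(1)</script>")
#     '<b>&lt;script&gt;alert(1)&lt;/script&gt;</b>'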
def format_html_join(sep, format_string, args_generator):
"""
A wrapper of format_html, for the common case of a group of arguments that
need to be formatted using the same format string, and then joined using
'sep'. 'sep' is also passed through conditional_escape.
'args_generator' should be an iterator that returns the sequence of 'args'
that will be passed to format_html.
Example:
format_html_join('\n', "<li>{} {}</li>", ((u.first_name, u.last_name)
for u in users))
"""
return mark_safe(
conditional_escape(sep).join(
format_html(format_string, *args) for args in args_generator
)
)
@keep_lazy_text
def linebreaks(value, autoescape=False):
"""Convert newlines into <p> and <br>s."""
value = normalize_newlines(value)
paras = re.split("\n{2,}", str(value))
if autoescape:
paras = ["<p>%s</p>" % escape(p).replace("\n", "<br>") for p in paras]
else:
paras = ["<p>%s</p>" % p.replace("\n", "<br>") for p in paras]
return "\n\n".join(paras)
class MLStripper(HTMLParser):
def __init__(self):
super().__init__(convert_charrefs=False)
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def handle_entityref(self, name):
self.fed.append("&%s;" % name)
def handle_charref(self, name):
self.fed.append("&#%s;" % name)
def get_data(self):
return "".join(self.fed)
def _strip_once(value):
"""
Internal tag stripping utility used by strip_tags.
"""
s = MLStripper()
s.feed(value)
s.close()
return s.get_data()
@keep_lazy_text
def strip_tags(value):
"""Return the given HTML with all tags stripped."""
    # Note: in the typical case this loop executes _strip_once only once. The
    # loop condition is redundant, but it helps reduce the number of
    # executions of _strip_once.
value = str(value)
while "<" in value and ">" in value:
new_value = _strip_once(value)
if value.count("<") == new_value.count("<"):
# _strip_once wasn't able to detect more tags.
break
value = new_value
return value
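# A sketch of strip_tags() (illustrative markup):
#
#     >>> strip_tags("<p>Hello <b>world</b>!</p>")
#     'Hello world!'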
@keep_lazy_text
def strip_spaces_between_tags(value):
"""Return the given HTML with spaces between tags removed."""
return re.sub(r">\s+<", "><", str(value))
def smart_urlquote(url):
"""Quote a URL if it isn't already quoted."""
def unquote_quote(segment):
segment = unquote(segment)
# Tilde is part of RFC 3986 Section 2.3 Unreserved Characters,
# see also https://bugs.python.org/issue16285
return quote(segment, safe=RFC3986_SUBDELIMS + RFC3986_GENDELIMS + "~")
# Handle IDN before quoting.
try:
scheme, netloc, path, query, fragment = urlsplit(url)
except ValueError:
# invalid IPv6 URL (normally square brackets in hostname part).
return unquote_quote(url)
try:
netloc = punycode(netloc) # IDN -> ACE
except UnicodeError: # invalid domain part
return unquote_quote(url)
if query:
# Separately unquoting key/value, so as to not mix querystring separators
# included in query values. See #22267.
query_parts = [
(unquote(q[0]), unquote(q[1]))
for q in parse_qsl(query, keep_blank_values=True)
]
# urlencode will take care of quoting
query = urlencode(query_parts)
path = unquote_quote(path)
fragment = unquote_quote(fragment)
return urlunsplit((scheme, netloc, path, query, fragment))
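# A sketch of smart_urlquote() (illustrative URL; the path is percent-quoted,
# the query is re-encoded by urlencode):
#
#     >>> smart_urlquote("http://example.com/path with spaces/?q=a b")
#     'http://example.com/path%20with%20spaces/?q=a+b'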
class Urlizer:
"""
Convert any URLs in text into clickable links.
Work on http://, https://, www. links, and also on links ending in one of
the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
Links can have trailing punctuation (periods, commas, close-parens) and
leading punctuation (opening parens) and it'll still do the right thing.
"""
trailing_punctuation_chars = ".,:;!"
wrapping_punctuation = [("(", ")"), ("[", "]")]
simple_url_re = _lazy_re_compile(r"^https?://\[?\w", re.IGNORECASE)
simple_url_2_re = _lazy_re_compile(
r"^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)($|/.*)$", re.IGNORECASE
)
word_split_re = _lazy_re_compile(r"""([\s<>"']+)""")
mailto_template = "mailto:{local}@{domain}"
url_template = '<a href="{href}"{attrs}>{url}</a>'
def __call__(self, text, trim_url_limit=None, nofollow=False, autoescape=False):
"""
If trim_url_limit is not None, truncate the URLs in the link text
longer than this limit to trim_url_limit - 1 characters and append an
ellipsis.
If nofollow is True, give the links a rel="nofollow" attribute.
If autoescape is True, autoescape the link text and URLs.
"""
safe_input = isinstance(text, SafeData)
words = self.word_split_re.split(str(text))
return "".join(
[
self.handle_word(
word,
safe_input=safe_input,
trim_url_limit=trim_url_limit,
nofollow=nofollow,
autoescape=autoescape,
)
for word in words
]
)
def handle_word(
self,
word,
*,
safe_input,
trim_url_limit=None,
nofollow=False,
autoescape=False,
):
if "." in word or "@" in word or ":" in word:
# lead: Punctuation trimmed from the beginning of the word.
# middle: State of the word.
# trail: Punctuation trimmed from the end of the word.
lead, middle, trail = self.trim_punctuation(word)
# Make URL we want to point to.
url = None
nofollow_attr = ' rel="nofollow"' if nofollow else ""
if self.simple_url_re.match(middle):
url = smart_urlquote(html.unescape(middle))
elif self.simple_url_2_re.match(middle):
url = smart_urlquote("http://%s" % html.unescape(middle))
elif ":" not in middle and self.is_email_simple(middle):
local, domain = middle.rsplit("@", 1)
try:
domain = punycode(domain)
except UnicodeError:
return word
url = self.mailto_template.format(local=local, domain=domain)
nofollow_attr = ""
# Make link.
if url:
trimmed = self.trim_url(middle, limit=trim_url_limit)
if autoescape and not safe_input:
lead, trail = escape(lead), escape(trail)
trimmed = escape(trimmed)
middle = self.url_template.format(
href=escape(url),
attrs=nofollow_attr,
url=trimmed,
)
return mark_safe(f"{lead}{middle}{trail}")
else:
if safe_input:
return mark_safe(word)
elif autoescape:
return escape(word)
elif safe_input:
return mark_safe(word)
elif autoescape:
return escape(word)
return word
def trim_url(self, x, *, limit):
if limit is None or len(x) <= limit:
return x
return "%s…" % x[: max(0, limit - 1)]
def trim_punctuation(self, word):
"""
Trim trailing and wrapping punctuation from `word`. Return the items of
the new state.
"""
lead, middle, trail = "", word, ""
# Continue trimming until middle remains unchanged.
trimmed_something = True
while trimmed_something:
trimmed_something = False
# Trim wrapping punctuation.
for opening, closing in self.wrapping_punctuation:
if middle.startswith(opening):
middle = middle[len(opening) :]
lead += opening
trimmed_something = True
# Keep parentheses at the end only if they're balanced.
if (
middle.endswith(closing)
and middle.count(closing) == middle.count(opening) + 1
):
middle = middle[: -len(closing)]
trail = closing + trail
trimmed_something = True
# Trim trailing punctuation (after trimming wrapping punctuation,
# as encoded entities contain ';'). Unescape entities to avoid
# breaking them by removing ';'.
middle_unescaped = html.unescape(middle)
stripped = middle_unescaped.rstrip(self.trailing_punctuation_chars)
if middle_unescaped != stripped:
punctuation_count = len(middle_unescaped) - len(stripped)
trail = middle[-punctuation_count:] + trail
middle = middle[:-punctuation_count]
trimmed_something = True
return lead, middle, trail
@staticmethod
def is_email_simple(value):
"""Return True if value looks like an email address."""
# An @ must be in the middle of the value.
if "@" not in value or value.startswith("@") or value.endswith("@"):
return False
try:
p1, p2 = value.split("@")
except ValueError:
# value contains more than one @.
return False
# Dot must be in p2 (e.g. example.com)
if "." not in p2 or p2.startswith("."):
return False
return True
urlizer = Urlizer()
@keep_lazy_text
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
return urlizer(
text, trim_url_limit=trim_url_limit, nofollow=nofollow, autoescape=autoescape
)
def avoid_wrapping(value):
"""
Avoid text wrapping in the middle of a phrase by adding non-breaking
spaces where there previously were normal spaces.
"""
return value.replace(" ", "\xa0")
def html_safe(klass):
"""
A decorator that defines the __html__ method. This helps non-Django
templates to detect classes whose __str__ methods return SafeString.
"""
if "__html__" in klass.__dict__:
raise ValueError(
"can't apply @html_safe to %s because it defines "
"__html__()." % klass.__name__
)
if "__str__" not in klass.__dict__:
raise ValueError(
"can't apply @html_safe to %s because it doesn't "
"define __str__()." % klass.__name__
)
klass_str = klass.__str__
klass.__str__ = lambda self: mark_safe(klass_str(self))
klass.__html__ = lambda self: str(self)
return klass
|
6bd2de2427688b663f6a092adf44bf583b2ec13c6ed8e7821473f3ac04b93a8f | import base64
import datetime
import re
import unicodedata
from binascii import Error as BinasciiError
from email.utils import formatdate
from urllib.parse import (
ParseResult,
SplitResult,
_coerce_args,
_splitnetloc,
_splitparams,
scheme_chars,
unquote,
)
from urllib.parse import urlencode as original_urlencode
from urllib.parse import uses_params
from django.utils.datastructures import MultiValueDict
from django.utils.regex_helper import _lazy_re_compile
# Based on RFC 9110 Appendix A.
ETAG_MATCH = _lazy_re_compile(
r"""
\A( # start of string and capture group
(?:W/)? # optional weak indicator
" # opening quote
[^"]* # any sequence of non-quote characters
" # end quote
)\Z # end of string and capture group
""",
re.X,
)
MONTHS = "jan feb mar apr may jun jul aug sep oct nov dec".split()
__D = r"(?P<day>[0-9]{2})"
__D2 = r"(?P<day>[ 0-9][0-9])"
__M = r"(?P<mon>\w{3})"
__Y = r"(?P<year>[0-9]{4})"
__Y2 = r"(?P<year>[0-9]{2})"
__T = r"(?P<hour>[0-9]{2}):(?P<min>[0-9]{2}):(?P<sec>[0-9]{2})"
RFC1123_DATE = _lazy_re_compile(r"^\w{3}, %s %s %s %s GMT$" % (__D, __M, __Y, __T))
RFC850_DATE = _lazy_re_compile(r"^\w{6,9}, %s-%s-%s %s GMT$" % (__D, __M, __Y2, __T))
ASCTIME_DATE = _lazy_re_compile(r"^\w{3} %s %s %s %s$" % (__M, __D2, __T, __Y))
RFC3986_GENDELIMS = ":/?#[]@"
RFC3986_SUBDELIMS = "!$&'()*+,;="
# TODO: Remove when dropping support for PY38.
# Unsafe bytes to be removed per WHATWG spec.
_UNSAFE_URL_BYTES_TO_REMOVE = ["\t", "\r", "\n"]
def urlencode(query, doseq=False):
"""
A version of Python's urllib.parse.urlencode() function that can operate on
MultiValueDict and non-string values.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, "items"):
query = query.items()
query_params = []
for key, value in query:
if value is None:
raise TypeError(
"Cannot encode None for key '%s' in a query string. Did you "
"mean to pass an empty string or omit the value?" % key
)
elif not doseq or isinstance(value, (str, bytes)):
query_val = value
else:
try:
itr = iter(value)
except TypeError:
query_val = value
else:
# Consume generators and iterators, when doseq=True, to
# work around https://bugs.python.org/issue31706.
query_val = []
for item in itr:
if item is None:
raise TypeError(
"Cannot encode None for key '%s' in a query "
"string. Did you mean to pass an empty string or "
"omit the value?" % key
)
elif not isinstance(item, bytes):
item = str(item)
query_val.append(item)
query_params.append((key, query_val))
return original_urlencode(query_params, doseq)
def http_date(epoch_seconds=None):
"""
Format the time to match the RFC 5322 date format as specified by RFC 9110
Section 5.6.7.
`epoch_seconds` is a floating point number expressed in seconds since the
epoch, in UTC - such as that outputted by time.time(). If set to None, it
defaults to the current time.
Output a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
return formatdate(epoch_seconds, usegmt=True)
def parse_http_date(date):
"""
Parse a date format as specified by HTTP RFC 9110 Section 5.6.7.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Return an integer expressed in seconds since the epoch, in UTC.
"""
# email.utils.parsedate() does the job for RFC 1123 dates; unfortunately
# RFC 9110 makes it mandatory to support RFC 850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
tz = datetime.timezone.utc
year = int(m["year"])
if year < 100:
current_year = datetime.datetime.now(tz=tz).year
current_century = current_year - (current_year % 100)
if year - (current_year % 100) > 50:
                # Years that appear to be more than 50 years in the future are
                # interpreted as representing the past.
year += current_century - 100
else:
year += current_century
month = MONTHS.index(m["mon"].lower()) + 1
day = int(m["day"])
hour = int(m["hour"])
min = int(m["min"])
sec = int(m["sec"])
result = datetime.datetime(year, month, day, hour, min, sec, tzinfo=tz)
return int(result.timestamp())
except Exception as exc:
raise ValueError("%r is not a valid date" % date) from exc
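# A roundtrip sketch for http_date()/parse_http_date(), using the classic
# RFC 9110 example date:
#
#     >>> http_date(784111777)
#     'Sun, 06 Nov 1994 08:49:37 GMT'
#     >>> parse_http_date("Sun, 06 Nov 1994 08:49:37 GMT")
#     784111777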
def parse_http_date_safe(date):
"""
Same as parse_http_date, but return None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Convert a base 36 string to an int. Raise ValueError if the input won't fit
into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
return int(s, 36)
def int_to_base36(i):
"""Convert an integer to a base36 string."""
char_set = "0123456789abcdefghijklmnopqrstuvwxyz"
if i < 0:
raise ValueError("Negative base36 conversion input.")
if i < 36:
return char_set[i]
b36 = ""
while i != 0:
i, n = divmod(i, 36)
b36 = char_set[n] + b36
return b36
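# A base 36 roundtrip sketch (illustrative value):
#
#     >>> int_to_base36(1337)
#     '115'
#     >>> base36_to_int("115")
#     1337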
def urlsafe_base64_encode(s):
"""
Encode a bytestring to a base64 string for use in URLs. Strip any trailing
equal signs.
"""
return base64.urlsafe_b64encode(s).rstrip(b"\n=").decode("ascii")
def urlsafe_base64_decode(s):
"""
Decode a base64 encoded string. Add back any trailing equal signs that
might have been stripped.
"""
s = s.encode()
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b"="))
except (LookupError, BinasciiError) as e:
raise ValueError(e)
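# A roundtrip sketch for the URL-safe base64 helpers (padding is stripped on
# encode and restored on decode):
#
#     >>> urlsafe_base64_encode(b"hello")
#     'aGVsbG8'
#     >>> urlsafe_base64_decode("aGVsbG8")
#     b'hello'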
def parse_etags(etag_str):
"""
Parse a string of ETags given in an If-None-Match or If-Match header as
defined by RFC 9110. Return a list of quoted ETags, or ['*'] if all ETags
should be matched.
"""
if etag_str.strip() == "*":
return ["*"]
else:
# Parse each ETag individually, and return any that are valid.
etag_matches = (ETAG_MATCH.match(etag.strip()) for etag in etag_str.split(","))
return [match[1] for match in etag_matches if match]
def quote_etag(etag_str):
"""
If the provided string is already a quoted ETag, return it. Otherwise, wrap
the string in quotes, making it a strong ETag.
"""
if ETAG_MATCH.match(etag_str):
return etag_str
else:
return '"%s"' % etag_str
def is_same_domain(host, pattern):
"""
Return ``True`` if the host is either an exact match or a match
to the wildcard pattern.
Any pattern beginning with a period matches a domain and all of its
subdomains. (e.g. ``.example.com`` matches ``example.com`` and
``foo.example.com``). Anything else is an exact string match.
"""
if not pattern:
return False
pattern = pattern.lower()
return (
pattern[0] == "."
and (host.endswith(pattern) or host == pattern[1:])
or pattern == host
)
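# A sketch of is_same_domain() (illustrative values; the host is expected to
# be lowercased by the caller):
#
#     >>> is_same_domain("foo.example.com", ".example.com")
#     True
#     >>> is_same_domain("example.com", ".example.com")
#     True
#     >>> is_same_domain("badexample.com", ".example.com")
#     False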
def url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):
"""
Return ``True`` if the url uses an allowed host and a safe scheme.
Always return ``False`` on an empty url.
If ``require_https`` is ``True``, only 'https' will be considered a valid
scheme, as opposed to 'http' and 'https' with the default, ``False``.
Note: "True" doesn't entail that a URL is "safe". It may still be e.g.
    quoted incorrectly. Be sure to also use django.utils.encoding.iri_to_uri()
on the path component of untrusted URLs.
"""
if url is not None:
url = url.strip()
if not url:
return False
if allowed_hosts is None:
allowed_hosts = set()
elif isinstance(allowed_hosts, str):
allowed_hosts = {allowed_hosts}
# Chrome treats \ completely as / in paths but it could be part of some
# basic auth credentials so we need to check both URLs.
return _url_has_allowed_host_and_scheme(
url, allowed_hosts, require_https=require_https
) and _url_has_allowed_host_and_scheme(
url.replace("\\", "/"), allowed_hosts, require_https=require_https
)
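# A behavior sketch for url_has_allowed_host_and_scheme() (illustrative URLs):
#
#     >>> url_has_allowed_host_and_scheme("/next/page", allowed_hosts=None)
#     True
#     >>> url_has_allowed_host_and_scheme("https://evil.com/", allowed_hosts={"example.com"})
#     False
#     >>> url_has_allowed_host_and_scheme("https://example.com/p", allowed_hosts={"example.com"})
#     True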
# TODO: Remove when dropping support for PY38.
# Copied from urllib.parse.urlparse() but uses fixed urlsplit() function.
def _urlparse(url, scheme="", allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
splitresult = _urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = splitresult
if scheme in uses_params and ";" in url:
url, params = _splitparams(url)
else:
params = ""
result = ParseResult(scheme, netloc, url, params, query, fragment)
return _coerce_result(result)
# TODO: Remove when dropping support for PY38.
def _remove_unsafe_bytes_from_url(url):
for b in _UNSAFE_URL_BYTES_TO_REMOVE:
url = url.replace(b, "")
return url
# TODO: Remove when dropping support for PY38.
# Backport of urllib.parse.urlsplit() from Python 3.9.
def _urlsplit(url, scheme="", allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
url = _remove_unsafe_bytes_from_url(url)
scheme = _remove_unsafe_bytes_from_url(scheme)
netloc = query = fragment = ""
i = url.find(":")
if i > 0:
for c in url[:i]:
if c not in scheme_chars:
break
else:
scheme, url = url[:i].lower(), url[i + 1 :]
if url[:2] == "//":
netloc, url = _splitnetloc(url, 2)
if ("[" in netloc and "]" not in netloc) or (
"]" in netloc and "[" not in netloc
):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and "#" in url:
url, fragment = url.split("#", 1)
if "?" in url:
url, query = url.split("?", 1)
v = SplitResult(scheme, netloc, url, query, fragment)
return _coerce_result(v)
def _url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):
# Chrome considers any URL with more than two slashes to be absolute, but
# urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith("///"):
return False
try:
url_info = _urlparse(url)
except ValueError: # e.g. invalid IPv6 addresses
return False
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
    # In that URL, example.com is not the hostname but a path component. However,
# Chrome will still consider example.com to be the hostname, so we must not
# allow this syntax.
if not url_info.netloc and url_info.scheme:
return False
# Forbid URLs that start with control characters. Some browsers (like
# Chrome) ignore quite a few control characters at the start of a
# URL and might consider the URL as scheme relative.
if unicodedata.category(url[0])[0] == "C":
return False
scheme = url_info.scheme
# Consider URLs without a scheme (e.g. //example.com/p) to be http.
if not url_info.scheme and url_info.netloc:
scheme = "http"
valid_schemes = ["https"] if require_https else ["http", "https"]
return (not url_info.netloc or url_info.netloc in allowed_hosts) and (
not scheme or scheme in valid_schemes
)
def escape_leading_slashes(url):
"""
If redirecting to an absolute path (two leading slashes), a slash must be
escaped to prevent browsers from handling the path as schemaless and
redirecting to another host.
"""
if url.startswith("//"):
url = "/%2F{}".format(url[2:])
return url
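# A sketch of escape_leading_slashes() (a protocol-relative target gets its
# second slash escaped so browsers treat it as a local path):
#
#     >>> escape_leading_slashes("//example.com/evil")
#     '/%2Fexample.com/evil'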
def _parseparam(s):
while s[:1] == ";":
s = s[1:]
end = s.find(";")
while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
end = s.find(";", end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
def parse_header_parameters(line):
"""
Parse a Content-type like header.
Return the main content-type and a dictionary of options.
"""
parts = _parseparam(";" + line)
key = parts.__next__().lower()
pdict = {}
for p in parts:
i = p.find("=")
if i >= 0:
has_encoding = False
name = p[:i].strip().lower()
if name.endswith("*"):
# Lang/encoding embedded in the value (like "filename*=UTF-8''file.ext")
# https://tools.ietf.org/html/rfc2231#section-4
name = name[:-1]
if p.count("'") == 2:
has_encoding = True
value = p[i + 1 :].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace("\\\\", "\\").replace('\\"', '"')
if has_encoding:
encoding, lang, value = value.split("'")
value = unquote(value, encoding=encoding)
pdict[name] = value
return key, pdict
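# A sketch of parse_header_parameters() (illustrative Content-Type header):
#
#     >>> parse_header_parameters('text/html; charset="utf-8"')
#     ('text/html', {'charset': 'utf-8'})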
|
720850aa4677e039ec512638c28586bb28441db40291909722972f3fd2016f36 | """
This module contains helper functions for controlling caching. It does so by
managing the "Vary" header of responses. It includes functions to patch the
header of response objects directly and decorators that change functions to do
that header-patching themselves.
For information on the Vary header, see RFC 9110 Section 12.5.5.
Essentially, the "Vary" HTTP header defines which headers a cache should take
into account when building its cache key. Requests with the same path but
different header content for headers named in "Vary" need to get different
cache keys to prevent delivery of wrong content.
An example: i18n middleware would need to distinguish caches by the
"Accept-language" header.
"""
import time
from collections import defaultdict
from django.conf import settings
from django.core.cache import caches
from django.http import HttpResponse, HttpResponseNotModified
from django.utils.crypto import md5
from django.utils.http import http_date, parse_etags, parse_http_date_safe, quote_etag
from django.utils.log import log_response
from django.utils.regex_helper import _lazy_re_compile
from django.utils.timezone import get_current_timezone_name
from django.utils.translation import get_language
cc_delim_re = _lazy_re_compile(r"\s*,\s*")
def patch_cache_control(response, **kwargs):
"""
Patch the Cache-Control header by adding all keyword arguments to it.
The transformation is as follows:
* All keyword parameter names are turned to lowercase, and underscores
are converted to hyphens.
* If the value of a parameter is True (exactly True, not just a
true value), only the parameter name is added to the header.
* All other parameters are added with their value, after applying
str() to it.
"""
def dictitem(s):
t = s.split("=", 1)
if len(t) > 1:
return (t[0].lower(), t[1])
else:
return (t[0].lower(), True)
def dictvalue(*t):
if t[1] is True:
return t[0]
else:
return "%s=%s" % (t[0], t[1])
cc = defaultdict(set)
if response.get("Cache-Control"):
for field in cc_delim_re.split(response.headers["Cache-Control"]):
directive, value = dictitem(field)
if directive == "no-cache":
# no-cache supports multiple field names.
cc[directive].add(value)
else:
cc[directive] = value
# If there's already a max-age header but we're being asked to set a new
# max-age, use the minimum of the two ages. In practice this happens when
# a decorator and a piece of middleware both operate on a given view.
if "max-age" in cc and "max_age" in kwargs:
kwargs["max_age"] = min(int(cc["max-age"]), kwargs["max_age"])
# Allow overriding private caching and vice versa
if "private" in cc and "public" in kwargs:
del cc["private"]
elif "public" in cc and "private" in kwargs:
del cc["public"]
for (k, v) in kwargs.items():
directive = k.replace("_", "-")
if directive == "no-cache":
# no-cache supports multiple field names.
cc[directive].add(v)
else:
cc[directive] = v
directives = []
for directive, values in cc.items():
if isinstance(values, set):
if True in values:
# True takes precedence.
values = {True}
directives.extend([dictvalue(directive, value) for value in values])
else:
directives.append(dictvalue(directive, values))
cc = ", ".join(directives)
response.headers["Cache-Control"] = cc
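# A usage sketch for patch_cache_control() (assumes a configured Django
# environment so an HttpResponse can be instantiated):
#
#     >>> response = HttpResponse()
#     >>> patch_cache_control(response, max_age=3600, public=True)
#     >>> response.headers["Cache-Control"]
#     'max-age=3600, public'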
def get_max_age(response):
"""
Return the max-age from the response Cache-Control header as an integer,
or None if it wasn't found or wasn't an integer.
"""
if not response.has_header("Cache-Control"):
return
cc = dict(
_to_tuple(el) for el in cc_delim_re.split(response.headers["Cache-Control"])
)
try:
return int(cc["max-age"])
except (ValueError, TypeError, KeyError):
pass
def set_response_etag(response):
if not response.streaming and response.content:
response.headers["ETag"] = quote_etag(
md5(response.content, usedforsecurity=False).hexdigest(),
)
return response
def _precondition_failed(request):
response = HttpResponse(status=412)
log_response(
"Precondition Failed: %s",
request.path,
response=response,
request=request,
)
return response
def _not_modified(request, response=None):
new_response = HttpResponseNotModified()
if response:
# Preserve the headers required by RFC 9110 Section 15.4.5, as well as
# Last-Modified.
for header in (
"Cache-Control",
"Content-Location",
"Date",
"ETag",
"Expires",
"Last-Modified",
"Vary",
):
if header in response:
new_response.headers[header] = response.headers[header]
# Preserve cookies as per the cookie specification: "If a proxy server
# receives a response which contains a Set-cookie header, it should
# propagate the Set-cookie header to the client, regardless of whether
        # the response was 304 (Not Modified) or 200 (OK)."
# https://curl.haxx.se/rfc/cookie_spec.html
new_response.cookies = response.cookies
return new_response
def get_conditional_response(request, etag=None, last_modified=None, response=None):
# Only return conditional responses on successful requests.
if response and not (200 <= response.status_code < 300):
return response
# Get HTTP request headers.
if_match_etags = parse_etags(request.META.get("HTTP_IF_MATCH", ""))
if_unmodified_since = request.META.get("HTTP_IF_UNMODIFIED_SINCE")
if_unmodified_since = if_unmodified_since and parse_http_date_safe(
if_unmodified_since
)
if_none_match_etags = parse_etags(request.META.get("HTTP_IF_NONE_MATCH", ""))
if_modified_since = request.META.get("HTTP_IF_MODIFIED_SINCE")
if_modified_since = if_modified_since and parse_http_date_safe(if_modified_since)
# Evaluation of request preconditions below follows RFC 9110 Section
# 13.2.2.
# Step 1: Test the If-Match precondition.
if if_match_etags and not _if_match_passes(etag, if_match_etags):
return _precondition_failed(request)
# Step 2: Test the If-Unmodified-Since precondition.
if (
not if_match_etags
and if_unmodified_since
and not _if_unmodified_since_passes(last_modified, if_unmodified_since)
):
return _precondition_failed(request)
# Step 3: Test the If-None-Match precondition.
if if_none_match_etags and not _if_none_match_passes(etag, if_none_match_etags):
if request.method in ("GET", "HEAD"):
return _not_modified(request, response)
else:
return _precondition_failed(request)
# Step 4: Test the If-Modified-Since precondition.
if (
not if_none_match_etags
and if_modified_since
and not _if_modified_since_passes(last_modified, if_modified_since)
and request.method in ("GET", "HEAD")
):
return _not_modified(request, response)
# Step 5: Test the If-Range precondition (not supported).
# Step 6: Return original response since there isn't a conditional response.
return response
def _if_match_passes(target_etag, etags):
"""
Test the If-Match comparison as defined in RFC 9110 Section 13.1.1.
"""
if not target_etag:
# If there isn't an ETag, then there can't be a match.
return False
elif etags == ["*"]:
# The existence of an ETag means that there is "a current
# representation for the target resource", even if the ETag is weak,
# so there is a match to '*'.
return True
elif target_etag.startswith("W/"):
# A weak ETag can never strongly match another ETag.
return False
else:
# Since the ETag is strong, this will only return True if there's a
# strong match.
return target_etag in etags
def _if_unmodified_since_passes(last_modified, if_unmodified_since):
"""
Test the If-Unmodified-Since comparison as defined in RFC 9110 Section
13.1.4.
"""
return last_modified and last_modified <= if_unmodified_since
def _if_none_match_passes(target_etag, etags):
"""
Test the If-None-Match comparison as defined in RFC 9110 Section 13.1.2.
"""
if not target_etag:
# If there isn't an ETag, then there isn't a match.
return True
elif etags == ["*"]:
# The existence of an ETag means that there is "a current
# representation for the target resource", so there is a match to '*'.
return False
else:
# The comparison should be weak, so look for a match after stripping
# off any weak indicators.
target_etag = target_etag.strip("W/")
etags = (etag.strip("W/") for etag in etags)
return target_etag not in etags
def _if_modified_since_passes(last_modified, if_modified_since):
"""
Test the If-Modified-Since comparison as defined in RFC 9110 Section
13.1.3.
"""
return not last_modified or last_modified > if_modified_since
def patch_response_headers(response, cache_timeout=None):
"""
Add HTTP caching headers to the given HttpResponse: Expires and
Cache-Control.
Each header is only added if it isn't already set.
cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used
by default.
"""
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
if cache_timeout < 0:
cache_timeout = 0 # Can't have max-age negative
if not response.has_header("Expires"):
response.headers["Expires"] = http_date(time.time() + cache_timeout)
patch_cache_control(response, max_age=cache_timeout)
def add_never_cache_headers(response):
"""
Add headers to a response to indicate that a page should never be cached.
"""
patch_response_headers(response, cache_timeout=-1)
patch_cache_control(
response, no_cache=True, no_store=True, must_revalidate=True, private=True
)
def patch_vary_headers(response, newheaders):
"""
Add (or update) the "Vary" header in the given HttpResponse object.
    newheaders is a list of header names that should be in "Vary". If
    newheaders contains an asterisk, then the "Vary" header will consist of a
    single asterisk '*'. Otherwise, existing headers in "Vary" aren't removed.
"""
# Note that we need to keep the original order intact, because cache
# implementations may rely on the order of the Vary contents in, say,
# computing an MD5 hash.
if response.has_header("Vary"):
vary_headers = cc_delim_re.split(response.headers["Vary"])
else:
vary_headers = []
# Use .lower() here so we treat headers as case-insensitive.
existing_headers = {header.lower() for header in vary_headers}
additional_headers = [
newheader
for newheader in newheaders
if newheader.lower() not in existing_headers
]
vary_headers += additional_headers
if "*" in vary_headers:
response.headers["Vary"] = "*"
else:
response.headers["Vary"] = ", ".join(vary_headers)
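# A usage sketch for patch_vary_headers() (assumes a configured Django
# environment; matching is case-insensitive and order is preserved):
#
#     >>> response = HttpResponse()
#     >>> patch_vary_headers(response, ("Accept-Encoding",))
#     >>> patch_vary_headers(response, ("Cookie", "accept-encoding"))
#     >>> response.headers["Vary"]
#     'Accept-Encoding, Cookie'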
def has_vary_header(response, header_query):
"""
Check to see if the response has a given header name in its Vary header.
"""
if not response.has_header("Vary"):
return False
vary_headers = cc_delim_re.split(response.headers["Vary"])
existing_headers = {header.lower() for header in vary_headers}
return header_query.lower() in existing_headers
def _i18n_cache_key_suffix(request, cache_key):
"""If necessary, add the current locale or time zone to the cache key."""
if settings.USE_I18N:
# first check if LocaleMiddleware or another middleware added
# LANGUAGE_CODE to request, then fall back to the active language
# which in turn can also fall back to settings.LANGUAGE_CODE
cache_key += ".%s" % getattr(request, "LANGUAGE_CODE", get_language())
if settings.USE_TZ:
cache_key += ".%s" % get_current_timezone_name()
return cache_key
def _generate_cache_key(request, method, headerlist, key_prefix):
"""Return a cache key from the headers given in the header list."""
ctx = md5(usedforsecurity=False)
for header in headerlist:
value = request.META.get(header)
if value is not None:
ctx.update(value.encode())
url = md5(request.build_absolute_uri().encode("ascii"), usedforsecurity=False)
cache_key = "views.decorators.cache.cache_page.%s.%s.%s.%s" % (
key_prefix,
method,
url.hexdigest(),
ctx.hexdigest(),
)
return _i18n_cache_key_suffix(request, cache_key)
def _generate_cache_header_key(key_prefix, request):
"""Return a cache key for the header cache."""
url = md5(request.build_absolute_uri().encode("ascii"), usedforsecurity=False)
cache_key = "views.decorators.cache.cache_header.%s.%s" % (
key_prefix,
url.hexdigest(),
)
return _i18n_cache_key_suffix(request, cache_key)
def get_cache_key(request, key_prefix=None, method="GET", cache=None):
"""
Return a cache key based on the request URL and query. It can be used
in the request phase because it pulls the list of headers to take into
account from the global URL registry and uses those to build a cache key
to check against.
If there isn't a headerlist stored, return None, indicating that the page
needs to be rebuilt.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
headerlist = cache.get(cache_key)
if headerlist is not None:
return _generate_cache_key(request, method, headerlist, key_prefix)
else:
return None
def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None):
"""
Learn what headers to take into account for some request URL from the
response object. Store those headers in a global URL registry so that
later access to that URL will know what headers to take into account
without building the response object itself. The headers are named in the
Vary header of the response, but we want to prevent response generation.
The list of headers to use for cache key generation is stored in the same
cache as the pages themselves. If the cache ages some data out of the
cache, this just means that we have to build the response once to get at
the Vary header and so at the list of headers to use for the cache key.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = caches[settings.CACHE_MIDDLEWARE_ALIAS]
if response.has_header("Vary"):
is_accept_language_redundant = settings.USE_I18N
# If i18n is used, the generated cache key will be suffixed with the
# current locale. Adding the raw value of Accept-Language is redundant
# in that case and would result in storing the same content under
# multiple keys in the cache. See #18191 for details.
headerlist = []
for header in cc_delim_re.split(response.headers["Vary"]):
header = header.upper().replace("-", "_")
if header != "ACCEPT_LANGUAGE" or not is_accept_language_redundant:
headerlist.append("HTTP_" + header)
headerlist.sort()
cache.set(cache_key, headerlist, cache_timeout)
return _generate_cache_key(request, request.method, headerlist, key_prefix)
else:
# if there is no Vary header, we still need a cache key
# for the request.build_absolute_uri()
cache.set(cache_key, [], cache_timeout)
return _generate_cache_key(request, request.method, [], key_prefix)
def _to_tuple(s):
t = s.split("=", 1)
if len(t) == 2:
return t[0].lower(), t[1]
return t[0].lower(), True
|
054e826607c1a37982a41369de611a41dd211a60bafa24fba1cb848885fb9d3f | from django.utils.cache import patch_vary_headers
from django.utils.deprecation import MiddlewareMixin
from django.utils.regex_helper import _lazy_re_compile
from django.utils.text import compress_sequence, compress_string
re_accepts_gzip = _lazy_re_compile(r"\bgzip\b")
class GZipMiddleware(MiddlewareMixin):
"""
Compress content if the browser allows gzip compression.
Set the Vary header accordingly, so that caches will base their storage
on the Accept-Encoding header.
"""
def process_response(self, request, response):
# It's not worth attempting to compress really short responses.
if not response.streaming and len(response.content) < 200:
return response
# Avoid gzipping if we've already got a content-encoding.
if response.has_header("Content-Encoding"):
return response
patch_vary_headers(response, ("Accept-Encoding",))
ae = request.META.get("HTTP_ACCEPT_ENCODING", "")
if not re_accepts_gzip.search(ae):
return response
if response.streaming:
# Delete the `Content-Length` header for streaming content, because
# we won't know the compressed size until we stream it.
response.streaming_content = compress_sequence(response.streaming_content)
del response.headers["Content-Length"]
else:
# Return the compressed content only if it's actually shorter.
compressed_content = compress_string(response.content)
if len(compressed_content) >= len(response.content):
return response
response.content = compressed_content
response.headers["Content-Length"] = str(len(response.content))
# If there is a strong ETag, make it weak to fulfill the requirements
# of RFC 9110 Section 8.8.1 while also allowing conditional request
# matches on ETags.
etag = response.get("ETag")
if etag and etag.startswith('"'):
response.headers["ETag"] = "W/" + etag
response.headers["Content-Encoding"] = "gzip"
return response
|
91a7eea54b2fe790424527c58682ff3f8d6fb1102dbc175a252b019f813fdaec | """
Cross Site Request Forgery Middleware.
This module provides a middleware that implements protection
against request forgeries from other sites.
"""
import logging
import string
from collections import defaultdict
from urllib.parse import urlparse
from django.conf import settings
from django.core.exceptions import DisallowedHost, ImproperlyConfigured
from django.http import HttpHeaders, UnreadablePostError
from django.urls import get_callable
from django.utils.cache import patch_vary_headers
from django.utils.crypto import constant_time_compare, get_random_string
from django.utils.deprecation import MiddlewareMixin
from django.utils.functional import cached_property
from django.utils.http import is_same_domain
from django.utils.log import log_response
from django.utils.regex_helper import _lazy_re_compile
logger = logging.getLogger("django.security.csrf")
# This matches if any character is not in CSRF_ALLOWED_CHARS.
invalid_token_chars_re = _lazy_re_compile("[^a-zA-Z0-9]")
REASON_BAD_ORIGIN = "Origin checking failed - %s does not match any trusted origins."
REASON_NO_REFERER = "Referer checking failed - no Referer."
REASON_BAD_REFERER = "Referer checking failed - %s does not match any trusted origins."
REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
REASON_CSRF_TOKEN_MISSING = "CSRF token missing."
REASON_MALFORMED_REFERER = "Referer checking failed - Referer is malformed."
REASON_INSECURE_REFERER = (
"Referer checking failed - Referer is insecure while host is secure."
)
# The reason strings below are for passing to InvalidTokenFormat. They are
# phrases without a subject because they can be in reference to either the CSRF
# cookie or non-cookie token.
REASON_INCORRECT_LENGTH = "has incorrect length"
REASON_INVALID_CHARACTERS = "has invalid characters"
CSRF_SECRET_LENGTH = 32
CSRF_TOKEN_LENGTH = 2 * CSRF_SECRET_LENGTH
CSRF_ALLOWED_CHARS = string.ascii_letters + string.digits
CSRF_SESSION_KEY = "_csrftoken"
def _get_failure_view():
"""Return the view to be used for CSRF rejections."""
return get_callable(settings.CSRF_FAILURE_VIEW)
def _get_new_csrf_string():
return get_random_string(CSRF_SECRET_LENGTH, allowed_chars=CSRF_ALLOWED_CHARS)
def _mask_cipher_secret(secret):
"""
Given a secret (assumed to be a string of CSRF_ALLOWED_CHARS), generate a
token by adding a mask and applying it to the secret.
"""
mask = _get_new_csrf_string()
chars = CSRF_ALLOWED_CHARS
pairs = zip((chars.index(x) for x in secret), (chars.index(x) for x in mask))
cipher = "".join(chars[(x + y) % len(chars)] for x, y in pairs)
return mask + cipher
def _unmask_cipher_token(token):
"""
Given a token (assumed to be a string of CSRF_ALLOWED_CHARS, of length
CSRF_TOKEN_LENGTH, and that its first half is a mask), use it to decrypt
the second half to produce the original secret.
"""
mask = token[:CSRF_SECRET_LENGTH]
token = token[CSRF_SECRET_LENGTH:]
chars = CSRF_ALLOWED_CHARS
pairs = zip((chars.index(x) for x in token), (chars.index(x) for x in mask))
return "".join(chars[x - y] for x, y in pairs) # Note negative values are ok
def _add_new_csrf_cookie(request):
"""Generate a new random CSRF_COOKIE value, and add it to request.META."""
csrf_secret = _get_new_csrf_string()
request.META.update(
{
# RemovedInDjango50Warning: when the deprecation ends, replace
# with: 'CSRF_COOKIE': csrf_secret
"CSRF_COOKIE": (
_mask_cipher_secret(csrf_secret)
if settings.CSRF_COOKIE_MASKED
else csrf_secret
),
"CSRF_COOKIE_NEEDS_UPDATE": True,
}
)
return csrf_secret
def get_token(request):
"""
Return the CSRF token required for a POST form. The token is an
alphanumeric value. A new token is created if one is not already set.
A side effect of calling this function is to make the csrf_protect
decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie'
header to the outgoing response. For this reason, you may need to use this
function lazily, as is done by the csrf context processor.
"""
if "CSRF_COOKIE" in request.META:
csrf_secret = request.META["CSRF_COOKIE"]
# Since the cookie is being used, flag to send the cookie in
# process_response() (even if the client already has it) in order to
# renew the expiry timer.
request.META["CSRF_COOKIE_NEEDS_UPDATE"] = True
else:
csrf_secret = _add_new_csrf_cookie(request)
return _mask_cipher_secret(csrf_secret)
def rotate_token(request):
"""
Change the CSRF token in use for a request - should be done on login
for security purposes.
"""
_add_new_csrf_cookie(request)
class InvalidTokenFormat(Exception):
def __init__(self, reason):
self.reason = reason
def _check_token_format(token):
"""
Raise an InvalidTokenFormat error if the token has an invalid length or
characters that aren't allowed. The token argument can be a CSRF cookie
secret or non-cookie CSRF token, and either masked or unmasked.
"""
if len(token) not in (CSRF_TOKEN_LENGTH, CSRF_SECRET_LENGTH):
raise InvalidTokenFormat(REASON_INCORRECT_LENGTH)
# Make sure all characters are in CSRF_ALLOWED_CHARS.
if invalid_token_chars_re.search(token):
raise InvalidTokenFormat(REASON_INVALID_CHARACTERS)
def _does_token_match(request_csrf_token, csrf_secret):
"""
Return whether the given CSRF token matches the given CSRF secret, after
unmasking the token if necessary.
This function assumes that the request_csrf_token argument has been
validated to have the correct length (CSRF_SECRET_LENGTH or
CSRF_TOKEN_LENGTH characters) and allowed characters, and that if it has
length CSRF_TOKEN_LENGTH, it is a masked secret.
"""
# Only unmask tokens that are exactly CSRF_TOKEN_LENGTH characters long.
if len(request_csrf_token) == CSRF_TOKEN_LENGTH:
request_csrf_token = _unmask_cipher_token(request_csrf_token)
assert len(request_csrf_token) == CSRF_SECRET_LENGTH
return constant_time_compare(request_csrf_token, csrf_secret)
class RejectRequest(Exception):
def __init__(self, reason):
self.reason = reason
class CsrfViewMiddleware(MiddlewareMixin):
"""
Require a present and correct csrfmiddlewaretoken for POST requests that
have a CSRF cookie, and set an outgoing CSRF cookie.
This middleware should be used in conjunction with the {% csrf_token %}
template tag.
"""
@cached_property
def csrf_trusted_origins_hosts(self):
return [
urlparse(origin).netloc.lstrip("*")
for origin in settings.CSRF_TRUSTED_ORIGINS
]
@cached_property
def allowed_origins_exact(self):
return {origin for origin in settings.CSRF_TRUSTED_ORIGINS if "*" not in origin}
@cached_property
def allowed_origin_subdomains(self):
"""
A mapping of allowed schemes to list of allowed netlocs, where all
subdomains of the netloc are allowed.
"""
allowed_origin_subdomains = defaultdict(list)
for parsed in (
urlparse(origin)
for origin in settings.CSRF_TRUSTED_ORIGINS
if "*" in origin
):
allowed_origin_subdomains[parsed.scheme].append(parsed.netloc.lstrip("*"))
return allowed_origin_subdomains
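    # For example (an assumed setting, for illustration), with
    # CSRF_TRUSTED_ORIGINS = ["https://*.example.com"] this property
    # evaluates to {"https": [".example.com"]}, allowing any subdomain of
    # example.com over https.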
# The _accept and _reject methods currently only exist for the sake of the
# requires_csrf_token decorator.
def _accept(self, request):
# Avoid checking the request twice by adding a custom attribute to
# request. This will be relevant when both decorator and middleware
# are used.
request.csrf_processing_done = True
return None
def _reject(self, request, reason):
response = _get_failure_view()(request, reason=reason)
log_response(
"Forbidden (%s): %s",
reason,
request.path,
response=response,
request=request,
logger=logger,
)
return response
def _get_secret(self, request):
"""
Return the CSRF secret originally associated with the request, or None
if it didn't have one.
If the CSRF_USE_SESSIONS setting is false, raises InvalidTokenFormat if
the request's secret has invalid characters or an invalid length.
"""
if settings.CSRF_USE_SESSIONS:
try:
csrf_secret = request.session.get(CSRF_SESSION_KEY)
except AttributeError:
raise ImproperlyConfigured(
"CSRF_USE_SESSIONS is enabled, but request.session is not "
"set. SessionMiddleware must appear before CsrfViewMiddleware "
"in MIDDLEWARE."
)
else:
try:
csrf_secret = request.COOKIES[settings.CSRF_COOKIE_NAME]
except KeyError:
csrf_secret = None
else:
# This can raise InvalidTokenFormat.
_check_token_format(csrf_secret)
if csrf_secret is None:
return None
# Django versions before 4.0 masked the secret before storing.
if len(csrf_secret) == CSRF_TOKEN_LENGTH:
csrf_secret = _unmask_cipher_token(csrf_secret)
return csrf_secret
def _set_csrf_cookie(self, request, response):
if settings.CSRF_USE_SESSIONS:
if request.session.get(CSRF_SESSION_KEY) != request.META["CSRF_COOKIE"]:
request.session[CSRF_SESSION_KEY] = request.META["CSRF_COOKIE"]
else:
response.set_cookie(
settings.CSRF_COOKIE_NAME,
request.META["CSRF_COOKIE"],
max_age=settings.CSRF_COOKIE_AGE,
domain=settings.CSRF_COOKIE_DOMAIN,
path=settings.CSRF_COOKIE_PATH,
secure=settings.CSRF_COOKIE_SECURE,
httponly=settings.CSRF_COOKIE_HTTPONLY,
samesite=settings.CSRF_COOKIE_SAMESITE,
)
# Set the Vary header since content varies with the CSRF cookie.
patch_vary_headers(response, ("Cookie",))
def _origin_verified(self, request):
request_origin = request.META["HTTP_ORIGIN"]
try:
good_host = request.get_host()
except DisallowedHost:
pass
else:
good_origin = "%s://%s" % (
"https" if request.is_secure() else "http",
good_host,
)
if request_origin == good_origin:
return True
if request_origin in self.allowed_origins_exact:
return True
try:
parsed_origin = urlparse(request_origin)
except ValueError:
return False
request_scheme = parsed_origin.scheme
request_netloc = parsed_origin.netloc
return any(
is_same_domain(request_netloc, host)
for host in self.allowed_origin_subdomains.get(request_scheme, ())
)
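    # For example (an assumed setting, for illustration), with
    # CSRF_TRUSTED_ORIGINS = ["https://*.example.com"], an Origin of
    # "https://sub.example.com" is verified because
    # is_same_domain("sub.example.com", ".example.com") is True.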
def _check_referer(self, request):
referer = request.META.get("HTTP_REFERER")
if referer is None:
raise RejectRequest(REASON_NO_REFERER)
try:
referer = urlparse(referer)
except ValueError:
raise RejectRequest(REASON_MALFORMED_REFERER)
# Make sure we have a valid URL for Referer.
if "" in (referer.scheme, referer.netloc):
raise RejectRequest(REASON_MALFORMED_REFERER)
# Ensure that our Referer is also secure.
if referer.scheme != "https":
raise RejectRequest(REASON_INSECURE_REFERER)
if any(
is_same_domain(referer.netloc, host)
for host in self.csrf_trusted_origins_hosts
):
return
# Allow matching the configured cookie domain.
good_referer = (
settings.SESSION_COOKIE_DOMAIN
if settings.CSRF_USE_SESSIONS
else settings.CSRF_COOKIE_DOMAIN
)
if good_referer is None:
# If no cookie domain is configured, allow matching the current
# host:port exactly if it's permitted by ALLOWED_HOSTS.
try:
# request.get_host() includes the port.
good_referer = request.get_host()
except DisallowedHost:
raise RejectRequest(REASON_BAD_REFERER % referer.geturl())
else:
server_port = request.get_port()
if server_port not in ("443", "80"):
good_referer = "%s:%s" % (good_referer, server_port)
if not is_same_domain(referer.netloc, good_referer):
raise RejectRequest(REASON_BAD_REFERER % referer.geturl())
def _bad_token_message(self, reason, token_source):
if token_source != "POST":
# Assume it is a settings.CSRF_HEADER_NAME value.
header_name = HttpHeaders.parse_header_name(token_source)
token_source = f"the {header_name!r} HTTP header"
return f"CSRF token from {token_source} {reason}."
def _check_token(self, request):
# Access csrf_secret via self._get_secret() as rotate_token() may have
# been called by an authentication middleware during the
# process_request() phase.
try:
csrf_secret = self._get_secret(request)
except InvalidTokenFormat as exc:
raise RejectRequest(f"CSRF cookie {exc.reason}.")
if csrf_secret is None:
# No CSRF cookie. For POST requests, we insist on a CSRF cookie,
# and in this way we can avoid all CSRF attacks, including login
# CSRF.
raise RejectRequest(REASON_NO_CSRF_COOKIE)
# Check non-cookie token for match.
request_csrf_token = ""
if request.method == "POST":
try:
request_csrf_token = request.POST.get("csrfmiddlewaretoken", "")
except UnreadablePostError:
# Handle a broken connection before we've completed reading the
# POST data. process_view shouldn't raise any exceptions, so
# we'll ignore and serve the user a 403 (assuming they're still
# listening, which they probably aren't because of the error).
pass
if request_csrf_token == "":
# Fall back to X-CSRFToken, to make things easier for AJAX, and
# possible for PUT/DELETE.
try:
# This can have length CSRF_SECRET_LENGTH or CSRF_TOKEN_LENGTH,
# depending on whether the client obtained the token from
# the DOM or the cookie (and if the cookie, whether the cookie
# was masked or unmasked).
request_csrf_token = request.META[settings.CSRF_HEADER_NAME]
except KeyError:
raise RejectRequest(REASON_CSRF_TOKEN_MISSING)
token_source = settings.CSRF_HEADER_NAME
else:
token_source = "POST"
try:
_check_token_format(request_csrf_token)
except InvalidTokenFormat as exc:
reason = self._bad_token_message(exc.reason, token_source)
raise RejectRequest(reason)
if not _does_token_match(request_csrf_token, csrf_secret):
reason = self._bad_token_message("incorrect", token_source)
raise RejectRequest(reason)
def process_request(self, request):
try:
csrf_secret = self._get_secret(request)
except InvalidTokenFormat:
_add_new_csrf_cookie(request)
else:
if csrf_secret is not None:
                # Use the same secret next time. If the secret was originally
                # masked, this also causes it to be replaced with the unmasked
                # form, but only in cases where the secret is already getting
                # saved anyway.
request.META["CSRF_COOKIE"] = csrf_secret
def process_view(self, request, callback, callback_args, callback_kwargs):
if getattr(request, "csrf_processing_done", False):
return None
# Wait until request.META["CSRF_COOKIE"] has been manipulated before
# bailing out, so that get_token still works
if getattr(callback, "csrf_exempt", False):
return None
# Assume that anything not defined as 'safe' by RFC 9110 needs protection
if request.method in ("GET", "HEAD", "OPTIONS", "TRACE"):
return self._accept(request)
if getattr(request, "_dont_enforce_csrf_checks", False):
# Mechanism to turn off CSRF checks for test suite. It comes after
# the creation of CSRF cookies, so that everything else continues
# to work exactly the same (e.g. cookies are sent, etc.), but
# before any branches that call the _reject method.
return self._accept(request)
# Reject the request if the Origin header doesn't match an allowed
# value.
if "HTTP_ORIGIN" in request.META:
if not self._origin_verified(request):
return self._reject(
request, REASON_BAD_ORIGIN % request.META["HTTP_ORIGIN"]
)
elif request.is_secure():
# If the Origin header wasn't provided, reject HTTPS requests if
# the Referer header doesn't match an allowed value.
#
# Suppose user visits http://example.com/
# An active network attacker (man-in-the-middle, MITM) sends a
# POST form that targets https://example.com/detonate-bomb/ and
# submits it via JavaScript.
#
# The attacker will need to provide a CSRF cookie and token, but
# that's no problem for a MITM and the session-independent secret
# we're using. So the MITM can circumvent the CSRF protection. This
# is true for any HTTP connection, but anyone using HTTPS expects
# better! For this reason, for https://example.com/ we need
# additional protection that treats http://example.com/ as
# completely untrusted. Under HTTPS, Barth et al. found that the
# Referer header is missing for same-domain requests in only about
# 0.2% of cases or less, so we can use strict Referer checking.
try:
self._check_referer(request)
except RejectRequest as exc:
return self._reject(request, exc.reason)
try:
self._check_token(request)
except RejectRequest as exc:
return self._reject(request, exc.reason)
return self._accept(request)
def process_response(self, request, response):
if request.META.get("CSRF_COOKIE_NEEDS_UPDATE"):
self._set_csrf_cookie(request, response)
# Unset the flag to prevent _set_csrf_cookie() from being
# unnecessarily called again in process_response() by other
# instances of CsrfViewMiddleware. This can happen e.g. when both a
# decorator and middleware are used. However,
# CSRF_COOKIE_NEEDS_UPDATE is still respected in subsequent calls
# e.g. in case rotate_token() is called in process_response() later
# by custom middleware but before those subsequent calls.
request.META["CSRF_COOKIE_NEEDS_UPDATE"] = False
return response
|
d87f8352bd4768244f166178a0976dd99ed739e60b67af117933a98f8fad9bd1 | import re
from django.core.exceptions import ValidationError
from django.forms.utils import pretty_name
from django.forms.widgets import MultiWidget, Textarea, TextInput
from django.utils.functional import cached_property
from django.utils.html import format_html, html_safe
from django.utils.translation import gettext_lazy as _
__all__ = ("BoundField",)
@html_safe
class BoundField:
"A Field plus data"
def __init__(self, form, field, name):
self.form = form
self.field = field
self.name = name
self.html_name = form.add_prefix(name)
self.html_initial_name = form.add_initial_prefix(name)
self.html_initial_id = form.add_initial_prefix(self.auto_id)
if self.field.label is None:
self.label = pretty_name(name)
else:
self.label = self.field.label
self.help_text = field.help_text or ""
def __str__(self):
"""Render this field as an HTML widget."""
if self.field.show_hidden_initial:
return self.as_widget() + self.as_hidden(only_initial=True)
return self.as_widget()
@cached_property
def subwidgets(self):
"""
Most widgets yield a single subwidget, but others like RadioSelect and
CheckboxSelectMultiple produce one subwidget for each choice.
This property is cached so that only one database query occurs when
rendering ModelChoiceFields.
"""
id_ = self.field.widget.attrs.get("id") or self.auto_id
attrs = {"id": id_} if id_ else {}
attrs = self.build_widget_attrs(attrs)
return [
BoundWidget(self.field.widget, widget, self.form.renderer)
for widget in self.field.widget.subwidgets(
self.html_name, self.value(), attrs=attrs
)
]
def __bool__(self):
# BoundField evaluates to True even if it doesn't have subwidgets.
return True
def __iter__(self):
return iter(self.subwidgets)
def __len__(self):
return len(self.subwidgets)
def __getitem__(self, idx):
# Prevent unnecessary reevaluation when accessing BoundField's attrs
# from templates.
if not isinstance(idx, (int, slice)):
raise TypeError(
"BoundField indices must be integers or slices, not %s."
% type(idx).__name__
)
return self.subwidgets[idx]
@property
def errors(self):
"""
Return an ErrorList (empty if there are no errors) for this field.
"""
return self.form.errors.get(
self.name, self.form.error_class(renderer=self.form.renderer)
)
def as_widget(self, widget=None, attrs=None, only_initial=False):
"""
Render the field by rendering the passed widget, adding any HTML
attributes passed as attrs. If a widget isn't specified, use the
field's default widget.
"""
widget = widget or self.field.widget
if self.field.localize:
widget.is_localized = True
attrs = attrs or {}
attrs = self.build_widget_attrs(attrs, widget)
if self.auto_id and "id" not in widget.attrs:
attrs.setdefault(
"id", self.html_initial_id if only_initial else self.auto_id
)
if only_initial and self.html_initial_name in self.form.data:
# Propagate the hidden initial value.
value = self.form._widget_data_value(
self.field.hidden_widget(),
self.html_initial_name,
)
else:
value = self.value()
return widget.render(
name=self.html_initial_name if only_initial else self.html_name,
value=value,
attrs=attrs,
renderer=self.form.renderer,
)
def as_text(self, attrs=None, **kwargs):
"""
Return a string of HTML for representing this as an <input type="text">.
"""
return self.as_widget(TextInput(), attrs, **kwargs)
def as_textarea(self, attrs=None, **kwargs):
"""Return a string of HTML for representing this as a <textarea>."""
return self.as_widget(Textarea(), attrs, **kwargs)
def as_hidden(self, attrs=None, **kwargs):
"""
Return a string of HTML for representing this as an <input type="hidden">.
"""
return self.as_widget(self.field.hidden_widget(), attrs, **kwargs)
@property
def data(self):
"""
Return the data for this BoundField, or None if it wasn't given.
"""
return self.form._widget_data_value(self.field.widget, self.html_name)
def value(self):
"""
Return the value for this BoundField, using the initial value if
the form is not bound or the data otherwise.
"""
data = self.initial
if self.form.is_bound:
data = self.field.bound_data(self.data, data)
return self.field.prepare_value(data)
def _has_changed(self):
field = self.field
if field.show_hidden_initial:
hidden_widget = field.hidden_widget()
initial_value = self.form._widget_data_value(
hidden_widget,
self.html_initial_name,
)
try:
initial_value = field.to_python(initial_value)
except ValidationError:
# Always assume data has changed if validation fails.
return True
else:
initial_value = self.initial
return field.has_changed(initial_value, self.data)
def label_tag(self, contents=None, attrs=None, label_suffix=None, tag=None):
"""
Wrap the given contents in a <label>, if the field has an ID attribute.
contents should be mark_safe'd to avoid HTML escaping. If contents
aren't given, use the field's HTML-escaped label.
If attrs are given, use them as HTML attributes on the <label> tag.
label_suffix overrides the form's label_suffix.
"""
contents = contents or self.label
if label_suffix is None:
label_suffix = (
self.field.label_suffix
if self.field.label_suffix is not None
else self.form.label_suffix
)
# Only add the suffix if the label does not end in punctuation.
        # Translators: If found as last label character, these punctuation
        # characters will prevent the default label_suffix from being appended
        # to the label.
if label_suffix and contents and contents[-1] not in _(":?.!"):
contents = format_html("{}{}", contents, label_suffix)
widget = self.field.widget
id_ = widget.attrs.get("id") or self.auto_id
if id_:
id_for_label = widget.id_for_label(id_)
if id_for_label:
attrs = {**(attrs or {}), "for": id_for_label}
if self.field.required and hasattr(self.form, "required_css_class"):
attrs = attrs or {}
if "class" in attrs:
attrs["class"] += " " + self.form.required_css_class
else:
attrs["class"] = self.form.required_css_class
context = {
"field": self,
"label": contents,
"attrs": attrs,
"use_tag": bool(id_),
"tag": tag or "label",
}
return self.form.render(self.form.template_name_label, context)
def legend_tag(self, contents=None, attrs=None, label_suffix=None):
"""
Wrap the given contents in a <legend>, if the field has an ID
attribute. Contents should be mark_safe'd to avoid HTML escaping. If
contents aren't given, use the field's HTML-escaped label.
If attrs are given, use them as HTML attributes on the <legend> tag.
label_suffix overrides the form's label_suffix.
"""
return self.label_tag(contents, attrs, label_suffix, tag="legend")
def css_classes(self, extra_classes=None):
"""
Return a string of space-separated CSS classes for this field.
"""
if hasattr(extra_classes, "split"):
extra_classes = extra_classes.split()
extra_classes = set(extra_classes or [])
if self.errors and hasattr(self.form, "error_css_class"):
extra_classes.add(self.form.error_css_class)
if self.field.required and hasattr(self.form, "required_css_class"):
extra_classes.add(self.form.required_css_class)
return " ".join(extra_classes)
@property
def is_hidden(self):
"""Return True if this BoundField's widget is hidden."""
return self.field.widget.is_hidden
@property
def auto_id(self):
"""
Calculate and return the ID attribute for this BoundField, if the
associated Form has specified auto_id. Return an empty string otherwise.
"""
auto_id = self.form.auto_id # Boolean or string
if auto_id and "%s" in str(auto_id):
return auto_id % self.html_name
elif auto_id:
return self.html_name
return ""
@property
def id_for_label(self):
"""
Wrapper around the field widget's `id_for_label` method.
Useful, for example, for focusing on this field regardless of whether
it has a single widget or a MultiWidget.
"""
widget = self.field.widget
id_ = widget.attrs.get("id") or self.auto_id
return widget.id_for_label(id_)
@cached_property
def initial(self):
return self.form.get_initial_for_field(self.field, self.name)
def build_widget_attrs(self, attrs, widget=None):
widget = widget or self.field.widget
attrs = dict(attrs) # Copy attrs to avoid modifying the argument.
if (
widget.use_required_attribute(self.initial)
and self.field.required
and self.form.use_required_attribute
):
# MultiValueField has require_all_fields: if False, fall back
# on subfields.
if (
hasattr(self.field, "require_all_fields")
and not self.field.require_all_fields
and isinstance(self.field.widget, MultiWidget)
):
for subfield, subwidget in zip(self.field.fields, widget.widgets):
subwidget.attrs["required"] = (
subwidget.use_required_attribute(self.initial)
and subfield.required
)
else:
attrs["required"] = True
if self.field.disabled:
attrs["disabled"] = True
return attrs
@property
def widget_type(self):
return re.sub(
r"widget$|input$", "", self.field.widget.__class__.__name__.lower()
)
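    # For example (illustrative): a Textarea widget yields "textarea",
    # CheckboxInput yields "checkbox", and SplitDateTimeWidget yields
    # "splitdatetime".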
@property
def use_fieldset(self):
"""
Return the value of this BoundField widget's use_fieldset attribute.
"""
return self.field.widget.use_fieldset
@html_safe
class BoundWidget:
"""
A container class used for iterating over widgets. This is useful for
widgets that have choices. For example, the following can be used in a
template:
{% for radio in myform.beatles %}
<label for="{{ radio.id_for_label }}">
{{ radio.choice_label }}
<span class="radio">{{ radio.tag }}</span>
</label>
{% endfor %}
"""
def __init__(self, parent_widget, data, renderer):
self.parent_widget = parent_widget
self.data = data
self.renderer = renderer
def __str__(self):
return self.tag(wrap_label=True)
def tag(self, wrap_label=False):
context = {"widget": {**self.data, "wrap_label": wrap_label}}
return self.parent_widget._render(self.template_name, context, self.renderer)
@property
def template_name(self):
if "template_name" in self.data:
return self.data["template_name"]
return self.parent_widget.template_name
@property
def id_for_label(self):
return self.data["attrs"].get("id")
@property
def choice_label(self):
return self.data["label"]
|
ee50069dee0084e896f13a4b9c69b4364d201d4a287f86e1defd0996f7e92548 | """
Form classes
"""
import copy
import datetime
import warnings
from django.core.exceptions import NON_FIELD_ERRORS, ValidationError
from django.forms.fields import Field, FileField
from django.forms.utils import ErrorDict, ErrorList, RenderableFormMixin
from django.forms.widgets import Media, MediaDefiningClass
from django.utils.datastructures import MultiValueDict
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.functional import cached_property
from django.utils.html import conditional_escape
from django.utils.safestring import SafeString, mark_safe
from django.utils.translation import gettext as _
from .renderers import get_default_renderer
__all__ = ("BaseForm", "Form")
class DeclarativeFieldsMetaclass(MediaDefiningClass):
"""Collect Fields declared on the base classes."""
def __new__(mcs, name, bases, attrs):
# Collect fields from current class and remove them from attrs.
attrs["declared_fields"] = {
key: attrs.pop(key)
for key, value in list(attrs.items())
if isinstance(value, Field)
}
new_class = super().__new__(mcs, name, bases, attrs)
# Walk through the MRO.
declared_fields = {}
for base in reversed(new_class.__mro__):
# Collect fields from base class.
if hasattr(base, "declared_fields"):
declared_fields.update(base.declared_fields)
# Field shadowing.
for attr, value in base.__dict__.items():
if value is None and attr in declared_fields:
declared_fields.pop(attr)
new_class.base_fields = declared_fields
new_class.declared_fields = declared_fields
return new_class
class BaseForm(RenderableFormMixin):
"""
    The main implementation of all the Form logic. Note that this class is
    different from Form. See the comments by the Form class for more info. Any
improvements to the form API should be made to this class, not to the Form
class.
"""
default_renderer = None
field_order = None
prefix = None
use_required_attribute = True
template_name_div = "django/forms/div.html"
template_name_p = "django/forms/p.html"
template_name_table = "django/forms/table.html"
template_name_ul = "django/forms/ul.html"
template_name_label = "django/forms/label.html"
def __init__(
self,
data=None,
files=None,
auto_id="id_%s",
prefix=None,
initial=None,
error_class=ErrorList,
label_suffix=None,
empty_permitted=False,
field_order=None,
use_required_attribute=None,
renderer=None,
):
self.is_bound = data is not None or files is not None
self.data = MultiValueDict() if data is None else data
self.files = MultiValueDict() if files is None else files
self.auto_id = auto_id
if prefix is not None:
self.prefix = prefix
self.initial = initial or {}
self.error_class = error_class
# Translators: This is the default suffix added to form field labels
self.label_suffix = label_suffix if label_suffix is not None else _(":")
self.empty_permitted = empty_permitted
self._errors = None # Stores the errors after clean() has been called.
# The base_fields class attribute is the *class-wide* definition of
# fields. Because a particular *instance* of the class might want to
# alter self.fields, we create self.fields here by copying base_fields.
# Instances should always modify self.fields; they should not modify
# self.base_fields.
self.fields = copy.deepcopy(self.base_fields)
self._bound_fields_cache = {}
self.order_fields(self.field_order if field_order is None else field_order)
if use_required_attribute is not None:
self.use_required_attribute = use_required_attribute
if self.empty_permitted and self.use_required_attribute:
raise ValueError(
"The empty_permitted and use_required_attribute arguments may "
"not both be True."
)
# Initialize form renderer. Use a global default if not specified
# either as an argument or as self.default_renderer.
if renderer is None:
if self.default_renderer is None:
renderer = get_default_renderer()
else:
renderer = self.default_renderer
if isinstance(self.default_renderer, type):
renderer = renderer()
self.renderer = renderer
def order_fields(self, field_order):
"""
Rearrange the fields according to field_order.
field_order is a list of field names specifying the order. Append fields
not included in the list in the default order for backward compatibility
with subclasses not overriding field_order. If field_order is None,
keep all fields in the order defined in the class. Ignore unknown
fields in field_order to allow disabling fields in form subclasses
without redefining ordering.
"""
if field_order is None:
return
fields = {}
for key in field_order:
try:
fields[key] = self.fields.pop(key)
except KeyError: # ignore unknown fields
pass
fields.update(self.fields) # add remaining fields in original order
self.fields = fields
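    # A small sketch (hypothetical field names): for a form declaring the
    # fields "name", "email", "message" in that order, calling
    #
    #     >>> form.order_fields(["message", "name"])
    #
    # reorders self.fields to "message", "name", "email"; unknown names are
    # ignored and unlisted fields keep their original relative order.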
def __repr__(self):
if self._errors is None:
is_valid = "Unknown"
else:
is_valid = self.is_bound and not self._errors
return "<%(cls)s bound=%(bound)s, valid=%(valid)s, fields=(%(fields)s)>" % {
"cls": self.__class__.__name__,
"bound": self.is_bound,
"valid": is_valid,
"fields": ";".join(self.fields),
}
def _bound_items(self):
"""Yield (name, bf) pairs, where bf is a BoundField object."""
for name in self.fields:
yield name, self[name]
def __iter__(self):
"""Yield the form's fields as BoundField objects."""
for name in self.fields:
yield self[name]
def __getitem__(self, name):
"""Return a BoundField with the given name."""
try:
field = self.fields[name]
except KeyError:
raise KeyError(
"Key '%s' not found in '%s'. Choices are: %s."
% (
name,
self.__class__.__name__,
", ".join(sorted(self.fields)),
)
)
if name not in self._bound_fields_cache:
self._bound_fields_cache[name] = field.get_bound_field(self, name)
return self._bound_fields_cache[name]
@property
def errors(self):
"""Return an ErrorDict for the data provided for the form."""
if self._errors is None:
self.full_clean()
return self._errors
def is_valid(self):
"""Return True if the form has no errors, or False otherwise."""
return self.is_bound and not self.errors
def add_prefix(self, field_name):
"""
Return the field name with a prefix appended, if this Form has a
prefix set.
Subclasses may wish to override.
"""
return "%s-%s" % (self.prefix, field_name) if self.prefix else field_name
def add_initial_prefix(self, field_name):
"""Add an 'initial' prefix for checking dynamic initial values."""
return "initial-%s" % self.add_prefix(field_name)
def _widget_data_value(self, widget, html_name):
# value_from_datadict() gets the data from the data dictionaries.
# Each widget type knows how to retrieve its own data, because some
# widgets split data over several HTML fields.
return widget.value_from_datadict(self.data, self.files, html_name)
def _html_output(
self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row
):
"Output HTML. Used by as_table(), as_ul(), as_p()."
warnings.warn(
"django.forms.BaseForm._html_output() is deprecated. "
"Please use .render() and .get_context() instead.",
RemovedInDjango50Warning,
stacklevel=2,
)
# Errors that should be displayed above all fields.
top_errors = self.non_field_errors().copy()
output, hidden_fields = [], []
for name, bf in self._bound_items():
field = bf.field
html_class_attr = ""
bf_errors = self.error_class(bf.errors)
if bf.is_hidden:
if bf_errors:
top_errors.extend(
[
_("(Hidden field %(name)s) %(error)s")
% {"name": name, "error": str(e)}
for e in bf_errors
]
)
hidden_fields.append(str(bf))
else:
# Create a 'class="..."' attribute if the row should have any
# CSS classes applied.
css_classes = bf.css_classes()
if css_classes:
html_class_attr = ' class="%s"' % css_classes
if errors_on_separate_row and bf_errors:
output.append(error_row % str(bf_errors))
if bf.label:
label = conditional_escape(bf.label)
label = bf.label_tag(label) or ""
else:
label = ""
if field.help_text:
help_text = help_text_html % field.help_text
else:
help_text = ""
output.append(
normal_row
% {
"errors": bf_errors,
"label": label,
"field": bf,
"help_text": help_text,
"html_class_attr": html_class_attr,
"css_classes": css_classes,
"field_name": bf.html_name,
}
)
if top_errors:
output.insert(0, error_row % top_errors)
if hidden_fields: # Insert any hidden fields in the last row.
str_hidden = "".join(hidden_fields)
if output:
last_row = output[-1]
# Chop off the trailing row_ender (e.g. '</td></tr>') and
# insert the hidden fields.
if not last_row.endswith(row_ender):
# This can happen in the as_p() case (and possibly others
# that users write): if there are only top errors, we may
# not be able to conscript the last row for our purposes,
# so insert a new, empty row.
last_row = normal_row % {
"errors": "",
"label": "",
"field": "",
"help_text": "",
"html_class_attr": html_class_attr,
"css_classes": "",
"field_name": "",
}
output.append(last_row)
output[-1] = last_row[: -len(row_ender)] + str_hidden + row_ender
else:
# If there aren't any rows in the output, just append the
# hidden fields.
output.append(str_hidden)
return mark_safe("\n".join(output))
@property
def template_name(self):
return self.renderer.form_template_name
def get_context(self):
fields = []
hidden_fields = []
top_errors = self.non_field_errors().copy()
for name, bf in self._bound_items():
bf_errors = self.error_class(bf.errors, renderer=self.renderer)
if bf.is_hidden:
if bf_errors:
top_errors += [
_("(Hidden field %(name)s) %(error)s")
% {"name": name, "error": str(e)}
for e in bf_errors
]
hidden_fields.append(bf)
else:
errors_str = str(bf_errors)
# RemovedInDjango50Warning.
if not isinstance(errors_str, SafeString):
warnings.warn(
f"Returning a plain string from "
f"{self.error_class.__name__} is deprecated. Please "
f"customize via the template system instead.",
RemovedInDjango50Warning,
)
errors_str = mark_safe(errors_str)
fields.append((bf, errors_str))
return {
"form": self,
"fields": fields,
"hidden_fields": hidden_fields,
"errors": top_errors,
}
def non_field_errors(self):
"""
Return an ErrorList of errors that aren't associated with a particular
field -- i.e., from Form.clean(). Return an empty ErrorList if there
are none.
"""
return self.errors.get(
NON_FIELD_ERRORS,
self.error_class(error_class="nonfield", renderer=self.renderer),
)
def add_error(self, field, error):
"""
Update the content of `self._errors`.
The `field` argument is the name of the field to which the errors
should be added. If it's None, treat the errors as NON_FIELD_ERRORS.
        The `error` argument can be a single error, a list of errors, or a
        dictionary that maps field names to lists of errors. An "error" can be
        either a simple string or an instance of ValidationError with its
        message attribute set, and a "list or dictionary" can be an actual
        `list` or `dict` or an instance of ValidationError with its
        `error_list` or `error_dict` attribute set.
If `error` is a dictionary, the `field` argument *must* be None and
errors will be added to the fields that correspond to the keys of the
dictionary.
"""
if not isinstance(error, ValidationError):
# Normalize to ValidationError and let its constructor
# do the hard work of making sense of the input.
error = ValidationError(error)
if hasattr(error, "error_dict"):
if field is not None:
raise TypeError(
"The argument `field` must be `None` when the `error` "
"argument contains errors for multiple fields."
)
else:
error = error.error_dict
else:
error = {field or NON_FIELD_ERRORS: error.error_list}
for field, error_list in error.items():
if field not in self.errors:
if field != NON_FIELD_ERRORS and field not in self.fields:
raise ValueError(
"'%s' has no field named '%s'."
% (self.__class__.__name__, field)
)
if field == NON_FIELD_ERRORS:
self._errors[field] = self.error_class(
error_class="nonfield", renderer=self.renderer
)
else:
self._errors[field] = self.error_class(renderer=self.renderer)
self._errors[field].extend(error_list)
if field in self.cleaned_data:
del self.cleaned_data[field]
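    # A usage sketch (illustrative; assumes a bound form with a "name"
    # field, after is_valid()/full_clean() has run):
    #
    #     >>> form.add_error(None, "Something went wrong.")
    #     >>> form.add_error("name", ValidationError("Too short.", code="min"))
    #     >>> form.has_error("name", code="min")
    #     True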
def has_error(self, field, code=None):
return field in self.errors and (
code is None
or any(error.code == code for error in self.errors.as_data()[field])
)
def full_clean(self):
"""
Clean all of self.data and populate self._errors and self.cleaned_data.
"""
self._errors = ErrorDict()
if not self.is_bound: # Stop further processing.
return
self.cleaned_data = {}
# If the form is permitted to be empty, and none of the form data has
# changed from the initial data, short circuit any validation.
if self.empty_permitted and not self.has_changed():
return
self._clean_fields()
self._clean_form()
self._post_clean()
def _clean_fields(self):
for name, bf in self._bound_items():
field = bf.field
value = bf.initial if field.disabled else bf.data
try:
if isinstance(field, FileField):
value = field.clean(value, bf.initial)
else:
value = field.clean(value)
self.cleaned_data[name] = value
if hasattr(self, "clean_%s" % name):
value = getattr(self, "clean_%s" % name)()
self.cleaned_data[name] = value
except ValidationError as e:
self.add_error(name, e)
def _clean_form(self):
try:
cleaned_data = self.clean()
except ValidationError as e:
self.add_error(None, e)
else:
if cleaned_data is not None:
self.cleaned_data = cleaned_data
def _post_clean(self):
"""
An internal hook for performing additional cleaning after form cleaning
is complete. Used for model validation in model forms.
"""
pass
def clean(self):
"""
Hook for doing any extra form-wide cleaning after Field.clean() has been
called on every field. Any ValidationError raised by this method will
not be associated with a particular field; it will have a special-case
association with the field named '__all__'.
"""
return self.cleaned_data
def has_changed(self):
"""Return True if data differs from initial."""
return bool(self.changed_data)
@cached_property
def changed_data(self):
return [name for name, bf in self._bound_items() if bf._has_changed()]
@property
def media(self):
"""Return all media required to render the widgets on this form."""
media = Media()
for field in self.fields.values():
media += field.widget.media
return media
def is_multipart(self):
"""
Return True if the form needs to be multipart-encoded, i.e. it has
FileInput, or False otherwise.
"""
return any(field.widget.needs_multipart_form for field in self.fields.values())
def hidden_fields(self):
"""
Return a list of all the BoundField objects that are hidden fields.
Useful for manual form layout in templates.
"""
return [field for field in self if field.is_hidden]
def visible_fields(self):
"""
Return a list of BoundField objects that aren't hidden fields.
The opposite of the hidden_fields() method.
"""
return [field for field in self if not field.is_hidden]
def get_initial_for_field(self, field, field_name):
"""
Return initial data for field on form. Use initial data from the form
or the field, in that order. Evaluate callable values.
"""
value = self.initial.get(field_name, field.initial)
if callable(value):
value = value()
# If this is an auto-generated default date, nix the microseconds
# for standardized handling. See #22502.
if (
isinstance(value, (datetime.datetime, datetime.time))
and not field.widget.supports_microseconds
):
value = value.replace(microsecond=0)
return value
class Form(BaseForm, metaclass=DeclarativeFieldsMetaclass):
"A collection of Fields, plus their associated data."
# This is a separate class from BaseForm in order to abstract the way
# self.fields is specified. This class (Form) is the one that does the
# fancy metaclass stuff purely for the semantic sugar -- it allows one
# to define a form using declarative syntax.
# BaseForm itself has no way of designating self.fields.
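    # A minimal declarative usage sketch (illustrative):
    #
    #     >>> from django import forms
    #     >>> class ContactForm(forms.Form):
    #     ...     name = forms.CharField()
    #     ...     message = forms.CharField(widget=forms.Textarea)
    #     >>> f = ContactForm(data={"name": "Ann", "message": "Hi"})
    #     >>> f.is_valid()
    #     True
    #     >>> f.cleaned_data["name"]
    #     'Ann'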
|
b95533d28bfe7a6736f616c3efc40a2a46bf4752f89a90c33e19327e4c788f34 | from django.http.cookie import SimpleCookie, parse_cookie
from django.http.request import (
HttpHeaders,
HttpRequest,
QueryDict,
RawPostDataException,
UnreadablePostError,
)
from django.http.response import (
BadHeaderError,
FileResponse,
Http404,
HttpResponse,
HttpResponseBadRequest,
HttpResponseBase,
HttpResponseForbidden,
HttpResponseGone,
HttpResponseNotAllowed,
HttpResponseNotFound,
HttpResponseNotModified,
HttpResponsePermanentRedirect,
HttpResponseRedirect,
HttpResponseServerError,
JsonResponse,
StreamingHttpResponse,
)
__all__ = [
"SimpleCookie",
"parse_cookie",
"HttpHeaders",
"HttpRequest",
"QueryDict",
"RawPostDataException",
"UnreadablePostError",
"HttpResponse",
"HttpResponseBase",
"StreamingHttpResponse",
"HttpResponseRedirect",
"HttpResponsePermanentRedirect",
"HttpResponseNotModified",
"HttpResponseBadRequest",
"HttpResponseForbidden",
"HttpResponseNotFound",
"HttpResponseNotAllowed",
"HttpResponseGone",
"HttpResponseServerError",
"Http404",
"BadHeaderError",
"JsonResponse",
"FileResponse",
]
|
2b3e1ce47b8791555b00c2ba392e77154650545b61d6bfc246a3cb44e093ec22 | """
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
import base64
import binascii
import collections
import html
from django.conf import settings
from django.core.exceptions import (
RequestDataTooBig,
SuspiciousMultipartForm,
TooManyFieldsSent,
)
from django.core.files.uploadhandler import SkipFile, StopFutureHandlers, StopUpload
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str
from django.utils.http import parse_header_parameters
from django.utils.regex_helper import _lazy_re_compile
__all__ = ("MultiPartParser", "MultiPartParserError", "InputStreamExhausted")
class MultiPartParserError(Exception):
pass
class InputStreamExhausted(Exception):
"""
    No more reads are allowed from this stream.
"""
pass
RAW = "raw"
FILE = "file"
FIELD = "field"
class MultiPartParser:
"""
An RFC 7578 multipart/form-data parser.
    ``MultiPartParser.parse()`` reads the input stream in ``chunk_size`` chunks
and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
"""
boundary_re = _lazy_re_compile(r"[ -~]{0,200}[!-~]")
def __init__(self, META, input_data, upload_handlers, encoding=None):
"""
Initialize the MultiPartParser object.
:META:
The standard ``META`` dictionary in Django request objects.
:input_data:
The raw post data, as a file-like object.
:upload_handlers:
A list of UploadHandler instances that perform operations on the
uploaded data.
:encoding:
The encoding with which to treat the incoming data.
"""
# Content-Type should contain multipart and the boundary information.
content_type = META.get("CONTENT_TYPE", "")
if not content_type.startswith("multipart/"):
raise MultiPartParserError("Invalid Content-Type: %s" % content_type)
try:
content_type.encode("ascii")
except UnicodeEncodeError:
raise MultiPartParserError(
"Invalid non-ASCII Content-Type in multipart: %s"
% force_str(content_type)
)
# Parse the header to get the boundary to split the parts.
_, opts = parse_header_parameters(content_type)
boundary = opts.get("boundary")
if not boundary or not self.boundary_re.fullmatch(boundary):
raise MultiPartParserError(
"Invalid boundary in multipart: %s" % force_str(boundary)
)
# Content-Length should contain the length of the body we are about
# to receive.
try:
content_length = int(META.get("CONTENT_LENGTH", 0))
except (ValueError, TypeError):
content_length = 0
if content_length < 0:
            # This means we shouldn't continue; raise an error.
raise MultiPartParserError("Invalid content length: %r" % content_length)
self._boundary = boundary.encode("ascii")
self._input_data = input_data
# For compatibility with low-level network APIs (with 32-bit integers),
# the chunk size should be < 2^31, but still divisible by 4.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
self._chunk_size = min([2**31 - 4] + possible_sizes)
self._meta = META
self._encoding = encoding or settings.DEFAULT_CHARSET
self._content_length = content_length
self._upload_handlers = upload_handlers
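    # A usage sketch (illustrative; ``request`` is a Django request whose
    # upload handlers have been initialized):
    #
    #     parser = MultiPartParser(
    #         request.META, request, request.upload_handlers
    #     )
    #     post, files = parser.parse()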
def parse(self):
"""
Parse the POST data and break it into a FILES MultiValueDict and a POST
MultiValueDict.
Return a tuple containing the POST and FILES dictionary, respectively.
"""
from django.http import QueryDict
encoding = self._encoding
handlers = self._upload_handlers
        # The HTTP spec says that Content-Length >= 0 is valid, so handle
        # content_length == 0 before continuing.
if self._content_length == 0:
return QueryDict(encoding=self._encoding), MultiValueDict()
# See if any of the handlers take care of the parsing.
# This allows overriding everything if need be.
for handler in handlers:
result = handler.handle_raw_input(
self._input_data,
self._meta,
self._content_length,
self._boundary,
encoding,
)
# Check to see if it was handled
if result is not None:
return result[0], result[1]
# Create the data structures to be used later.
self._post = QueryDict(mutable=True)
self._files = MultiValueDict()
# Instantiate the parser and stream:
stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))
# Whether or not to signal a file-completion at the beginning of the loop.
old_field_name = None
counters = [0] * len(handlers)
# Number of bytes that have been read.
num_bytes_read = 0
# To count the number of keys in the request.
num_post_keys = 0
# To limit the amount of data read from the request.
read_size = None
# Whether a file upload is finished.
uploaded_file = True
try:
for item_type, meta_data, field_stream in Parser(stream, self._boundary):
if old_field_name:
# We run this at the beginning of the next loop
# since we cannot be sure a file is complete until
# we hit the next boundary/part of the multipart content.
self.handle_file_complete(old_field_name, counters)
old_field_name = None
uploaded_file = True
try:
disposition = meta_data["content-disposition"][1]
field_name = disposition["name"].strip()
except (KeyError, IndexError, AttributeError):
continue
transfer_encoding = meta_data.get("content-transfer-encoding")
if transfer_encoding is not None:
transfer_encoding = transfer_encoding[0].strip()
field_name = force_str(field_name, encoding, errors="replace")
if item_type == FIELD:
# Avoid storing more than DATA_UPLOAD_MAX_NUMBER_FIELDS.
num_post_keys += 1
if (
settings.DATA_UPLOAD_MAX_NUMBER_FIELDS is not None
and settings.DATA_UPLOAD_MAX_NUMBER_FIELDS < num_post_keys
):
raise TooManyFieldsSent(
"The number of GET/POST parameters exceeded "
"settings.DATA_UPLOAD_MAX_NUMBER_FIELDS."
)
# Avoid reading more than DATA_UPLOAD_MAX_MEMORY_SIZE.
if settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None:
read_size = (
settings.DATA_UPLOAD_MAX_MEMORY_SIZE - num_bytes_read
)
                    # This is a POST field; we can just set it in the POST data.
if transfer_encoding == "base64":
raw_data = field_stream.read(size=read_size)
num_bytes_read += len(raw_data)
try:
data = base64.b64decode(raw_data)
except binascii.Error:
data = raw_data
else:
data = field_stream.read(size=read_size)
num_bytes_read += len(data)
# Add two here to make the check consistent with the
# x-www-form-urlencoded check that includes '&='.
num_bytes_read += len(field_name) + 2
if (
settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None
and num_bytes_read > settings.DATA_UPLOAD_MAX_MEMORY_SIZE
):
raise RequestDataTooBig(
"Request body exceeded "
"settings.DATA_UPLOAD_MAX_MEMORY_SIZE."
)
self._post.appendlist(
field_name, force_str(data, encoding, errors="replace")
)
elif item_type == FILE:
# This is a file, use the handler...
file_name = disposition.get("filename")
if file_name:
file_name = force_str(file_name, encoding, errors="replace")
file_name = self.sanitize_file_name(file_name)
if not file_name:
continue
content_type, content_type_extra = meta_data.get(
"content-type", ("", {})
)
content_type = content_type.strip()
charset = content_type_extra.get("charset")
try:
content_length = int(meta_data.get("content-length")[0])
except (IndexError, TypeError, ValueError):
content_length = None
counters = [0] * len(handlers)
uploaded_file = False
try:
for handler in handlers:
try:
handler.new_file(
field_name,
file_name,
content_type,
content_length,
charset,
content_type_extra,
)
except StopFutureHandlers:
break
for chunk in field_stream:
if transfer_encoding == "base64":
                                    # We only special-case the base64
                                    # transfer encoding. Base64 must be
                                    # decoded in multiples of 4 bytes,
                                    # ignoring whitespace.
stripped_chunk = b"".join(chunk.split())
remaining = len(stripped_chunk) % 4
while remaining != 0:
over_chunk = field_stream.read(4 - remaining)
if not over_chunk:
break
stripped_chunk += b"".join(over_chunk.split())
remaining = len(stripped_chunk) % 4
try:
chunk = base64.b64decode(stripped_chunk)
except Exception as exc:
# Since this is only a chunk, any error is
# an unfixable error.
raise MultiPartParserError(
"Could not decode base64 data."
) from exc
for i, handler in enumerate(handlers):
chunk_length = len(chunk)
chunk = handler.receive_data_chunk(chunk, counters[i])
counters[i] += chunk_length
if chunk is None:
# Don't continue if the chunk received by
# the handler is None.
break
except SkipFile:
self._close_files()
# Just use up the rest of this file...
exhaust(field_stream)
else:
# Handle file upload completions on next iteration.
old_field_name = field_name
else:
                    # If this is neither a FIELD nor a FILE, just exhaust the
                    # stream.
exhaust(stream)
except StopUpload as e:
self._close_files()
if not e.connection_reset:
exhaust(self._input_data)
else:
if not uploaded_file:
for handler in handlers:
handler.upload_interrupted()
# Make sure that the request data is all fed
exhaust(self._input_data)
# Signal that the upload has completed.
# any() shortcircuits if a handler's upload_complete() returns a value.
any(handler.upload_complete() for handler in handlers)
self._post._mutable = False
return self._post, self._files
def handle_file_complete(self, old_field_name, counters):
"""
Handle all the signaling that takes place when a file is complete.
"""
for i, handler in enumerate(self._upload_handlers):
file_obj = handler.file_complete(counters[i])
if file_obj:
# If it returns a file object, then set the files dict.
self._files.appendlist(
force_str(old_field_name, self._encoding, errors="replace"),
file_obj,
)
break
def sanitize_file_name(self, file_name):
"""
Sanitize the filename of an upload.
Remove all possible path separators, even though that might remove more
than actually required by the target system. Filenames that could
potentially cause problems (current/parent dir) are also discarded.
It should be noted that this function could still return a "filepath"
like "C:some_file.txt" which is handled later on by the storage layer.
So while this function does sanitize filenames to some extent, the
resulting filename should still be considered as untrusted user input.
"""
file_name = html.unescape(file_name)
file_name = file_name.rsplit("/")[-1]
file_name = file_name.rsplit("\\")[-1]
# Remove non-printable characters.
file_name = "".join([char for char in file_name if char.isprintable()])
if file_name in {"", ".", ".."}:
return None
return file_name
IE_sanitize = sanitize_file_name
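    # For example (with ``parser`` an instance of this class, illustrative):
    #
    #     >>> parser.sanitize_file_name("../../etc/passwd")
    #     'passwd'
    #     >>> parser.sanitize_file_name("..") is None
    #     True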
def _close_files(self):
# Free up all file handles.
# FIXME: this currently assumes that upload handlers store the file as 'file'
# We should document that...
# (Maybe add handler.free_file to complement new_file)
for handler in self._upload_handlers:
if hasattr(handler, "file"):
handler.file.close()
class LazyStream:
"""
The LazyStream wrapper allows one to get and "unget" bytes from a stream.
Given a producer object (an iterator that yields bytestrings), the
LazyStream object will support iteration, reading, and keeping a "look-back"
variable in case you need to "unget" some bytes.
"""
def __init__(self, producer, length=None):
"""
Every LazyStream must have a producer when instantiated.
        A producer is an iterator that yields a bytestring on each
        iteration.
"""
self._producer = producer
self._empty = False
self._leftover = b""
self.length = length
self.position = 0
self._remaining = length
self._unget_history = []
def tell(self):
return self.position
def read(self, size=None):
def parts():
remaining = self._remaining if size is None else size
# do the whole thing in one shot if no limit was provided.
if remaining is None:
yield b"".join(self)
return
            # otherwise do some bookkeeping to return exactly enough
            # of the stream and stash any extra content we get from
            # the producer
while remaining != 0:
assert remaining > 0, "remaining bytes to read should never go negative"
try:
chunk = next(self)
except StopIteration:
return
else:
emitting = chunk[:remaining]
self.unget(chunk[remaining:])
remaining -= len(emitting)
yield emitting
return b"".join(parts())
def __next__(self):
"""
Used when the exact number of bytes to read is unimportant.
Return whatever chunk is conveniently returned from the iterator.
Useful to avoid unnecessary bookkeeping if performance is an issue.
"""
if self._leftover:
output = self._leftover
self._leftover = b""
else:
output = next(self._producer)
self._unget_history = []
self.position += len(output)
return output
def close(self):
"""
Used to invalidate/disable this lazy stream.
Replace the producer with an empty list. Any leftover bytes that have
already been read will still be reported upon read() and/or next().
"""
self._producer = []
def __iter__(self):
return self
def unget(self, bytes):
"""
Place bytes back onto the front of the lazy stream.
Future calls to read() will return those bytes first. The
stream position and thus tell() will be rewound.
"""
if not bytes:
return
self._update_unget_history(len(bytes))
self.position -= len(bytes)
self._leftover = bytes + self._leftover
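    # A doctest-style sketch of the read()/unget() interplay (illustrative):
    #
    #     >>> stream = LazyStream(iter([b"abcdef"]))
    #     >>> stream.read(3)
    #     b'abc'
    #     >>> stream.unget(b"abc")
    #     >>> stream.tell()
    #     0
    #     >>> stream.read()
    #     b'abcdef'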
def _update_unget_history(self, num_bytes):
"""
Update the unget history as a sanity check to see if we've pushed
back the same number of bytes in one chunk. If we keep ungetting the
        same number of bytes many times (here, 50), we're most likely in an
infinite loop of some sort. This is usually caused by a
maliciously-malformed MIME request.
"""
self._unget_history = [num_bytes] + self._unget_history[:49]
number_equal = len(
[
current_number
for current_number in self._unget_history
if current_number == num_bytes
]
)
if number_equal > 40:
raise SuspiciousMultipartForm(
"The multipart parser got stuck, which shouldn't happen with"
" normal uploaded files. Check for malicious upload activity;"
" if there is none, report this to the Django developers."
)
class ChunkIter:
"""
An iterable that will yield chunks of data. Given a file-like object as the
constructor, yield chunks of read operations from that object.
"""
def __init__(self, flo, chunk_size=64 * 1024):
self.flo = flo
self.chunk_size = chunk_size
def __next__(self):
try:
data = self.flo.read(self.chunk_size)
except InputStreamExhausted:
raise StopIteration()
if data:
return data
else:
raise StopIteration()
def __iter__(self):
return self
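# For example (illustrative):
#
#     >>> from io import BytesIO
#     >>> list(ChunkIter(BytesIO(b"abcdef"), chunk_size=4))
#     [b'abcd', b'ef']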
class InterBoundaryIter:
"""
A Producer that will iterate over boundaries.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
def __iter__(self):
return self
def __next__(self):
try:
return LazyStream(BoundaryIter(self._stream, self._boundary))
except InputStreamExhausted:
raise StopIteration()
class BoundaryIter:
"""
A Producer that is sensitive to boundaries.
Will happily yield bytes until a boundary is found. Will yield the bytes
before the boundary, throw away the boundary bytes themselves, and push the
post-boundary bytes back on the stream.
    Future calls to next() after locating the boundary will raise a
    StopIteration exception.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
self._done = False
# rollback an additional six bytes because the format is like
# this: CRLF<boundary>[--CRLF]
self._rollback = len(boundary) + 6
        # Read one byte up front to detect an already-exhausted stream, then
        # push it back so the data is left untouched.
unused_char = self._stream.read(1)
if not unused_char:
raise InputStreamExhausted()
self._stream.unget(unused_char)
def __iter__(self):
return self
def __next__(self):
if self._done:
raise StopIteration()
stream = self._stream
rollback = self._rollback
bytes_read = 0
chunks = []
for bytes in stream:
bytes_read += len(bytes)
chunks.append(bytes)
if bytes_read > rollback:
break
if not bytes:
break
else:
self._done = True
if not chunks:
raise StopIteration()
chunk = b"".join(chunks)
boundary = self._find_boundary(chunk)
if boundary:
end, next = boundary
stream.unget(chunk[next:])
self._done = True
return chunk[:end]
else:
# make sure we don't treat a partial boundary (and
# its separators) as data
if not chunk[:-rollback]: # and len(chunk) >= (len(self._boundary) + 6):
# There's nothing left, we should just return and mark as done.
self._done = True
return chunk
else:
stream.unget(chunk[-rollback:])
return chunk[:-rollback]
def _find_boundary(self, data):
"""
Find a multipart boundary in data.
Should no boundary exist in the data, return None. Otherwise, return
a tuple containing the indices of the following:
* the end of current encapsulation
* the start of the next encapsulation
"""
index = data.find(self._boundary)
if index < 0:
return None
else:
end = index
next = index + len(self._boundary)
# backup over CRLF
last = max(0, end - 1)
if data[last : last + 1] == b"\n":
end -= 1
last = max(0, end - 1)
if data[last : last + 1] == b"\r":
end -= 1
return end, next
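    # For example (illustrative), with self._boundary == b"--sep":
    #
    #     >>> self._find_boundary(b"hello\r\n--sep more")
    #     (5, 12)
    #
    # i.e. the encapsulated data is data[:5] (b"hello") and parsing resumes
    # at data[12:].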
def exhaust(stream_or_iterable):
"""Exhaust an iterator or stream."""
try:
iterator = iter(stream_or_iterable)
except TypeError:
iterator = ChunkIter(stream_or_iterable, 16384)
collections.deque(iterator, maxlen=0) # consume iterator quickly.
def parse_boundary_stream(stream, max_header_size):
"""
Parse one and exactly one stream that encapsulates a boundary.
"""
# Stream at beginning of header, look for end of header
# and parse it if found. The header must fit within one
# chunk.
chunk = stream.read(max_header_size)
    # 'find' returns the index of the start of these four bytes, so we'll
    # need to munch them later to prevent them from polluting
    # the payload.
header_end = chunk.find(b"\r\n\r\n")
if header_end == -1:
# we find no header, so we just mark this fact and pass on
# the stream verbatim
stream.unget(chunk)
return (RAW, {}, stream)
header = chunk[:header_end]
# here we place any excess chunk back onto the stream, as
# well as throwing away the CRLFCRLF bytes from above.
stream.unget(chunk[header_end + 4 :])
TYPE = RAW
outdict = {}
    # Parse the header lines, skipping any that are invalid.
for line in header.split(b"\r\n"):
# This terminology ("main value" and "dictionary of
# parameters") is from the Python docs.
try:
main_value_pair, params = parse_header_parameters(line.decode())
name, value = main_value_pair.split(":", 1)
params = {k: v.encode() for k, v in params.items()}
except ValueError: # Invalid header.
continue
if name == "content-disposition":
TYPE = FIELD
if params.get("filename"):
TYPE = FILE
outdict[name] = value, params
if TYPE == RAW:
stream.unget(chunk)
return (TYPE, outdict, stream)
class Parser:
def __init__(self, stream, boundary):
self._stream = stream
self._separator = b"--" + boundary
def __iter__(self):
boundarystream = InterBoundaryIter(self._stream, self._separator)
for sub_stream in boundarystream:
# Iterate over each part
yield parse_boundary_stream(sub_stream, 1024)
|
4cf9bfb66292687136568df9a56ec54222ba1eac9076cbfeca55f52966c4a132 | import codecs
import copy
from io import BytesIO
from itertools import chain
from urllib.parse import parse_qsl, quote, urlencode, urljoin, urlsplit
from django.conf import settings
from django.core import signing
from django.core.exceptions import (
DisallowedHost,
ImproperlyConfigured,
RequestDataTooBig,
TooManyFieldsSent,
)
from django.core.files import uploadhandler
from django.http.multipartparser import MultiPartParser, MultiPartParserError
from django.utils.datastructures import (
CaseInsensitiveMapping,
ImmutableList,
MultiValueDict,
)
from django.utils.encoding import escape_uri_path, iri_to_uri
from django.utils.functional import cached_property
from django.utils.http import is_same_domain, parse_header_parameters
from django.utils.regex_helper import _lazy_re_compile
RAISE_ERROR = object()
host_validation_re = _lazy_re_compile(
r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9\.:]+\])(:[0-9]+)?$"
)
class UnreadablePostError(OSError):
pass
class RawPostDataException(Exception):
"""
You cannot access raw_post_data from a request that has
multipart/* POST data if it has been accessed via POST,
    FILES, etc.
"""
pass
class HttpRequest:
"""A basic HTTP request."""
# The encoding used in GET/POST dicts. None means use default setting.
_encoding = None
_upload_handlers = []
non_picklable_attrs = frozenset(["resolver_match", "_stream"])
def __init__(self):
# WARNING: The `WSGIRequest` subclass doesn't call `super`.
# Any variable assignment made here should also happen in
# `WSGIRequest.__init__()`.
self.GET = QueryDict(mutable=True)
self.POST = QueryDict(mutable=True)
self.COOKIES = {}
self.META = {}
self.FILES = MultiValueDict()
self.path = ""
self.path_info = ""
self.method = None
self.resolver_match = None
self.content_type = None
self.content_params = None
def __repr__(self):
if self.method is None or not self.get_full_path():
return "<%s>" % self.__class__.__name__
return "<%s: %s %r>" % (
self.__class__.__name__,
self.method,
self.get_full_path(),
)
def __getstate__(self):
obj_dict = self.__dict__.copy()
for attr in self.non_picklable_attrs:
if attr in obj_dict:
del obj_dict[attr]
return obj_dict
def __deepcopy__(self, memo):
obj = copy.copy(self)
for attr in self.non_picklable_attrs:
if hasattr(self, attr):
setattr(obj, attr, copy.deepcopy(getattr(self, attr), memo))
memo[id(self)] = obj
return obj
@cached_property
def headers(self):
return HttpHeaders(self.META)
@cached_property
def accepted_types(self):
"""Return a list of MediaType instances."""
return parse_accept_header(self.headers.get("Accept", "*/*"))
def accepts(self, media_type):
return any(
accepted_type.match(media_type) for accepted_type in self.accepted_types
)
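    # Usage sketch (hypothetical header value): with an Accept header of
    # "text/html,application/json;q=0.9", request.accepts("application/json")
    # is True and request.accepts("image/png") is False; with no Accept
    # header at all, the "*/*" default matches every media type.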
def _set_content_type_params(self, meta):
"""Set content_type, content_params, and encoding."""
self.content_type, self.content_params = parse_header_parameters(
meta.get("CONTENT_TYPE", "")
)
if "charset" in self.content_params:
try:
codecs.lookup(self.content_params["charset"])
except LookupError:
pass
else:
self.encoding = self.content_params["charset"]
def _get_raw_host(self):
"""
Return the HTTP host using the environment or request headers. Skip
        allowed hosts protection, so it may return an insecure host.
"""
# We try three options, in order of decreasing preference.
if settings.USE_X_FORWARDED_HOST and ("HTTP_X_FORWARDED_HOST" in self.META):
host = self.META["HTTP_X_FORWARDED_HOST"]
elif "HTTP_HOST" in self.META:
host = self.META["HTTP_HOST"]
else:
# Reconstruct the host using the algorithm from PEP 333.
host = self.META["SERVER_NAME"]
server_port = self.get_port()
if server_port != ("443" if self.is_secure() else "80"):
host = "%s:%s" % (host, server_port)
return host
def get_host(self):
"""Return the HTTP host using the environment or request headers."""
host = self._get_raw_host()
# Allow variants of localhost if ALLOWED_HOSTS is empty and DEBUG=True.
allowed_hosts = settings.ALLOWED_HOSTS
if settings.DEBUG and not allowed_hosts:
allowed_hosts = [".localhost", "127.0.0.1", "[::1]"]
domain, port = split_domain_port(host)
if domain and validate_host(domain, allowed_hosts):
return host
else:
msg = "Invalid HTTP_HOST header: %r." % host
if domain:
msg += " You may need to add %r to ALLOWED_HOSTS." % domain
else:
msg += (
" The domain name provided is not valid according to RFC 1034/1035."
)
raise DisallowedHost(msg)
def get_port(self):
"""Return the port number for the request as a string."""
if settings.USE_X_FORWARDED_PORT and "HTTP_X_FORWARDED_PORT" in self.META:
port = self.META["HTTP_X_FORWARDED_PORT"]
else:
port = self.META["SERVER_PORT"]
return str(port)
def get_full_path(self, force_append_slash=False):
return self._get_full_path(self.path, force_append_slash)
def get_full_path_info(self, force_append_slash=False):
return self._get_full_path(self.path_info, force_append_slash)
def _get_full_path(self, path, force_append_slash):
# RFC 3986 requires query string arguments to be in the ASCII range.
# Rather than crash if this doesn't happen, we encode defensively.
return "%s%s%s" % (
escape_uri_path(path),
"/" if force_append_slash and not path.endswith("/") else "",
("?" + iri_to_uri(self.META.get("QUERY_STRING", "")))
if self.META.get("QUERY_STRING", "")
else "",
)
def get_signed_cookie(self, key, default=RAISE_ERROR, salt="", max_age=None):
"""
Attempt to return a signed cookie. If the signature fails or the
cookie has expired, raise an exception, unless the `default` argument
is provided, in which case return that value.
"""
try:
cookie_value = self.COOKIES[key]
except KeyError:
if default is not RAISE_ERROR:
return default
else:
raise
try:
value = signing.get_cookie_signer(salt=key + salt).unsign(
cookie_value, max_age=max_age
)
except signing.BadSignature:
if default is not RAISE_ERROR:
return default
else:
raise
return value
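    # Usage sketch (hypothetical cookie name):
    #   request.get_signed_cookie("rememberme", default=False, max_age=86400)
    # returns the unsigned value, or False when the cookie is absent, its
    # signature doesn't verify, or it is older than a day.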
def build_absolute_uri(self, location=None):
"""
Build an absolute URI from the location and the variables available in
this request. If no ``location`` is specified, build the absolute URI
using request.get_full_path(). If the location is absolute, convert it
to an RFC 3987 compliant URI and return it. If location is relative or
is scheme-relative (i.e., ``//example.com/``), urljoin() it to a base
URL constructed from the request variables.
"""
if location is None:
# Make it an absolute url (but schemeless and domainless) for the
# edge case that the path starts with '//'.
location = "//%s" % self.get_full_path()
else:
# Coerce lazy locations.
location = str(location)
bits = urlsplit(location)
if not (bits.scheme and bits.netloc):
# Handle the simple, most common case. If the location is absolute
# and a scheme or host (netloc) isn't provided, skip an expensive
# urljoin() as long as no path segments are '.' or '..'.
if (
bits.path.startswith("/")
and not bits.scheme
and not bits.netloc
and "/./" not in bits.path
and "/../" not in bits.path
):
# If location starts with '//' but has no netloc, reuse the
# schema and netloc from the current request. Strip the double
# slashes and continue as if it wasn't specified.
if location.startswith("//"):
location = location[2:]
location = self._current_scheme_host + location
else:
# Join the constructed URL with the provided location, which
# allows the provided location to apply query strings to the
# base path.
location = urljoin(self._current_scheme_host + self.path, location)
return iri_to_uri(location)
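    # Illustrative results, assuming a request for https://example.com/base/:
    #   request.build_absolute_uri()          -> 'https://example.com/base/'
    #   request.build_absolute_uri('/other/') -> 'https://example.com/other/'
    #   request.build_absolute_uri('sub/')    -> 'https://example.com/base/sub/'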
@cached_property
def _current_scheme_host(self):
return "{}://{}".format(self.scheme, self.get_host())
def _get_scheme(self):
"""
Hook for subclasses like WSGIRequest to implement. Return 'http' by
default.
"""
return "http"
@property
def scheme(self):
if settings.SECURE_PROXY_SSL_HEADER:
try:
header, secure_value = settings.SECURE_PROXY_SSL_HEADER
except ValueError:
raise ImproperlyConfigured(
"The SECURE_PROXY_SSL_HEADER setting must be a tuple containing "
"two values."
)
header_value = self.META.get(header)
if header_value is not None:
header_value, *_ = header_value.split(",", 1)
return "https" if header_value.strip() == secure_value else "http"
return self._get_scheme()
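    # With SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https"), a
    # request whose proxy set "X-Forwarded-Proto: https" reports "https"
    # here; only the first value of a comma-separated header is compared.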
def is_secure(self):
return self.scheme == "https"
@property
def encoding(self):
return self._encoding
@encoding.setter
def encoding(self, val):
"""
Set the encoding used for GET/POST accesses. If the GET or POST
dictionary has already been created, remove and recreate it on the
next access (so that it is decoded correctly).
"""
self._encoding = val
if hasattr(self, "GET"):
del self.GET
if hasattr(self, "_post"):
del self._post
def _initialize_handlers(self):
self._upload_handlers = [
uploadhandler.load_handler(handler, self)
for handler in settings.FILE_UPLOAD_HANDLERS
]
@property
def upload_handlers(self):
if not self._upload_handlers:
# If there are no upload handlers defined, initialize them from settings.
self._initialize_handlers()
return self._upload_handlers
@upload_handlers.setter
def upload_handlers(self, upload_handlers):
if hasattr(self, "_files"):
raise AttributeError(
"You cannot set the upload handlers after the upload has been "
"processed."
)
self._upload_handlers = upload_handlers
def parse_file_upload(self, META, post_data):
"""Return a tuple of (POST QueryDict, FILES MultiValueDict)."""
self.upload_handlers = ImmutableList(
self.upload_handlers,
warning=(
"You cannot alter upload handlers after the upload has been "
"processed."
),
)
parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
return parser.parse()
@property
def body(self):
if not hasattr(self, "_body"):
if self._read_started:
raise RawPostDataException(
"You cannot access body after reading from request's data stream"
)
# Limit the maximum request data size that will be handled in-memory.
if (
settings.DATA_UPLOAD_MAX_MEMORY_SIZE is not None
and int(self.META.get("CONTENT_LENGTH") or 0)
> settings.DATA_UPLOAD_MAX_MEMORY_SIZE
):
raise RequestDataTooBig(
"Request body exceeded settings.DATA_UPLOAD_MAX_MEMORY_SIZE."
)
try:
self._body = self.read()
except OSError as e:
raise UnreadablePostError(*e.args) from e
finally:
self._stream.close()
self._stream = BytesIO(self._body)
return self._body
def _mark_post_parse_error(self):
self._post = QueryDict()
self._files = MultiValueDict()
def _load_post_and_files(self):
"""Populate self._post and self._files if the content-type is a form type"""
if self.method != "POST":
self._post, self._files = (
QueryDict(encoding=self._encoding),
MultiValueDict(),
)
return
if self._read_started and not hasattr(self, "_body"):
self._mark_post_parse_error()
return
if self.content_type == "multipart/form-data":
if hasattr(self, "_body"):
# Use already read data
data = BytesIO(self._body)
else:
data = self
try:
self._post, self._files = self.parse_file_upload(self.META, data)
except MultiPartParserError:
# An error occurred while parsing POST data. Since when
# formatting the error the request handler might access
                # self.POST, set self._post and self._files to prevent
# attempts to parse POST data again.
self._mark_post_parse_error()
raise
elif self.content_type == "application/x-www-form-urlencoded":
self._post, self._files = (
QueryDict(self.body, encoding=self._encoding),
MultiValueDict(),
)
else:
self._post, self._files = (
QueryDict(encoding=self._encoding),
MultiValueDict(),
)
def close(self):
if hasattr(self, "_files"):
for f in chain.from_iterable(list_[1] for list_ in self._files.lists()):
f.close()
# File-like and iterator interface.
#
# Expects self._stream to be set to an appropriate source of bytes by
# a corresponding request subclass (e.g. WSGIRequest).
# Also when request data has already been read by request.POST or
# request.body, self._stream points to a BytesIO instance
# containing that data.
def read(self, *args, **kwargs):
self._read_started = True
try:
return self._stream.read(*args, **kwargs)
except OSError as e:
raise UnreadablePostError(*e.args) from e
def readline(self, *args, **kwargs):
self._read_started = True
try:
return self._stream.readline(*args, **kwargs)
except OSError as e:
raise UnreadablePostError(*e.args) from e
def __iter__(self):
return iter(self.readline, b"")
def readlines(self):
return list(self)
class HttpHeaders(CaseInsensitiveMapping):
HTTP_PREFIX = "HTTP_"
# PEP 333 gives two headers which aren't prepended with HTTP_.
UNPREFIXED_HEADERS = {"CONTENT_TYPE", "CONTENT_LENGTH"}
def __init__(self, environ):
headers = {}
for header, value in environ.items():
name = self.parse_header_name(header)
if name:
headers[name] = value
super().__init__(headers)
def __getitem__(self, key):
"""Allow header lookup using underscores in place of hyphens."""
return super().__getitem__(key.replace("_", "-"))
@classmethod
def parse_header_name(cls, header):
if header.startswith(cls.HTTP_PREFIX):
header = header[len(cls.HTTP_PREFIX) :]
elif header not in cls.UNPREFIXED_HEADERS:
return None
return header.replace("_", "-").title()
@classmethod
def to_wsgi_name(cls, header):
header = header.replace("-", "_").upper()
if header in cls.UNPREFIXED_HEADERS:
return header
return f"{cls.HTTP_PREFIX}{header}"
@classmethod
def to_asgi_name(cls, header):
return header.replace("-", "_").upper()
@classmethod
def to_wsgi_names(cls, headers):
return {
cls.to_wsgi_name(header_name): value
for header_name, value in headers.items()
}
@classmethod
def to_asgi_names(cls, headers):
return {
cls.to_asgi_name(header_name): value
for header_name, value in headers.items()
}
class QueryDict(MultiValueDict):
"""
A specialized MultiValueDict which represents a query string.
A QueryDict can be used to represent GET or POST data. It subclasses
MultiValueDict since keys in such data can be repeated, for instance
in the data from a form with a <select multiple> field.
By default QueryDicts are immutable, though the copy() method
will always return a mutable copy.
Both keys and values set on this class are converted from the given encoding
(DEFAULT_CHARSET by default) to str.
"""
    # These are both reset in __init__, but are specified here at the class
    # level so that unpickling will have valid values.
_mutable = True
_encoding = None
def __init__(self, query_string=None, mutable=False, encoding=None):
super().__init__()
self.encoding = encoding or settings.DEFAULT_CHARSET
query_string = query_string or ""
parse_qsl_kwargs = {
"keep_blank_values": True,
"encoding": self.encoding,
"max_num_fields": settings.DATA_UPLOAD_MAX_NUMBER_FIELDS,
}
if isinstance(query_string, bytes):
# query_string normally contains URL-encoded data, a subset of ASCII.
try:
query_string = query_string.decode(self.encoding)
except UnicodeDecodeError:
# ... but some user agents are misbehaving :-(
query_string = query_string.decode("iso-8859-1")
try:
for key, value in parse_qsl(query_string, **parse_qsl_kwargs):
self.appendlist(key, value)
except ValueError as e:
# ValueError can also be raised if the strict_parsing argument to
# parse_qsl() is True. As that is not used by Django, assume that
# the exception was raised by exceeding the value of max_num_fields
# instead of fragile checks of exception message strings.
raise TooManyFieldsSent(
"The number of GET/POST parameters exceeded "
"settings.DATA_UPLOAD_MAX_NUMBER_FIELDS."
) from e
self._mutable = mutable
@classmethod
def fromkeys(cls, iterable, value="", mutable=False, encoding=None):
"""
Return a new QueryDict with keys (may be repeated) from an iterable and
values from value.
"""
q = cls("", mutable=True, encoding=encoding)
for key in iterable:
q.appendlist(key, value)
if not mutable:
q._mutable = False
return q
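    # A doctest-style sketch of repeated keys (values assumed):
    #   >>> QueryDict.fromkeys(["a", "a", "b"], value="1").urlencode()
    #   'a=1&a=1&b=1'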
@property
def encoding(self):
if self._encoding is None:
self._encoding = settings.DEFAULT_CHARSET
return self._encoding
@encoding.setter
def encoding(self, value):
self._encoding = value
def _assert_mutable(self):
if not self._mutable:
raise AttributeError("This QueryDict instance is immutable")
def __setitem__(self, key, value):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
value = bytes_to_text(value, self.encoding)
super().__setitem__(key, value)
def __delitem__(self, key):
self._assert_mutable()
super().__delitem__(key)
def __copy__(self):
result = self.__class__("", mutable=True, encoding=self.encoding)
for key, value in self.lists():
result.setlist(key, value)
return result
def __deepcopy__(self, memo):
result = self.__class__("", mutable=True, encoding=self.encoding)
memo[id(self)] = result
for key, value in self.lists():
result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo))
return result
def setlist(self, key, list_):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
list_ = [bytes_to_text(elt, self.encoding) for elt in list_]
super().setlist(key, list_)
def setlistdefault(self, key, default_list=None):
self._assert_mutable()
return super().setlistdefault(key, default_list)
def appendlist(self, key, value):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
value = bytes_to_text(value, self.encoding)
super().appendlist(key, value)
def pop(self, key, *args):
self._assert_mutable()
return super().pop(key, *args)
def popitem(self):
self._assert_mutable()
return super().popitem()
def clear(self):
self._assert_mutable()
super().clear()
def setdefault(self, key, default=None):
self._assert_mutable()
key = bytes_to_text(key, self.encoding)
default = bytes_to_text(default, self.encoding)
return super().setdefault(key, default)
def copy(self):
"""Return a mutable copy of this object."""
return self.__deepcopy__({})
def urlencode(self, safe=None):
"""
Return an encoded string of all query string arguments.
`safe` specifies characters which don't require quoting, for example::
>>> q = QueryDict(mutable=True)
>>> q['next'] = '/a&b/'
>>> q.urlencode()
'next=%2Fa%26b%2F'
>>> q.urlencode(safe='/')
'next=/a%26b/'
"""
output = []
if safe:
safe = safe.encode(self.encoding)
def encode(k, v):
return "%s=%s" % ((quote(k, safe), quote(v, safe)))
else:
def encode(k, v):
return urlencode({k: v})
for k, list_ in self.lists():
output.extend(
encode(k.encode(self.encoding), str(v).encode(self.encoding))
for v in list_
)
return "&".join(output)
class MediaType:
def __init__(self, media_type_raw_line):
full_type, self.params = parse_header_parameters(
media_type_raw_line if media_type_raw_line else ""
)
self.main_type, _, self.sub_type = full_type.partition("/")
def __str__(self):
params_str = "".join("; %s=%s" % (k, v) for k, v in self.params.items())
return "%s%s%s" % (
self.main_type,
("/%s" % self.sub_type) if self.sub_type else "",
params_str,
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__qualname__, self)
@property
def is_all_types(self):
return self.main_type == "*" and self.sub_type == "*"
def match(self, other):
if self.is_all_types:
return True
other = MediaType(other)
if self.main_type == other.main_type and self.sub_type in {"*", other.sub_type}:
return True
return False
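    # A doctest-style sketch of the matching rules (values assumed):
    #   >>> MediaType("text/*").match("text/html")
    #   True
    #   >>> MediaType("*/*").match("application/json")
    #   True
    #   >>> MediaType("text/html").match("text/plain")
    #   False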
# It's neither necessary nor appropriate to use
# django.utils.encoding.force_str() for parsing URLs and form inputs, so
# this slightly more restricted function is used by QueryDict instead.
def bytes_to_text(s, encoding):
"""
Convert bytes objects to strings, using the given encoding. Illegally
encoded input characters are replaced with Unicode "unknown" codepoint
(\ufffd).
Return any non-bytes objects without change.
"""
if isinstance(s, bytes):
return str(s, encoding, "replace")
else:
return s
def split_domain_port(host):
"""
Return a (domain, port) tuple from a given host.
Returned domain is lowercased. If the host is invalid, the domain will be
empty.
"""
host = host.lower()
if not host_validation_re.match(host):
return "", ""
if host[-1] == "]":
# It's an IPv6 address without a port.
return host, ""
bits = host.rsplit(":", 1)
domain, port = bits if len(bits) == 2 else (bits[0], "")
# Remove a trailing dot (if present) from the domain.
domain = domain[:-1] if domain.endswith(".") else domain
return domain, port
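# A doctest-style sketch (values assumed):
#   >>> split_domain_port("Example.COM.:8000")
#   ('example.com', '8000')
#   >>> split_domain_port("[::1]")
#   ('[::1]', '')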
def validate_host(host, allowed_hosts):
"""
Validate the given host for this site.
Check that the host looks valid and matches a host or host pattern in the
given list of ``allowed_hosts``. Any pattern beginning with a period
matches a domain and all its subdomains (e.g. ``.example.com`` matches
``example.com`` and any subdomain), ``*`` matches anything, and anything
else must match exactly.
Note: This function assumes that the given host is lowercased and has
already had the port, if any, stripped off.
Return ``True`` for a valid host, ``False`` otherwise.
"""
return any(
pattern == "*" or is_same_domain(host, pattern) for pattern in allowed_hosts
)
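# A doctest-style sketch of the pattern forms (values assumed):
#   >>> validate_host("sub.example.com", [".example.com"])
#   True
#   >>> validate_host("anything.at.all", ["*"])
#   True
#   >>> validate_host("evil.com", ["example.com"])
#   False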
def parse_accept_header(header):
return [MediaType(token) for token in header.split(",") if token.strip()]
|
8d1d86618592f74778e0b563e8f2f861e465adb751c8423fc31296c1daf6711c | """Translation helper functions."""
import functools
import gettext as gettext_module
import os
import re
import sys
import warnings
from asgiref.local import Local
from django.apps import apps
from django.conf import settings
from django.conf.locale import LANG_INFO
from django.core.exceptions import AppRegistryNotReady
from django.core.signals import setting_changed
from django.dispatch import receiver
from django.utils.regex_helper import _lazy_re_compile
from django.utils.safestring import SafeData, mark_safe
from . import to_language, to_locale
# Translations are cached in a dictionary for every language.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = Local()
# The default translation is based on the settings file.
_default = None
# magic gettext number to separate context from message
CONTEXT_SEPARATOR = "\x04"
# Format of Accept-Language header values. From RFC 9110 Sections 12.4.2 and
# 12.5.4, and RFC 5646 Section 2.1.
accept_language_re = _lazy_re_compile(
r"""
# "en", "en-au", "x-y-z", "es-419", "*"
([A-Za-z]{1,8}(?:-[A-Za-z0-9]{1,8})*|\*)
# Optional "q=1.00", "q=0.8"
(?:\s*;\s*q=(0(?:\.[0-9]{,3})?|1(?:\.0{,3})?))?
# Multiple accepts per header.
(?:\s*,\s*|$)
""",
re.VERBOSE,
)
language_code_re = _lazy_re_compile(
r"^[a-z]{1,8}(?:-[a-z0-9]{1,8})*(?:@[a-z0-9]{1,20})?$", re.IGNORECASE
)
language_code_prefix_re = _lazy_re_compile(r"^/(\w+([@-]\w+){0,2})(/|$)")
@receiver(setting_changed)
def reset_cache(*, setting, **kwargs):
"""
Reset global state when LANGUAGES setting has been changed, as some
languages should no longer be accepted.
"""
if setting in ("LANGUAGES", "LANGUAGE_CODE"):
check_for_language.cache_clear()
get_languages.cache_clear()
get_supported_language_variant.cache_clear()
class TranslationCatalog:
"""
    Simulate a dict for DjangoTranslation._catalog so that multiple catalogs
    with different plural equations are kept separate.
"""
def __init__(self, trans=None):
self._catalogs = [trans._catalog.copy()] if trans else [{}]
self._plurals = [trans.plural] if trans else [lambda n: int(n != 1)]
def __getitem__(self, key):
for cat in self._catalogs:
try:
return cat[key]
except KeyError:
pass
raise KeyError(key)
def __setitem__(self, key, value):
self._catalogs[0][key] = value
def __contains__(self, key):
return any(key in cat for cat in self._catalogs)
def items(self):
for cat in self._catalogs:
yield from cat.items()
def keys(self):
for cat in self._catalogs:
yield from cat.keys()
def update(self, trans):
# Merge if plural function is the same, else prepend.
for cat, plural in zip(self._catalogs, self._plurals):
if trans.plural.__code__ == plural.__code__:
cat.update(trans._catalog)
break
else:
self._catalogs.insert(0, trans._catalog.copy())
self._plurals.insert(0, trans.plural)
def get(self, key, default=None):
missing = object()
for cat in self._catalogs:
result = cat.get(key, missing)
if result is not missing:
return result
return default
def plural(self, msgid, num):
for cat, plural in zip(self._catalogs, self._plurals):
tmsg = cat.get((msgid, plural(num)))
if tmsg is not None:
return tmsg
raise KeyError
class DjangoTranslation(gettext_module.GNUTranslations):
"""
Set up the GNUTranslations context with regard to output charset.
This translation object will be constructed out of multiple GNUTranslations
objects by merging their catalogs. It will construct an object for the
requested language and add a fallback to the default language, if it's
different from the requested language.
"""
domain = "django"
def __init__(self, language, domain=None, localedirs=None):
"""Create a GNUTranslations() using many locale directories"""
gettext_module.GNUTranslations.__init__(self)
if domain is not None:
self.domain = domain
self.__language = language
self.__to_language = to_language(language)
self.__locale = to_locale(language)
self._catalog = None
# If a language doesn't have a catalog, use the Germanic default for
# pluralization: anything except one is pluralized.
self.plural = lambda n: int(n != 1)
if self.domain == "django":
if localedirs is not None:
# A module-level cache is used for caching 'django' translations
warnings.warn(
"localedirs is ignored when domain is 'django'.", RuntimeWarning
)
localedirs = None
self._init_translation_catalog()
if localedirs:
for localedir in localedirs:
translation = self._new_gnu_trans(localedir)
self.merge(translation)
else:
self._add_installed_apps_translations()
self._add_local_translations()
if (
self.__language == settings.LANGUAGE_CODE
and self.domain == "django"
and self._catalog is None
):
            # The default language should have at least one translation file
            # available.
raise OSError(
"No translation files found for default language %s."
% settings.LANGUAGE_CODE
)
self._add_fallback(localedirs)
if self._catalog is None:
# No catalogs found for this language, set an empty catalog.
self._catalog = TranslationCatalog()
def __repr__(self):
return "<DjangoTranslation lang:%s>" % self.__language
def _new_gnu_trans(self, localedir, use_null_fallback=True):
"""
Return a mergeable gettext.GNUTranslations instance.
A convenience wrapper. By default gettext uses 'fallback=False'.
        The param name `use_null_fallback` avoids confusion with any other
        references to 'fallback'.
"""
return gettext_module.translation(
domain=self.domain,
localedir=localedir,
languages=[self.__locale],
fallback=use_null_fallback,
)
def _init_translation_catalog(self):
"""Create a base catalog using global django translations."""
settingsfile = sys.modules[settings.__module__].__file__
localedir = os.path.join(os.path.dirname(settingsfile), "locale")
translation = self._new_gnu_trans(localedir)
self.merge(translation)
def _add_installed_apps_translations(self):
"""Merge translations from each installed app."""
try:
app_configs = reversed(apps.get_app_configs())
except AppRegistryNotReady:
raise AppRegistryNotReady(
"The translation infrastructure cannot be initialized before the "
"apps registry is ready. Check that you don't make non-lazy "
"gettext calls at import time."
)
for app_config in app_configs:
localedir = os.path.join(app_config.path, "locale")
if os.path.exists(localedir):
translation = self._new_gnu_trans(localedir)
self.merge(translation)
def _add_local_translations(self):
"""Merge translations defined in LOCALE_PATHS."""
for localedir in reversed(settings.LOCALE_PATHS):
translation = self._new_gnu_trans(localedir)
self.merge(translation)
def _add_fallback(self, localedirs=None):
"""Set the GNUTranslations() fallback with the default language."""
# Don't set a fallback for the default language or any English variant
# (as it's empty, so it'll ALWAYS fall back to the default language)
if self.__language == settings.LANGUAGE_CODE or self.__language.startswith(
"en"
):
return
if self.domain == "django":
# Get from cache
default_translation = translation(settings.LANGUAGE_CODE)
else:
default_translation = DjangoTranslation(
settings.LANGUAGE_CODE, domain=self.domain, localedirs=localedirs
)
self.add_fallback(default_translation)
def merge(self, other):
"""Merge another translation into this catalog."""
if not getattr(other, "_catalog", None):
return # NullTranslations() has no _catalog
if self._catalog is None:
# Take plural and _info from first catalog found (generally Django's).
self.plural = other.plural
self._info = other._info.copy()
self._catalog = TranslationCatalog(other)
else:
self._catalog.update(other)
if other._fallback:
self.add_fallback(other._fallback)
def language(self):
"""Return the translation language."""
return self.__language
def to_language(self):
"""Return the translation language name."""
return self.__to_language
def ngettext(self, msgid1, msgid2, n):
try:
tmsg = self._catalog.plural(msgid1, n)
except KeyError:
if self._fallback:
return self._fallback.ngettext(msgid1, msgid2, n)
if n == 1:
tmsg = msgid1
else:
tmsg = msgid2
return tmsg
def translation(language):
"""
Return a translation object in the default 'django' domain.
"""
global _translations
if language not in _translations:
_translations[language] = DjangoTranslation(language)
return _translations[language]
def activate(language):
"""
Fetch the translation object for a given language and install it as the
current translation object for the current thread.
"""
if not language:
return
_active.value = translation(language)
def deactivate():
"""
Uninstall the active translation object so that further _() calls resolve
to the default translation object.
"""
if hasattr(_active, "value"):
del _active.value
def deactivate_all():
"""
Make the active translation object a NullTranslations() instance. This is
useful when we want delayed translations to appear as the original string
for some reason.
"""
_active.value = gettext_module.NullTranslations()
_active.value.to_language = lambda *args: None
def get_language():
"""Return the currently selected language."""
t = getattr(_active, "value", None)
if t is not None:
try:
return t.to_language()
except AttributeError:
pass
# If we don't have a real translation object, assume it's the default language.
return settings.LANGUAGE_CODE
def get_language_bidi():
"""
Return selected language's BiDi layout.
* False = left-to-right layout
* True = right-to-left layout
"""
lang = get_language()
if lang is None:
return False
else:
base_lang = get_language().split("-")[0]
return base_lang in settings.LANGUAGES_BIDI
def catalog():
"""
Return the current active catalog for further processing.
This can be used if you need to modify the catalog or want to access the
whole message catalog instead of just translating one string.
"""
global _default
t = getattr(_active, "value", None)
if t is not None:
return t
if _default is None:
_default = translation(settings.LANGUAGE_CODE)
return _default
def gettext(message):
"""
Translate the 'message' string. It uses the current thread to find the
translation object to use. If no current translation is activated, the
message will be run through the default translation object.
"""
global _default
eol_message = message.replace("\r\n", "\n").replace("\r", "\n")
if eol_message:
_default = _default or translation(settings.LANGUAGE_CODE)
translation_object = getattr(_active, "value", _default)
result = translation_object.gettext(eol_message)
else:
# Return an empty value of the corresponding type if an empty message
# is given, instead of metadata, which is the default gettext behavior.
result = type(message)("")
if isinstance(message, SafeData):
return mark_safe(result)
return result
def pgettext(context, message):
msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
result = gettext(msg_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
result = message
elif isinstance(message, SafeData):
result = mark_safe(result)
return result
def gettext_noop(message):
"""
Mark strings for translation but don't translate them now. This can be
used to store strings in global variables that should stay in the base
language (because they might be used externally) and will be translated
later.
"""
return message
def do_ntranslate(singular, plural, number, translation_function):
global _default
t = getattr(_active, "value", None)
if t is not None:
return getattr(t, translation_function)(singular, plural, number)
if _default is None:
_default = translation(settings.LANGUAGE_CODE)
return getattr(_default, translation_function)(singular, plural, number)
def ngettext(singular, plural, number):
"""
Return a string of the translation of either the singular or plural,
based on the number.
"""
return do_ntranslate(singular, plural, number, "ngettext")
def npgettext(context, singular, plural, number):
msgs_with_ctxt = (
"%s%s%s" % (context, CONTEXT_SEPARATOR, singular),
"%s%s%s" % (context, CONTEXT_SEPARATOR, plural),
number,
)
result = ngettext(*msgs_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
result = ngettext(singular, plural, number)
return result
def all_locale_paths():
"""
    Return a list of paths to user-provided language files.
"""
globalpath = os.path.join(
os.path.dirname(sys.modules[settings.__module__].__file__), "locale"
)
app_paths = []
for app_config in apps.get_app_configs():
locale_path = os.path.join(app_config.path, "locale")
if os.path.exists(locale_path):
app_paths.append(locale_path)
return [globalpath, *settings.LOCALE_PATHS, *app_paths]
@functools.lru_cache(maxsize=1000)
def check_for_language(lang_code):
"""
Check whether there is a global language file for the given language
code. This is used to decide whether a user-provided language is
available.
    lru_cache should have a maxsize to prevent memory exhaustion attacks,
as the provided language codes are taken from the HTTP request. See also
<https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
"""
# First, a quick check to make sure lang_code is well-formed (#21458)
if lang_code is None or not language_code_re.search(lang_code):
return False
return any(
gettext_module.find("django", path, [to_locale(lang_code)]) is not None
for path in all_locale_paths()
)
@functools.lru_cache
def get_languages():
"""
Cache of settings.LANGUAGES in a dictionary for easy lookups by key.
Convert keys to lowercase as they should be treated as case-insensitive.
"""
return {key.lower(): value for key, value in dict(settings.LANGUAGES).items()}
@functools.lru_cache(maxsize=1000)
def get_supported_language_variant(lang_code, strict=False):
"""
Return the language code that's listed in supported languages, possibly
selecting a more generic variant. Raise LookupError if nothing is found.
If `strict` is False (the default), look for a country-specific variant
when neither the language code nor its generic variant is found.
    lru_cache should have a maxsize to prevent memory exhaustion attacks,
as the provided language codes are taken from the HTTP request. See also
<https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
"""
if lang_code:
        # If 'zh-hant-tw' is not supported, try the special fallback or the
        # progressively more generic language codes, i.e. 'zh-hant' and 'zh'.
possible_lang_codes = [lang_code]
try:
possible_lang_codes.extend(LANG_INFO[lang_code]["fallback"])
except KeyError:
pass
i = None
while (i := lang_code.rfind("-", 0, i)) > -1:
possible_lang_codes.append(lang_code[:i])
generic_lang_code = possible_lang_codes[-1]
supported_lang_codes = get_languages()
for code in possible_lang_codes:
if code.lower() in supported_lang_codes and check_for_language(code):
return code
if not strict:
# if fr-fr is not supported, try fr-ca.
for supported_code in supported_lang_codes:
if supported_code.startswith(generic_lang_code + "-"):
return supported_code
raise LookupError(lang_code)
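# A hedged illustration (assumes settings.LANGUAGES contains "de" but not
# "de-at", with a "de" catalog on disk): get_supported_language_variant(
# "de-at") falls back to "de"; a code with no usable variant at all raises
# LookupError.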
def get_language_from_path(path, strict=False):
"""
Return the language code if there's a valid language code found in `path`.
If `strict` is False (the default), look for a country-specific variant
when neither the language code nor its generic variant is found.
"""
regex_match = language_code_prefix_re.match(path)
if not regex_match:
return None
lang_code = regex_match[1]
try:
return get_supported_language_variant(lang_code, strict=strict)
except LookupError:
return None
def get_language_from_request(request, check_path=False):
"""
Analyze the request to find what language the user wants the system to
show. Only languages listed in settings.LANGUAGES are taken into account.
If the user requests a sublanguage where we have a main language, we send
out the main language.
If check_path is True, the URL path prefix will be checked for a language
code, otherwise this is skipped for backwards compatibility.
"""
if check_path:
lang_code = get_language_from_path(request.path_info)
if lang_code is not None:
return lang_code
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
if (
lang_code is not None
and lang_code in get_languages()
and check_for_language(lang_code)
):
return lang_code
try:
return get_supported_language_variant(lang_code)
except LookupError:
pass
accept = request.META.get("HTTP_ACCEPT_LANGUAGE", "")
for accept_lang, unused in parse_accept_lang_header(accept):
if accept_lang == "*":
break
if not language_code_re.search(accept_lang):
continue
try:
return get_supported_language_variant(accept_lang)
except LookupError:
continue
return None
@functools.lru_cache(maxsize=1000)
def parse_accept_lang_header(lang_string):
"""
Parse the lang_string, which is the body of an HTTP Accept-Language
header, and return a tuple of (lang, q-value), ordered by 'q' values.
Return an empty tuple if there are any format errors in lang_string.
"""
result = []
pieces = accept_language_re.split(lang_string.lower())
if pieces[-1]:
return ()
for i in range(0, len(pieces) - 1, 3):
first, lang, priority = pieces[i : i + 3]
if first:
return ()
if priority:
priority = float(priority)
else:
priority = 1.0
result.append((lang, priority))
result.sort(key=lambda k: k[1], reverse=True)
return tuple(result)
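# A doctest-style sketch (header value assumed):
#   >>> parse_accept_lang_header("en-AU;q=0.8, es;q=0.9, *;q=0.1")
#   (('es', 0.9), ('en-au', 0.8), ('*', 0.1))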
|
d16cad071e5a2bc9d8e619058f024bf5a63bf1d09ff8e376f9e191bb5332c7ac | """
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import operator
import warnings
from itertools import chain, islice
from asgiref.sync import sync_to_async
import django
from django.conf import settings
from django.core import exceptions
from django.db import (
DJANGO_VERSION_PICKLE_KEY,
IntegrityError,
NotSupportedError,
connections,
router,
transaction,
)
from django.db.models import AutoField, DateField, DateTimeField, Field, sql
from django.db.models.constants import LOOKUP_SEP, OnConflict
from django.db.models.deletion import Collector
from django.db.models.expressions import Case, F, Ref, Value, When
from django.db.models.functions import Cast, Trunc
from django.db.models.query_utils import FilteredRelation, Q
from django.db.models.sql.constants import CURSOR, GET_ITERATOR_CHUNK_SIZE
from django.db.models.utils import (
AltersData,
create_namedtuple_class,
resolve_callables,
)
from django.utils import timezone
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.functional import cached_property, partition
# The maximum number of results to fetch in a get() query.
MAX_GET_RESULTS = 21
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
class BaseIterable:
def __init__(
self, queryset, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE
):
self.queryset = queryset
self.chunked_fetch = chunked_fetch
self.chunk_size = chunk_size
async def _async_generator(self):
# Generators don't actually start running until the first time you call
# next() on them, so make the generator object in the async thread and
# then repeatedly dispatch to it in a sync thread.
sync_generator = self.__iter__()
def next_slice(gen):
return list(islice(gen, self.chunk_size))
while True:
chunk = await sync_to_async(next_slice)(sync_generator)
for item in chunk:
yield item
if len(chunk) < self.chunk_size:
break
# __aiter__() is a *synchronous* method that has to then return an
# *asynchronous* iterator/generator. Thus, nest an async generator inside
# it.
# This is a generic iterable converter for now, and is going to suffer a
# performance penalty on large sets of items due to the cost of crossing
# over the sync barrier for each chunk. Custom __aiter__() methods should
# be added to each Iterable subclass, but that needs some work in the
# Compiler first.
def __aiter__(self):
return self._async_generator()
class ModelIterable(BaseIterable):
"""Iterable that yields a model instance for each row."""
def __iter__(self):
queryset = self.queryset
db = queryset.db
compiler = queryset.query.get_compiler(using=db)
# Execute the query. This will also fill compiler.select, klass_info,
# and annotations.
results = compiler.execute_sql(
chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
)
select, klass_info, annotation_col_map = (
compiler.select,
compiler.klass_info,
compiler.annotation_col_map,
)
model_cls = klass_info["model"]
select_fields = klass_info["select_fields"]
model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
init_list = [
f[0].target.attname for f in select[model_fields_start:model_fields_end]
]
related_populators = get_related_populators(klass_info, select, db)
known_related_objects = [
(
field,
related_objs,
operator.attrgetter(
*[
field.attname
if from_field == "self"
else queryset.model._meta.get_field(from_field).attname
for from_field in field.from_fields
]
),
)
for field, related_objs in queryset._known_related_objects.items()
]
for row in compiler.results_iter(results):
obj = model_cls.from_db(
db, init_list, row[model_fields_start:model_fields_end]
)
for rel_populator in related_populators:
rel_populator.populate(row, obj)
if annotation_col_map:
for attr_name, col_pos in annotation_col_map.items():
setattr(obj, attr_name, row[col_pos])
# Add the known related objects to the model.
for field, rel_objs, rel_getter in known_related_objects:
# Avoid overwriting objects loaded by, e.g., select_related().
if field.is_cached(obj):
continue
rel_obj_id = rel_getter(obj)
try:
rel_obj = rel_objs[rel_obj_id]
except KeyError:
pass # May happen in qs1 | qs2 scenarios.
else:
setattr(obj, field.name, rel_obj)
yield obj
class RawModelIterable(BaseIterable):
"""
Iterable that yields a model instance for each row from a raw queryset.
"""
def __iter__(self):
# Cache some things for performance reasons outside the loop.
db = self.queryset.db
query = self.queryset.query
connection = connections[db]
compiler = connection.ops.compiler("SQLCompiler")(query, connection, db)
query_iterator = iter(query)
try:
(
model_init_names,
model_init_pos,
annotation_fields,
) = self.queryset.resolve_model_init_order()
model_cls = self.queryset.model
if model_cls._meta.pk.attname not in model_init_names:
raise exceptions.FieldDoesNotExist(
"Raw query must include the primary key"
)
fields = [self.queryset.model_fields.get(c) for c in self.queryset.columns]
converters = compiler.get_converters(
[f.get_col(f.model._meta.db_table) if f else None for f in fields]
)
if converters:
query_iterator = compiler.apply_converters(query_iterator, converters)
for values in query_iterator:
# Associate fields to values
model_init_values = [values[pos] for pos in model_init_pos]
instance = model_cls.from_db(db, model_init_names, model_init_values)
if annotation_fields:
for column, pos in annotation_fields:
setattr(instance, column, values[pos])
yield instance
finally:
# Done iterating the Query. If it has its own cursor, close it.
if hasattr(query, "cursor") and query.cursor:
query.cursor.close()
class ValuesIterable(BaseIterable):
"""
Iterable returned by QuerySet.values() that yields a dict for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
# extra(select=...) cols are always at the start of the row.
names = [
*query.extra_select,
*query.values_select,
*query.annotation_select,
]
indexes = range(len(names))
for row in compiler.results_iter(
chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
):
yield {names[i]: row[i] for i in indexes}
class ValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=False) that yields a tuple
for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
if queryset._fields:
# extra(select=...) cols are always at the start of the row.
names = [
*query.extra_select,
*query.values_select,
*query.annotation_select,
]
fields = [
*queryset._fields,
*(f for f in query.annotation_select if f not in queryset._fields),
]
if fields != names:
# Reorder according to fields.
index_map = {name: idx for idx, name in enumerate(names)}
rowfactory = operator.itemgetter(*[index_map[f] for f in fields])
return map(
rowfactory,
compiler.results_iter(
chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
),
)
return compiler.results_iter(
tuple_expected=True,
chunked_fetch=self.chunked_fetch,
chunk_size=self.chunk_size,
)
class NamedValuesListIterable(ValuesListIterable):
"""
Iterable returned by QuerySet.values_list(named=True) that yields a
namedtuple for each row.
"""
def __iter__(self):
queryset = self.queryset
if queryset._fields:
names = queryset._fields
else:
query = queryset.query
names = [
*query.extra_select,
*query.values_select,
*query.annotation_select,
]
tuple_class = create_namedtuple_class(*names)
new = tuple.__new__
for row in super().__iter__():
yield new(tuple_class, row)
class FlatValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=True) that yields single
values.
"""
def __iter__(self):
queryset = self.queryset
compiler = queryset.query.get_compiler(queryset.db)
for row in compiler.results_iter(
chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
):
yield row[0]
class QuerySet(AltersData):
"""Represent a lazy database lookup for a set of objects."""
def __init__(self, model=None, query=None, using=None, hints=None):
self.model = model
self._db = using
self._hints = hints or {}
self._query = query or sql.Query(self.model)
self._result_cache = None
self._sticky_filter = False
self._for_write = False
self._prefetch_related_lookups = ()
self._prefetch_done = False
self._known_related_objects = {} # {rel_field: {pk: rel_obj}}
self._iterable_class = ModelIterable
self._fields = None
self._defer_next_filter = False
self._deferred_filter = None
@property
def query(self):
if self._deferred_filter:
negate, args, kwargs = self._deferred_filter
self._filter_or_exclude_inplace(negate, args, kwargs)
self._deferred_filter = None
return self._query
@query.setter
def query(self, value):
if value.values_select:
self._iterable_class = ValuesIterable
self._query = value
def as_manager(cls):
        # Address the circular dependency between `QuerySet` and `Manager`.
from django.db.models.manager import Manager
manager = Manager.from_queryset(cls)()
manager._built_with_as_manager = True
return manager
as_manager.queryset_only = True
as_manager = classmethod(as_manager)
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""Don't populate the QuerySet's cache."""
obj = self.__class__()
for k, v in self.__dict__.items():
if k == "_result_cache":
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
# Force the cache to be fully populated.
self._fetch_all()
return {**self.__dict__, DJANGO_VERSION_PICKLE_KEY: django.__version__}
def __setstate__(self, state):
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
if pickled_version != django.__version__:
warnings.warn(
"Pickled queryset instance's Django version %s does not "
"match the current version %s."
% (pickled_version, django.__version__),
RuntimeWarning,
stacklevel=2,
)
else:
warnings.warn(
"Pickled queryset instance's Django version is not specified.",
RuntimeWarning,
stacklevel=2,
)
self.__dict__.update(state)
def __repr__(self):
data = list(self[: REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return "<%s %r>" % (self.__class__.__name__, data)
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __iter__(self):
"""
The queryset iterator protocol uses three nested iterators in the
default case:
1. sql.compiler.execute_sql()
               - Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE)
using cursor.fetchmany(). This part is responsible for
doing some column masking, and returning the rows in chunks.
2. sql.compiler.results_iter()
               - Returns one row at a time. At this point the rows are still just
tuples. In some cases the return values are converted to
Python values at this location.
3. self.iterator()
- Responsible for turning the rows into model objects.
"""
self._fetch_all()
return iter(self._result_cache)
def __aiter__(self):
# Remember, __aiter__ itself is synchronous, it's the thing it returns
# that is async!
async def generator():
await sync_to_async(self._fetch_all)()
for item in self._result_cache:
yield item
return generator()
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __getitem__(self, k):
"""Retrieve an item or slice from the set of results."""
if not isinstance(k, (int, slice)):
raise TypeError(
"QuerySet indices must be integers or slices, not %s."
% type(k).__name__
)
if (isinstance(k, int) and k < 0) or (
isinstance(k, slice)
and (
(k.start is not None and k.start < 0)
or (k.stop is not None and k.stop < 0)
)
):
raise ValueError("Negative indexing is not supported.")
if self._result_cache is not None:
return self._result_cache[k]
if isinstance(k, slice):
qs = self._chain()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return list(qs)[:: k.step] if k.step else qs
qs = self._chain()
qs.query.set_limits(k, k + 1)
qs._fetch_all()
return qs._result_cache[0]
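    # Slicing sketch: qs[:5] stays lazy (LIMIT 5), qs[10] runs the query with
    # a one-row window and returns a model instance, and a stepped slice such
    # as qs[:10:2] evaluates immediately and returns a plain list.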
def __class_getitem__(cls, *args, **kwargs):
return cls
def __and__(self, other):
self._check_operator_queryset(other, "&")
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other
if isinstance(self, EmptyQuerySet):
return self
combined = self._chain()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._check_operator_queryset(other, "|")
self._merge_sanity_check(other)
if isinstance(self, EmptyQuerySet):
return other
if isinstance(other, EmptyQuerySet):
return self
query = (
self
if self.query.can_filter()
else self.model._base_manager.filter(pk__in=self.values("pk"))
)
combined = query._chain()
combined._merge_known_related_objects(other)
if not other.query.can_filter():
other = other.model._base_manager.filter(pk__in=other.values("pk"))
combined.query.combine(other.query, sql.OR)
return combined
def __xor__(self, other):
self._check_operator_queryset(other, "^")
self._merge_sanity_check(other)
if isinstance(self, EmptyQuerySet):
return other
if isinstance(other, EmptyQuerySet):
return self
query = (
self
if self.query.can_filter()
else self.model._base_manager.filter(pk__in=self.values("pk"))
)
combined = query._chain()
combined._merge_known_related_objects(other)
if not other.query.can_filter():
other = other.model._base_manager.filter(pk__in=other.values("pk"))
combined.query.combine(other.query, sql.XOR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def _iterator(self, use_chunked_fetch, chunk_size):
iterable = self._iterable_class(
self,
chunked_fetch=use_chunked_fetch,
chunk_size=chunk_size or 2000,
)
if not self._prefetch_related_lookups or chunk_size is None:
yield from iterable
return
iterator = iter(iterable)
while results := list(islice(iterator, chunk_size)):
prefetch_related_objects(results, *self._prefetch_related_lookups)
yield from results
def iterator(self, chunk_size=None):
"""
An iterator over the results from applying this QuerySet to the
database. chunk_size must be provided for QuerySets that prefetch
related objects. Otherwise, a default chunk_size of 2000 is supplied.
"""
if chunk_size is None:
if self._prefetch_related_lookups:
# When the deprecation ends, replace with:
# raise ValueError(
# 'chunk_size must be provided when using '
# 'QuerySet.iterator() after prefetch_related().'
# )
warnings.warn(
"Using QuerySet.iterator() after prefetch_related() "
"without specifying chunk_size is deprecated.",
category=RemovedInDjango50Warning,
stacklevel=2,
)
elif chunk_size <= 0:
raise ValueError("Chunk size must be strictly positive.")
use_chunked_fetch = not connections[self.db].settings_dict.get(
"DISABLE_SERVER_SIDE_CURSORS"
)
return self._iterator(use_chunked_fetch, chunk_size)
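    # Usage sketch: `for obj in qs.iterator(chunk_size=500): ...` streams
    # results without populating the QuerySet cache; when prefetch_related()
    # is in play, prefetching is performed per chunk_size batch of rows.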
async def aiterator(self, chunk_size=2000):
"""
An asynchronous iterator over the results from applying this QuerySet
to the database.
"""
if self._prefetch_related_lookups:
raise NotSupportedError(
"Using QuerySet.aiterator() after prefetch_related() is not supported."
)
if chunk_size <= 0:
raise ValueError("Chunk size must be strictly positive.")
use_chunked_fetch = not connections[self.db].settings_dict.get(
"DISABLE_SERVER_SIDE_CURSORS"
)
async for item in self._iterable_class(
self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size
):
yield item
def aggregate(self, *args, **kwargs):
"""
Return a dictionary containing the calculations (aggregation)
over the current queryset.
If args is present the expression is passed as a kwarg using
the Aggregate object's default alias.
"""
if self.query.distinct_fields:
raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
self._validate_values_are_expressions(
(*args, *kwargs.values()), method_name="aggregate"
)
for arg in args:
# The default_alias property raises TypeError if default_alias
# can't be set automatically or AttributeError if it isn't an
# attribute.
try:
arg.default_alias
except (AttributeError, TypeError):
raise TypeError("Complex aggregates require an alias")
kwargs[arg.default_alias] = arg
query = self.query.chain()
for (alias, aggregate_expr) in kwargs.items():
query.add_annotation(aggregate_expr, alias, is_summary=True)
annotation = query.annotations[alias]
if not annotation.contains_aggregate:
raise TypeError("%s is not an aggregate expression" % alias)
for expr in annotation.get_source_expressions():
if (
expr.contains_aggregate
and isinstance(expr, Ref)
and expr.refs in kwargs
):
name = expr.refs
raise exceptions.FieldError(
"Cannot compute %s('%s'): '%s' is an aggregate"
% (annotation.name, name, name)
)
return query.get_aggregation(self.db, kwargs)
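    # Usage sketch (assumes `from django.db.models import Avg`): positional
    # expressions are keyed by their default alias, so
    # qs.aggregate(Avg("price")) returns {"price__avg": ...}, while
    # qs.aggregate(mean=Avg("price")) names the result "mean".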
async def aaggregate(self, *args, **kwargs):
return await sync_to_async(self.aggregate)(*args, **kwargs)
def count(self):
"""
Perform a SELECT COUNT() and return the number of records as an
integer.
If the QuerySet is already fully cached, return the length of the
cached results set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None:
return len(self._result_cache)
return self.query.get_count(using=self.db)
async def acount(self):
return await sync_to_async(self.count)()
def get(self, *args, **kwargs):
"""
Perform the query and return a single object matching the given
keyword arguments.
"""
if self.query.combinator and (args or kwargs):
raise NotSupportedError(
"Calling QuerySet.get(...) with filters after %s() is not "
"supported." % self.query.combinator
)
clone = self._chain() if self.query.combinator else self.filter(*args, **kwargs)
if self.query.can_filter() and not self.query.distinct_fields:
clone = clone.order_by()
limit = None
if (
not clone.query.select_for_update
or connections[clone.db].features.supports_select_for_update_with_limit
):
limit = MAX_GET_RESULTS
clone.query.set_limits(high=limit)
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.model.DoesNotExist(
"%s matching query does not exist." % self.model._meta.object_name
)
raise self.model.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s!"
% (
self.model._meta.object_name,
num if not limit or num < limit else "more than %s" % (limit - 1),
)
)
async def aget(self, *args, **kwargs):
return await sync_to_async(self.get)(*args, **kwargs)
def create(self, **kwargs):
"""
Create a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
async def acreate(self, **kwargs):
return await sync_to_async(self.create)(**kwargs)
def _prepare_for_bulk_create(self, objs):
for obj in objs:
if obj.pk is None:
# Populate new PK values.
obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
obj._prepare_related_fields_for_save(operation_name="bulk_create")
def _check_bulk_create_options(
self, ignore_conflicts, update_conflicts, update_fields, unique_fields
):
if ignore_conflicts and update_conflicts:
raise ValueError(
"ignore_conflicts and update_conflicts are mutually exclusive."
)
db_features = connections[self.db].features
if ignore_conflicts:
if not db_features.supports_ignore_conflicts:
raise NotSupportedError(
"This database backend does not support ignoring conflicts."
)
return OnConflict.IGNORE
elif update_conflicts:
if not db_features.supports_update_conflicts:
raise NotSupportedError(
"This database backend does not support updating conflicts."
)
if not update_fields:
raise ValueError(
"Fields that will be updated when a row insertion fails "
"on conflicts must be provided."
)
if unique_fields and not db_features.supports_update_conflicts_with_target:
raise NotSupportedError(
"This database backend does not support updating "
"conflicts with specifying unique fields that can trigger "
"the upsert."
)
if not unique_fields and db_features.supports_update_conflicts_with_target:
raise ValueError(
"Unique fields that can trigger the upsert must be provided."
)
# Updating primary keys and non-concrete fields is forbidden.
update_fields = [self.model._meta.get_field(name) for name in update_fields]
if any(not f.concrete or f.many_to_many for f in update_fields):
raise ValueError(
"bulk_create() can only be used with concrete fields in "
"update_fields."
)
if any(f.primary_key for f in update_fields):
raise ValueError(
"bulk_create() cannot be used with primary keys in "
"update_fields."
)
if unique_fields:
unique_fields = [
self.model._meta.get_field(name) for name in unique_fields
]
if any(not f.concrete or f.many_to_many for f in unique_fields):
raise ValueError(
"bulk_create() can only be used with concrete fields "
"in unique_fields."
)
return OnConflict.UPDATE
return None
def bulk_create(
self,
objs,
batch_size=None,
ignore_conflicts=False,
update_conflicts=False,
update_fields=None,
unique_fields=None,
):
"""
Insert each of the instances into the database. Do *not* call
save() on each of the instances, do not send any pre/post_save
signals, and do not set the primary key attribute if it is an
autoincrement field (except if features.can_return_rows_from_bulk_insert=True).
Multi-table models are not supported.
"""
# When you bulk insert you don't get the primary keys back (if it's an
# autoincrement, except if can_return_rows_from_bulk_insert=True), so
# you can't insert into the child tables which reference this. There
# are two workarounds:
# 1) This could be implemented if you didn't have an autoincrement pk
# 2) You could do it by doing O(n) normal inserts into the parent
# tables to get the primary keys back and then doing a single bulk
# insert into the childmost table.
# We currently set the primary keys on the objects when using
# PostgreSQL via the RETURNING ID clause. It should be possible for
# Oracle as well, but the semantics for extracting the primary keys are
# trickier, so it's not done yet.
if batch_size is not None and batch_size <= 0:
raise ValueError("Batch size must be a positive integer.")
# Check that the parents share the same concrete model as our
# model to detect the inheritance pattern ConcreteGrandParent ->
# MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy
# would not identify that case as involving multiple tables.
for parent in self.model._meta.get_parent_list():
if parent._meta.concrete_model is not self.model._meta.concrete_model:
raise ValueError("Can't bulk create a multi-table inherited model")
if not objs:
return objs
opts = self.model._meta
if unique_fields:
# Primary key is allowed in unique_fields.
unique_fields = [
opts.pk.name if name == "pk" else name for name in unique_fields
]
on_conflict = self._check_bulk_create_options(
ignore_conflicts,
update_conflicts,
update_fields,
unique_fields,
)
self._for_write = True
fields = opts.concrete_fields
objs = list(objs)
self._prepare_for_bulk_create(objs)
with transaction.atomic(using=self.db, savepoint=False):
objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
if objs_with_pk:
returned_columns = self._batched_insert(
objs_with_pk,
fields,
batch_size,
on_conflict=on_conflict,
update_fields=update_fields,
unique_fields=unique_fields,
)
for obj_with_pk, results in zip(objs_with_pk, returned_columns):
for result, field in zip(results, opts.db_returning_fields):
if field != opts.pk:
setattr(obj_with_pk, field.attname, result)
for obj_with_pk in objs_with_pk:
obj_with_pk._state.adding = False
obj_with_pk._state.db = self.db
if objs_without_pk:
fields = [f for f in fields if not isinstance(f, AutoField)]
returned_columns = self._batched_insert(
objs_without_pk,
fields,
batch_size,
on_conflict=on_conflict,
update_fields=update_fields,
unique_fields=unique_fields,
)
connection = connections[self.db]
if (
connection.features.can_return_rows_from_bulk_insert
and on_conflict is None
):
assert len(returned_columns) == len(objs_without_pk)
for obj_without_pk, results in zip(objs_without_pk, returned_columns):
for result, field in zip(results, opts.db_returning_fields):
setattr(obj_without_pk, field.attname, result)
obj_without_pk._state.adding = False
obj_without_pk._state.db = self.db
return objs
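# Illustrative sketch of an upsert via bulk_create(), assuming a hypothetical
# `Product` model with a unique `sku` field and a backend that supports
# conflict updates with a target (e.g. PostgreSQL):
#
#   Product.objects.bulk_create(
#       [Product(sku="a-1", price=10), Product(sku="a-2", price=12)],
#       update_conflicts=True,
#       unique_fields=["sku"],
#       update_fields=["price"],
#   )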
async def abulk_create(
self,
objs,
batch_size=None,
ignore_conflicts=False,
update_conflicts=False,
update_fields=None,
unique_fields=None,
):
return await sync_to_async(self.bulk_create)(
objs=objs,
batch_size=batch_size,
ignore_conflicts=ignore_conflicts,
update_conflicts=update_conflicts,
update_fields=update_fields,
unique_fields=unique_fields,
)
def bulk_update(self, objs, fields, batch_size=None):
"""
Update the given fields in each of the given objects in the database.
"""
if batch_size is not None and batch_size <= 0:
raise ValueError("Batch size must be a positive integer.")
if not fields:
raise ValueError("Field names must be given to bulk_update().")
objs = tuple(objs)
if any(obj.pk is None for obj in objs):
raise ValueError("All bulk_update() objects must have a primary key set.")
fields = [self.model._meta.get_field(name) for name in fields]
if any(not f.concrete or f.many_to_many for f in fields):
raise ValueError("bulk_update() can only be used with concrete fields.")
if any(f.primary_key for f in fields):
raise ValueError("bulk_update() cannot be used with primary key fields.")
if not objs:
return 0
for obj in objs:
obj._prepare_related_fields_for_save(
operation_name="bulk_update", fields=fields
)
# PK is used twice in the resulting update query, once in the filter
# and once in the WHEN. Each field will also have one CAST.
self._for_write = True
connection = connections[self.db]
max_batch_size = connection.ops.bulk_batch_size(["pk", "pk"] + fields, objs)
batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size
requires_casting = connection.features.requires_casted_case_in_updates
batches = (objs[i : i + batch_size] for i in range(0, len(objs), batch_size))
updates = []
for batch_objs in batches:
update_kwargs = {}
for field in fields:
when_statements = []
for obj in batch_objs:
attr = getattr(obj, field.attname)
if not hasattr(attr, "resolve_expression"):
attr = Value(attr, output_field=field)
when_statements.append(When(pk=obj.pk, then=attr))
case_statement = Case(*when_statements, output_field=field)
if requires_casting:
case_statement = Cast(case_statement, output_field=field)
update_kwargs[field.attname] = case_statement
updates.append(([obj.pk for obj in batch_objs], update_kwargs))
rows_updated = 0
queryset = self.using(self.db)
with transaction.atomic(using=self.db, savepoint=False):
for pks, update_kwargs in updates:
rows_updated += queryset.filter(pk__in=pks).update(**update_kwargs)
return rows_updated
bulk_update.alters_data = True
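# Illustrative sketch: bulk_update() writes locally modified instances back in
# batched CASE/WHEN UPDATE statements; assumes a hypothetical `Product` model.
#
#   products = list(Product.objects.all())
#   for product in products:
#       product.price += 1
#   rows = Product.objects.bulk_update(products, ["price"], batch_size=500)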
async def abulk_update(self, objs, fields, batch_size=None):
return await sync_to_async(self.bulk_update)(
objs=objs,
fields=fields,
batch_size=batch_size,
)
abulk_update.alters_data = True
def get_or_create(self, defaults=None, **kwargs):
"""
Look up an object with the given kwargs, creating one if necessary.
Return a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
# The get() needs to be targeted at the write database in order
# to avoid potential transaction consistency problems.
self._for_write = True
try:
return self.get(**kwargs), False
except self.model.DoesNotExist:
params = self._extract_model_params(defaults, **kwargs)
# Try to create an object using passed params.
try:
with transaction.atomic(using=self.db):
params = dict(resolve_callables(params))
return self.create(**params), True
except IntegrityError:
try:
return self.get(**kwargs), False
except self.model.DoesNotExist:
pass
raise
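# Illustrative sketch: get_or_create() looks up by the keyword arguments and
# applies `defaults` only on creation; assumes a hypothetical `Tag` model.
#
#   tag, created = Tag.objects.get_or_create(
#       name="django",
#       defaults={"description": "The web framework"},
#   )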
async def aget_or_create(self, defaults=None, **kwargs):
return await sync_to_async(self.get_or_create)(
defaults=defaults,
**kwargs,
)
def update_or_create(self, defaults=None, **kwargs):
"""
Look up an object with the given kwargs, updating one with defaults
if it exists, otherwise create a new one.
Return a tuple (object, created), where created is a boolean
specifying whether an object was created.
"""
defaults = defaults or {}
self._for_write = True
with transaction.atomic(using=self.db):
# Lock the row so that a concurrent update is blocked until
# update_or_create() has performed its save.
obj, created = self.select_for_update().get_or_create(defaults, **kwargs)
if created:
return obj, created
for k, v in resolve_callables(defaults):
setattr(obj, k, v)
update_fields = set(defaults)
concrete_field_names = self.model._meta._non_pk_concrete_field_names
# update_fields does not support non-concrete fields.
if concrete_field_names.issuperset(update_fields):
# Add fields which are set on pre_save(), e.g. auto_now fields.
# This is to maintain backward compatibility as these fields
# are not updated unless explicitly specified in the
# update_fields list.
for field in self.model._meta.local_concrete_fields:
if not (
field.primary_key or field.__class__.pre_save is Field.pre_save
):
update_fields.add(field.name)
if field.name != field.attname:
update_fields.add(field.attname)
obj.save(using=self.db, update_fields=update_fields)
else:
obj.save(using=self.db)
return obj, False
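# Illustrative sketch: update_or_create() locks the matched row (via
# select_for_update()) before applying `defaults`; assumes a hypothetical
# `Subscription` model.
#
#   sub, created = Subscription.objects.update_or_create(
#       user_id=42,
#       defaults={"plan": "pro"},
#   )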
async def aupdate_or_create(self, defaults=None, **kwargs):
return await sync_to_async(self.update_or_create)(
defaults=defaults,
**kwargs,
)
def _extract_model_params(self, defaults, **kwargs):
"""
Prepare `params` for creating a model instance based on the given
kwargs; for use by get_or_create().
"""
defaults = defaults or {}
params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
params.update(defaults)
property_names = self.model._meta._property_names
invalid_params = []
for param in params:
try:
self.model._meta.get_field(param)
except exceptions.FieldDoesNotExist:
# It's okay to use a model's property if it has a setter.
if not (param in property_names and getattr(self.model, param).fset):
invalid_params.append(param)
if invalid_params:
raise exceptions.FieldError(
"Invalid field name(s) for model %s: '%s'."
% (
self.model._meta.object_name,
"', '".join(sorted(invalid_params)),
)
)
return params
def _earliest(self, *fields):
"""
Return the earliest object according to fields (if given) or by the
model's Meta.get_latest_by.
"""
if fields:
order_by = fields
else:
order_by = getattr(self.model._meta, "get_latest_by")
if order_by and not isinstance(order_by, (tuple, list)):
order_by = (order_by,)
if order_by is None:
raise ValueError(
"earliest() and latest() require either fields as positional "
"arguments or 'get_latest_by' in the model's Meta."
)
obj = self._chain()
obj.query.set_limits(high=1)
obj.query.clear_ordering(force=True)
obj.query.add_ordering(*order_by)
return obj.get()
def earliest(self, *fields):
if self.query.is_sliced:
raise TypeError("Cannot change a query once a slice has been taken.")
return self._earliest(*fields)
async def aearliest(self, *fields):
return await sync_to_async(self.earliest)(*fields)
def latest(self, *fields):
"""
Return the latest object according to fields (if given) or by the
model's Meta.get_latest_by.
"""
if self.query.is_sliced:
raise TypeError("Cannot change a query once a slice has been taken.")
return self.reverse()._earliest(*fields)
async def alatest(self, *fields):
return await sync_to_async(self.latest)(*fields)
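# Illustrative sketch: earliest()/latest() order by the given fields (or by
# Meta.get_latest_by) and return a single object; assumes a hypothetical
# `Entry` model with a `pub_date` field.
#
#   Entry.objects.latest("pub_date")
#   Entry.objects.earliest("pub_date")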
def first(self):
"""Return the first object of a query or None if no match is found."""
if self.ordered:
queryset = self
else:
self._check_ordering_first_last_queryset_aggregation(method="first")
queryset = self.order_by("pk")
for obj in queryset[:1]:
return obj
async def afirst(self):
return await sync_to_async(self.first)()
def last(self):
"""Return the last object of a query or None if no match is found."""
if self.ordered:
queryset = self.reverse()
else:
self._check_ordering_first_last_queryset_aggregation(method="last")
queryset = self.order_by("-pk")
for obj in queryset[:1]:
return obj
async def alast(self):
return await sync_to_async(self.last)()
def in_bulk(self, id_list=None, *, field_name="pk"):
"""
Return a dictionary mapping each of the given IDs to the object with
that ID. If `id_list` isn't provided, evaluate the entire QuerySet.
"""
if self.query.is_sliced:
raise TypeError("Cannot use 'limit' or 'offset' with in_bulk().")
opts = self.model._meta
unique_fields = [
constraint.fields[0]
for constraint in opts.total_unique_constraints
if len(constraint.fields) == 1
]
if (
field_name != "pk"
and not opts.get_field(field_name).unique
and field_name not in unique_fields
and self.query.distinct_fields != (field_name,)
):
raise ValueError(
"in_bulk()'s field_name must be a unique field but %r isn't."
% field_name
)
if id_list is not None:
if not id_list:
return {}
filter_key = "{}__in".format(field_name)
batch_size = connections[self.db].features.max_query_params
id_list = tuple(id_list)
# If the database has a limit on the number of query parameters
# (e.g. SQLite), retrieve objects in batches if necessary.
if batch_size and batch_size < len(id_list):
qs = ()
for offset in range(0, len(id_list), batch_size):
batch = id_list[offset : offset + batch_size]
qs += tuple(self.filter(**{filter_key: batch}).order_by())
else:
qs = self.filter(**{filter_key: id_list}).order_by()
else:
qs = self._chain()
return {getattr(obj, field_name): obj for obj in qs}
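# Illustrative sketch: in_bulk() maps unique field values to instances,
# batching the IN clause when the backend caps query parameters; assumes a
# hypothetical `Entry` model with a unique `slug` field.
#
#   Entry.objects.in_bulk([1, 2, 3])
#   # -> {1: <Entry: ...>, 2: <Entry: ...>, 3: <Entry: ...>}
#   Entry.objects.in_bulk(["hello", "world"], field_name="slug")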
async def ain_bulk(self, id_list=None, *, field_name="pk"):
return await sync_to_async(self.in_bulk)(
id_list=id_list,
field_name=field_name,
)
def delete(self):
"""Delete the records in the current QuerySet."""
self._not_support_combined_queries("delete")
if self.query.is_sliced:
raise TypeError("Cannot use 'limit' or 'offset' with delete().")
if self.query.distinct or self.query.distinct_fields:
raise TypeError("Cannot call delete() after .distinct().")
if self._fields is not None:
raise TypeError("Cannot call delete() after .values() or .values_list()")
del_query = self._chain()
# The delete is actually 2 queries - one to find related objects,
# and one to delete. Make sure that the discovery of related
# objects is performed on the same database as the deletion.
del_query._for_write = True
# Disable non-supported fields.
del_query.query.select_for_update = False
del_query.query.select_related = False
del_query.query.clear_ordering(force=True)
collector = Collector(using=del_query.db, origin=self)
collector.collect(del_query)
deleted, _rows_count = collector.delete()
# Clear the result cache, in case this QuerySet gets reused.
self._result_cache = None
return deleted, _rows_count
delete.alters_data = True
delete.queryset_only = True
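# Illustrative sketch: delete() returns the total number of rows removed and a
# per-model breakdown that includes cascaded relations; assumes a hypothetical
# `Entry` model with related comments.
#
#   deleted, per_model = Entry.objects.filter(pub_date__year=2020).delete()
#   # e.g. (8, {"blog.Entry": 3, "blog.Comment": 5})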
async def adelete(self):
return await sync_to_async(self.delete)()
adelete.alters_data = True
adelete.queryset_only = True
def _raw_delete(self, using):
"""
Delete objects found from the given queryset in single direct SQL
query. No signals are sent and there is no protection for cascades.
"""
query = self.query.clone()
query.__class__ = sql.DeleteQuery
cursor = query.get_compiler(using).execute_sql(CURSOR)
if cursor:
with cursor:
return cursor.rowcount
return 0
_raw_delete.alters_data = True
def update(self, **kwargs):
"""
Update all elements in the current QuerySet, setting all the given
fields to the appropriate values.
"""
self._not_support_combined_queries("update")
if self.query.is_sliced:
raise TypeError("Cannot update a query once a slice has been taken.")
self._for_write = True
query = self.query.chain(sql.UpdateQuery)
query.add_update_values(kwargs)
# Inline annotations in order_by(), if possible.
new_order_by = []
for col in query.order_by:
if annotation := query.annotations.get(col):
if getattr(annotation, "contains_aggregate", False):
raise exceptions.FieldError(
f"Cannot update when ordering by an aggregate: {annotation}"
)
new_order_by.append(annotation)
else:
new_order_by.append(col)
query.order_by = tuple(new_order_by)
# Clear any annotations so that they won't be present in subqueries.
query.annotations = {}
with transaction.mark_for_rollback_on_error(using=self.db):
rows = query.get_compiler(self.db).execute_sql(CURSOR)
self._result_cache = None
return rows
update.alters_data = True
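# Illustrative sketch: update() issues a single UPDATE and returns the number
# of matched rows; F() keeps the arithmetic in SQL. Assumes a hypothetical
# `Entry` model.
#
#   from django.db.models import F
#   rows = Entry.objects.filter(published=True).update(views=F("views") + 1)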
async def aupdate(self, **kwargs):
return await sync_to_async(self.update)(**kwargs)
aupdate.alters_data = True
def _update(self, values):
"""
A version of update() that accepts field objects instead of field names.
Used primarily for model saving and not intended for use by general
code (it requires too much poking around at model internals to be
useful at that level).
"""
if self.query.is_sliced:
raise TypeError("Cannot update a query once a slice has been taken.")
query = self.query.chain(sql.UpdateQuery)
query.add_update_fields(values)
# Clear any annotations so that they won't be present in subqueries.
query.annotations = {}
self._result_cache = None
return query.get_compiler(self.db).execute_sql(CURSOR)
_update.alters_data = True
_update.queryset_only = False
def exists(self):
"""
Return True if the QuerySet would have any results, False otherwise.
"""
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
async def aexists(self):
return await sync_to_async(self.exists)()
def contains(self, obj):
"""
Return True if the QuerySet contains the provided obj,
False otherwise.
"""
self._not_support_combined_queries("contains")
if self._fields is not None:
raise TypeError(
"Cannot call QuerySet.contains() after .values() or .values_list()."
)
try:
if obj._meta.concrete_model != self.model._meta.concrete_model:
return False
except AttributeError:
raise TypeError("'obj' must be a model instance.")
if obj.pk is None:
raise ValueError("QuerySet.contains() cannot be used on unsaved objects.")
if self._result_cache is not None:
return obj in self._result_cache
return self.filter(pk=obj.pk).exists()
async def acontains(self, obj):
return await sync_to_async(self.contains)(obj=obj)
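# Illustrative sketch: exists() and contains() check membership without
# fetching full rows; assumes a hypothetical `Entry` model.
#
#   Entry.objects.filter(published=True).exists()  # cheap SELECT ... LIMIT 1
#   Entry.objects.contains(some_entry)             # pk-based exists() check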
def _prefetch_related_objects(self):
# This method can only be called once the result cache has been filled.
prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
self._prefetch_done = True
def explain(self, *, format=None, **options):
"""
Run an EXPLAIN on the SQL query this QuerySet would perform, and
return the results.
"""
return self.query.explain(using=self.db, format=format, **options)
async def aexplain(self, *, format=None, **options):
return await sync_to_async(self.explain)(format=format, **options)
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def raw(self, raw_query, params=(), translations=None, using=None):
if using is None:
using = self.db
qs = RawQuerySet(
raw_query,
model=self.model,
params=params,
translations=translations,
using=using,
)
qs._prefetch_related_lookups = self._prefetch_related_lookups[:]
return qs
def _values(self, *fields, **expressions):
clone = self._chain()
if expressions:
clone = clone.annotate(**expressions)
clone._fields = fields
clone.query.set_values(fields)
return clone
def values(self, *fields, **expressions):
fields += tuple(expressions)
clone = self._values(*fields, **expressions)
clone._iterable_class = ValuesIterable
return clone
def values_list(self, *fields, flat=False, named=False):
if flat and named:
raise TypeError("'flat' and 'named' can't be used together.")
if flat and len(fields) > 1:
raise TypeError(
"'flat' is not valid when values_list is called with more than one "
"field."
)
field_names = {f for f in fields if not hasattr(f, "resolve_expression")}
_fields = []
expressions = {}
counter = 1
for field in fields:
if hasattr(field, "resolve_expression"):
field_id_prefix = getattr(
field, "default_alias", field.__class__.__name__.lower()
)
while True:
field_id = field_id_prefix + str(counter)
counter += 1
if field_id not in field_names:
break
expressions[field_id] = field
_fields.append(field_id)
else:
_fields.append(field)
clone = self._values(*_fields, **expressions)
clone._iterable_class = (
NamedValuesListIterable
if named
else FlatValuesListIterable
if flat
else ValuesListIterable
)
return clone
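# Illustrative sketch of the three values_list() result shapes, assuming a
# hypothetical `Entry` model:
#
#   Entry.objects.values_list("id", "headline")  # [(1, "Hello"), ...]
#   Entry.objects.values_list("id", flat=True)   # [1, 2, ...]
#   Entry.objects.values_list("id", named=True)  # [Row(id=1), ...]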
def dates(self, field_name, kind, order="ASC"):
"""
Return a list of date objects representing all available dates for
the given field_name, scoped to 'kind'.
"""
if kind not in ("year", "month", "week", "day"):
raise ValueError("'kind' must be one of 'year', 'month', 'week', or 'day'.")
if order not in ("ASC", "DESC"):
raise ValueError("'order' must be either 'ASC' or 'DESC'.")
return (
self.annotate(
datefield=Trunc(field_name, kind, output_field=DateField()),
plain_field=F(field_name),
)
.values_list("datefield", flat=True)
.distinct()
.filter(plain_field__isnull=False)
.order_by(("-" if order == "DESC" else "") + "datefield")
)
# RemovedInDjango50Warning: when the deprecation ends, remove is_dst
# argument.
def datetimes(
self, field_name, kind, order="ASC", tzinfo=None, is_dst=timezone.NOT_PASSED
):
"""
Return a list of datetime objects representing all available
datetimes for the given field_name, scoped to 'kind'.
"""
if kind not in ("year", "month", "week", "day", "hour", "minute", "second"):
raise ValueError(
"'kind' must be one of 'year', 'month', 'week', 'day', "
"'hour', 'minute', or 'second'."
)
if order not in ("ASC", "DESC"):
raise ValueError("'order' must be either 'ASC' or 'DESC'.")
if settings.USE_TZ:
if tzinfo is None:
tzinfo = timezone.get_current_timezone()
else:
tzinfo = None
return (
self.annotate(
datetimefield=Trunc(
field_name,
kind,
output_field=DateTimeField(),
tzinfo=tzinfo,
is_dst=is_dst,
),
plain_field=F(field_name),
)
.values_list("datetimefield", flat=True)
.distinct()
.filter(plain_field__isnull=False)
.order_by(("-" if order == "DESC" else "") + "datetimefield")
)
def none(self):
"""Return an empty QuerySet."""
clone = self._chain()
clone.query.set_empty()
return clone
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Return a new QuerySet that is a copy of the current one. This allows a
QuerySet to proxy for a model manager in some cases.
"""
return self._chain()
def filter(self, *args, **kwargs):
"""
Return a new QuerySet instance with the args ANDed to the existing
set.
"""
self._not_support_combined_queries("filter")
return self._filter_or_exclude(False, args, kwargs)
def exclude(self, *args, **kwargs):
"""
Return a new QuerySet instance with NOT (args) ANDed to the existing
set.
"""
self._not_support_combined_queries("exclude")
return self._filter_or_exclude(True, args, kwargs)
def _filter_or_exclude(self, negate, args, kwargs):
if (args or kwargs) and self.query.is_sliced:
raise TypeError("Cannot filter a query once a slice has been taken.")
clone = self._chain()
if self._defer_next_filter:
self._defer_next_filter = False
clone._deferred_filter = negate, args, kwargs
else:
clone._filter_or_exclude_inplace(negate, args, kwargs)
return clone
def _filter_or_exclude_inplace(self, negate, args, kwargs):
if negate:
self._query.add_q(~Q(*args, **kwargs))
else:
self._query.add_q(Q(*args, **kwargs))
def complex_filter(self, filter_obj):
"""
Return a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object or a dictionary of keyword lookup
arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q):
clone = self._chain()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(False, args=(), kwargs=filter_obj)
def _combinator_query(self, combinator, *other_qs, all=False):
# Clone the query to inherit the select list and everything
clone = self._chain()
# Clear limits and ordering so they can be reapplied
clone.query.clear_ordering(force=True)
clone.query.clear_limits()
clone.query.combined_queries = (self.query,) + tuple(
qs.query for qs in other_qs
)
clone.query.combinator = combinator
clone.query.combinator_all = all
return clone
def union(self, *other_qs, all=False):
# If the query is an EmptyQuerySet, combine all nonempty querysets.
if isinstance(self, EmptyQuerySet):
qs = [q for q in other_qs if not isinstance(q, EmptyQuerySet)]
if not qs:
return self
if len(qs) == 1:
return qs[0]
return qs[0]._combinator_query("union", *qs[1:], all=all)
return self._combinator_query("union", *other_qs, all=all)
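# Illustrative sketch: combinators produce a queryset whose SQL combines both
# queries; most follow-up methods are then unsupported. Assumes a hypothetical
# `Entry` model.
#
#   qs = Entry.objects.filter(pk__lt=5).union(Entry.objects.filter(pk__gt=10))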
def intersection(self, *other_qs):
# If any query is an EmptyQuerySet, return it.
if isinstance(self, EmptyQuerySet):
return self
for other in other_qs:
if isinstance(other, EmptyQuerySet):
return other
return self._combinator_query("intersection", *other_qs)
def difference(self, *other_qs):
# If the query is an EmptyQuerySet, return it.
if isinstance(self, EmptyQuerySet):
return self
return self._combinator_query("difference", *other_qs)
def select_for_update(self, nowait=False, skip_locked=False, of=(), no_key=False):
"""
Return a new QuerySet instance that will select objects with a
FOR UPDATE lock.
"""
if nowait and skip_locked:
raise ValueError("The nowait option cannot be used with skip_locked.")
obj = self._chain()
obj._for_write = True
obj.query.select_for_update = True
obj.query.select_for_update_nowait = nowait
obj.query.select_for_update_skip_locked = skip_locked
obj.query.select_for_update_of = of
obj.query.select_for_no_key_update = no_key
return obj
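# Illustrative sketch: select_for_update() only takes effect inside a
# transaction; assumes a hypothetical `Account` model.
#
#   from django.db import transaction
#   with transaction.atomic():
#       account = Account.objects.select_for_update().get(pk=1)
#       account.balance -= 100
#       account.save()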
def select_related(self, *fields):
"""
Return a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
If select_related(None) is called, clear the list.
"""
self._not_support_combined_queries("select_related")
if self._fields is not None:
raise TypeError(
"Cannot call select_related() after .values() or .values_list()"
)
obj = self._chain()
if fields == (None,):
obj.query.select_related = False
elif fields:
obj.query.add_select_related(fields)
else:
obj.query.select_related = True
return obj
def prefetch_related(self, *lookups):
"""
Return a new QuerySet instance that will prefetch the specified
Many-To-One and Many-To-Many related objects when the QuerySet is
evaluated.
When prefetch_related() is called more than once, append to the list of
prefetch lookups. If prefetch_related(None) is called, clear the list.
"""
self._not_support_combined_queries("prefetch_related")
clone = self._chain()
if lookups == (None,):
clone._prefetch_related_lookups = ()
else:
for lookup in lookups:
if isinstance(lookup, Prefetch):
lookup = lookup.prefetch_to
lookup = lookup.split(LOOKUP_SEP, 1)[0]
if lookup in self.query._filtered_relations:
raise ValueError(
"prefetch_related() is not supported with FilteredRelation."
)
clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
return clone
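# Illustrative sketch: prefetch_related() batches the related lookup into a
# separate query; Prefetch (defined below) allows a custom queryset. Assumes
# hypothetical `Author`/`Book` models.
#
#   Author.objects.prefetch_related(
#       Prefetch("books", queryset=Book.objects.filter(published=True))
#   )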
def annotate(self, *args, **kwargs):
"""
Return a query set in which the returned objects have been annotated
with extra data or aggregations.
"""
self._not_support_combined_queries("annotate")
return self._annotate(args, kwargs, select=True)
def alias(self, *args, **kwargs):
"""
Return a query set with added aliases for extra data or aggregations.
"""
self._not_support_combined_queries("alias")
return self._annotate(args, kwargs, select=False)
def _annotate(self, args, kwargs, select=True):
self._validate_values_are_expressions(
args + tuple(kwargs.values()), method_name="annotate"
)
annotations = {}
for arg in args:
# The default_alias property may raise a TypeError.
try:
if arg.default_alias in kwargs:
raise ValueError(
"The named annotation '%s' conflicts with the "
"default name for another annotation." % arg.default_alias
)
except TypeError:
raise TypeError("Complex annotations require an alias")
annotations[arg.default_alias] = arg
annotations.update(kwargs)
clone = self._chain()
names = self._fields
if names is None:
names = set(
chain.from_iterable(
(field.name, field.attname)
if hasattr(field, "attname")
else (field.name,)
for field in self.model._meta.get_fields()
)
)
for alias, annotation in annotations.items():
if alias in names:
raise ValueError(
"The annotation '%s' conflicts with a field on "
"the model." % alias
)
if isinstance(annotation, FilteredRelation):
clone.query.add_filtered_relation(annotation, alias)
else:
clone.query.add_annotation(
annotation,
alias,
is_summary=False,
select=select,
)
for alias, annotation in clone.query.annotations.items():
if alias in annotations and annotation.contains_aggregate:
if clone._fields is None:
clone.query.group_by = True
else:
clone.query.set_group_by()
break
return clone
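# Illustrative sketch: annotate() exposes the computed value on each returned
# instance, while alias() keeps it query-only (filterable but not selected).
# Assumes a hypothetical `Author` model with a `books` relation.
#
#   from django.db.models import Count
#   Author.objects.annotate(num_books=Count("books")).filter(num_books__gt=2)
#   Author.objects.alias(num_books=Count("books")).filter(num_books__gt=2)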
def order_by(self, *field_names):
"""Return a new QuerySet instance with the ordering changed."""
if self.query.is_sliced:
raise TypeError("Cannot reorder a query once a slice has been taken.")
obj = self._chain()
obj.query.clear_ordering(force=True, clear_default=False)
obj.query.add_ordering(*field_names)
return obj
def distinct(self, *field_names):
"""
Return a new QuerySet instance that will select only distinct results.
"""
self._not_support_combined_queries("distinct")
if self.query.is_sliced:
raise TypeError(
"Cannot create distinct fields once a slice has been taken."
)
obj = self._chain()
obj.query.add_distinct_fields(*field_names)
return obj
def extra(
self,
select=None,
where=None,
params=None,
tables=None,
order_by=None,
select_params=None,
):
"""Add extra SQL fragments to the query."""
self._not_support_combined_queries("extra")
if self.query.is_sliced:
raise TypeError("Cannot change a query once a slice has been taken.")
clone = self._chain()
clone.query.add_extra(select, select_params, where, params, tables, order_by)
return clone
def reverse(self):
"""Reverse the ordering of the QuerySet."""
if self.query.is_sliced:
raise TypeError("Cannot reverse a query once a slice has been taken.")
clone = self._chain()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defer the loading of data for certain fields until they are accessed.
Add the set of deferred fields to any existing set of deferred fields.
The only exception to this is if None is passed in as the only
parameter, in which case all deferrals are removed.
"""
self._not_support_combined_queries("defer")
if self._fields is not None:
raise TypeError("Cannot call defer() after .values() or .values_list()")
clone = self._chain()
if fields == (None,):
clone.query.clear_deferred_loading()
else:
clone.query.add_deferred_loading(fields)
return clone
def only(self, *fields):
"""
Essentially, the opposite of defer(). Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
self._not_support_combined_queries("only")
if self._fields is not None:
raise TypeError("Cannot call only() after .values() or .values_list()")
if fields == (None,):
# Can only pass None to defer(), not only(), as the rest option.
# That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().")
for field in fields:
field = field.split(LOOKUP_SEP, 1)[0]
if field in self.query._filtered_relations:
raise ValueError("only() is not supported with FilteredRelation.")
clone = self._chain()
clone.query.add_immediate_loading(fields)
return clone
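# Illustrative sketch: defer()/only() control which columns are selected up
# front; deferred fields load lazily on first attribute access. Assumes a
# hypothetical `Entry` model.
#
#   Entry.objects.defer("body")     # everything except body
#   Entry.objects.only("headline")  # just headline (plus the pk)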
def using(self, alias):
"""Select which database this QuerySet should execute against."""
clone = self._chain()
clone._db = alias
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
@property
def ordered(self):
"""
Return True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model (or is empty).
"""
if isinstance(self, EmptyQuerySet):
return True
if self.query.extra_order_by or self.query.order_by:
return True
elif (
self.query.default_ordering
and self.query.get_meta().ordering
and
# A default ordering doesn't affect GROUP BY queries.
not self.query.group_by
):
return True
else:
return False
@property
def db(self):
"""Return the database used if this query is executed now."""
if self._for_write:
return self._db or router.db_for_write(self.model, **self._hints)
return self._db or router.db_for_read(self.model, **self._hints)
###################
# PRIVATE METHODS #
###################
def _insert(
self,
objs,
fields,
returning_fields=None,
raw=False,
using=None,
on_conflict=None,
update_fields=None,
unique_fields=None,
):
"""
Insert a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented.
"""
self._for_write = True
if using is None:
using = self.db
query = sql.InsertQuery(
self.model,
on_conflict=on_conflict,
update_fields=update_fields,
unique_fields=unique_fields,
)
query.insert_values(fields, objs, raw=raw)
return query.get_compiler(using=using).execute_sql(returning_fields)
_insert.alters_data = True
_insert.queryset_only = False
def _batched_insert(
self,
objs,
fields,
batch_size,
on_conflict=None,
update_fields=None,
unique_fields=None,
):
"""
Helper method for bulk_create() to insert objs one batch at a time.
"""
connection = connections[self.db]
ops = connection.ops
max_batch_size = max(ops.bulk_batch_size(fields, objs), 1)
batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size
inserted_rows = []
bulk_return = connection.features.can_return_rows_from_bulk_insert
for item in [objs[i : i + batch_size] for i in range(0, len(objs), batch_size)]:
if bulk_return and on_conflict is None:
inserted_rows.extend(
self._insert(
item,
fields=fields,
using=self.db,
returning_fields=self.model._meta.db_returning_fields,
)
)
else:
self._insert(
item,
fields=fields,
using=self.db,
on_conflict=on_conflict,
update_fields=update_fields,
unique_fields=unique_fields,
)
return inserted_rows
def _chain(self):
"""
Return a copy of the current QuerySet that's ready for another
operation.
"""
obj = self._clone()
if obj._sticky_filter:
obj.query.filter_is_sticky = True
obj._sticky_filter = False
return obj
def _clone(self):
"""
Return a copy of the current QuerySet. A lightweight alternative
to deepcopy().
"""
c = self.__class__(
model=self.model,
query=self.query.chain(),
using=self._db,
hints=self._hints,
)
c._sticky_filter = self._sticky_filter
c._for_write = self._for_write
c._prefetch_related_lookups = self._prefetch_related_lookups[:]
c._known_related_objects = self._known_related_objects
c._iterable_class = self._iterable_class
c._fields = self._fields
return c
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self._iterable_class(self))
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def _next_is_sticky(self):
"""
Indicate that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
"""Check that two QuerySet classes may be merged."""
if self._fields is not None and (
set(self.query.values_select) != set(other.query.values_select)
or set(self.query.extra_select) != set(other.query.extra_select)
or set(self.query.annotation_select) != set(other.query.annotation_select)
):
raise TypeError(
"Merging '%s' classes must involve the same values in each case."
% self.__class__.__name__
)
def _merge_known_related_objects(self, other):
"""
Keep track of all known related objects from either QuerySet instance.
"""
for field, objects in other._known_related_objects.items():
self._known_related_objects.setdefault(field, {}).update(objects)
def resolve_expression(self, *args, **kwargs):
if self._fields and len(self._fields) > 1:
# values() queryset can only be used as nested queries
# if they are set up to select only a single field.
raise TypeError("Cannot use multi-field values as a filter value.")
query = self.query.resolve_expression(*args, **kwargs)
query._db = self._db
return query
resolve_expression.queryset_only = True
def _add_hints(self, **hints):
"""
Update hinting information for use by routers. Add new key/values or
overwrite existing key/values.
"""
self._hints.update(hints)
def _has_filters(self):
"""
Check if this QuerySet has any filtering going on. This isn't
equivalent to checking whether all objects are present in the results, for
example, qs[1:]._has_filters() -> False.
"""
return self.query.has_filters()
@staticmethod
def _validate_values_are_expressions(values, method_name):
invalid_args = sorted(
str(arg) for arg in values if not hasattr(arg, "resolve_expression")
)
if invalid_args:
raise TypeError(
"QuerySet.%s() received non-expression(s): %s."
% (
method_name,
", ".join(invalid_args),
)
)
def _not_support_combined_queries(self, operation_name):
if self.query.combinator:
raise NotSupportedError(
"Calling QuerySet.%s() after %s() is not supported."
% (operation_name, self.query.combinator)
)
def _check_operator_queryset(self, other, operator_):
if self.query.combinator or other.query.combinator:
raise TypeError(f"Cannot use {operator_} operator with combined queryset.")
def _check_ordering_first_last_queryset_aggregation(self, method):
if isinstance(self.query.group_by, tuple) and not any(
col.output_field is self.model._meta.pk for col in self.query.group_by
):
raise TypeError(
f"Cannot use QuerySet.{method}() on an unordered queryset performing "
f"aggregation. Add an ordering with order_by()."
)
class InstanceCheckMeta(type):
def __instancecheck__(self, instance):
return isinstance(instance, QuerySet) and instance.query.is_empty()
class EmptyQuerySet(metaclass=InstanceCheckMeta):
"""
Marker class for checking whether a queryset is empty via .none():
isinstance(qs.none(), EmptyQuerySet) -> True
"""
def __init__(self, *args, **kwargs):
raise TypeError("EmptyQuerySet can't be instantiated")
class RawQuerySet:
"""
Provide an iterator which converts the results of raw SQL queries into
annotated model instances.
"""
def __init__(
self,
raw_query,
model=None,
query=None,
params=(),
translations=None,
using=None,
hints=None,
):
self.raw_query = raw_query
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
self.params = params
self.translations = translations or {}
self._result_cache = None
self._prefetch_related_lookups = ()
self._prefetch_done = False
def resolve_model_init_order(self):
"""Resolve the init field names and value positions."""
converter = connections[self.db].introspection.identifier_converter
model_init_fields = [
f for f in self.model._meta.fields if converter(f.column) in self.columns
]
annotation_fields = [
(column, pos)
for pos, column in enumerate(self.columns)
if column not in self.model_fields
]
model_init_order = [
self.columns.index(converter(f.column)) for f in model_init_fields
]
model_init_names = [f.attname for f in model_init_fields]
return model_init_names, model_init_order, annotation_fields
def prefetch_related(self, *lookups):
"""Same as QuerySet.prefetch_related()"""
clone = self._clone()
if lookups == (None,):
clone._prefetch_related_lookups = ()
else:
clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
return clone
def _prefetch_related_objects(self):
prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
self._prefetch_done = True
def _clone(self):
"""Same as QuerySet._clone()"""
c = self.__class__(
self.raw_query,
model=self.model,
query=self.query,
params=self.params,
translations=self.translations,
using=self._db,
hints=self._hints,
)
c._prefetch_related_lookups = self._prefetch_related_lookups[:]
return c
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self.iterator())
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __iter__(self):
self._fetch_all()
return iter(self._result_cache)
def __aiter__(self):
# Remember, __aiter__ itself is synchronous, it's the thing it returns
# that is async!
async def generator():
await sync_to_async(self._fetch_all)()
for item in self._result_cache:
yield item
return generator()
def iterator(self):
yield from RawModelIterable(self)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.query)
def __getitem__(self, k):
return list(self)[k]
@property
def db(self):
"""Return the database used if this query is executed now."""
return self._db or router.db_for_read(self.model, **self._hints)
def using(self, alias):
"""Select the database this RawQuerySet should execute against."""
return RawQuerySet(
self.raw_query,
model=self.model,
query=self.query.chain(using=alias),
params=self.params,
translations=self.translations,
using=alias,
)
@cached_property
def columns(self):
"""
A list of model field names in the order they'll appear in the
query results.
"""
columns = self.query.get_columns()
# Adjust any column names which don't match field names
for (query_name, model_name) in self.translations.items():
# Ignore translations for nonexistent column names
try:
index = columns.index(query_name)
except ValueError:
pass
else:
columns[index] = model_name
return columns
@cached_property
def model_fields(self):
"""A dict mapping column names to model field names."""
converter = connections[self.db].introspection.identifier_converter
model_fields = {}
for field in self.model._meta.fields:
name, column = field.get_attname_column()
model_fields[converter(column)] = field
return model_fields
class Prefetch:
def __init__(self, lookup, queryset=None, to_attr=None):
# `prefetch_through` is the path we traverse to perform the prefetch.
self.prefetch_through = lookup
# `prefetch_to` is the path to the attribute that stores the result.
self.prefetch_to = lookup
if queryset is not None and (
isinstance(queryset, RawQuerySet)
or (
hasattr(queryset, "_iterable_class")
and not issubclass(queryset._iterable_class, ModelIterable)
)
):
raise ValueError(
"Prefetch querysets cannot use raw(), values(), and values_list()."
)
if to_attr:
self.prefetch_to = LOOKUP_SEP.join(
lookup.split(LOOKUP_SEP)[:-1] + [to_attr]
)
self.queryset = queryset
self.to_attr = to_attr
def __getstate__(self):
obj_dict = self.__dict__.copy()
if self.queryset is not None:
queryset = self.queryset._chain()
# Prevent the QuerySet from being evaluated
queryset._result_cache = []
queryset._prefetch_done = True
obj_dict["queryset"] = queryset
return obj_dict
def add_prefix(self, prefix):
self.prefetch_through = prefix + LOOKUP_SEP + self.prefetch_through
self.prefetch_to = prefix + LOOKUP_SEP + self.prefetch_to
def get_current_prefetch_to(self, level):
return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[: level + 1])
def get_current_to_attr(self, level):
parts = self.prefetch_to.split(LOOKUP_SEP)
to_attr = parts[level]
as_attr = self.to_attr and level == len(parts) - 1
return to_attr, as_attr
def get_current_queryset(self, level):
if self.get_current_prefetch_to(level) == self.prefetch_to:
return self.queryset
return None
def __eq__(self, other):
if not isinstance(other, Prefetch):
return NotImplemented
return self.prefetch_to == other.prefetch_to
def __hash__(self):
return hash((self.__class__, self.prefetch_to))
def normalize_prefetch_lookups(lookups, prefix=None):
"""Normalize lookups into Prefetch objects."""
ret = []
for lookup in lookups:
if not isinstance(lookup, Prefetch):
lookup = Prefetch(lookup)
if prefix:
lookup.add_prefix(prefix)
ret.append(lookup)
return ret
def prefetch_related_objects(model_instances, *related_lookups):
"""
Populate prefetched object caches for a list of model instances based on
the lookups/Prefetch instances given.
"""
if not model_instances:
return # nothing to do
# We need to be able to dynamically add to the list of prefetch_related
# lookups that we look up (see below). So we need some bookkeeping to
# ensure we don't do duplicate work.
done_queries = {} # dictionary of things like 'foo__bar': [results]
auto_lookups = set() # we add to this as we go through.
followed_descriptors = set() # recursion protection
all_lookups = normalize_prefetch_lookups(reversed(related_lookups))
while all_lookups:
lookup = all_lookups.pop()
if lookup.prefetch_to in done_queries:
if lookup.queryset is not None:
raise ValueError(
"'%s' lookup was already seen with a different queryset. "
"You may need to adjust the ordering of your lookups."
% lookup.prefetch_to
)
continue
# Top level, the list of objects to decorate is the result cache
# from the primary QuerySet. It won't be for deeper levels.
obj_list = model_instances
through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
for level, through_attr in enumerate(through_attrs):
# Prepare main instances
if not obj_list:
break
prefetch_to = lookup.get_current_prefetch_to(level)
if prefetch_to in done_queries:
# Skip any prefetching, and any object preparation
obj_list = done_queries[prefetch_to]
continue
# Prepare objects:
good_objects = True
for obj in obj_list:
# Since prefetching can re-use instances, it is possible to have
# the same instance multiple times in obj_list, so obj might
# already be prepared.
if not hasattr(obj, "_prefetched_objects_cache"):
try:
obj._prefetched_objects_cache = {}
except (AttributeError, TypeError):
# Must be an immutable object from
# values_list(flat=True), for example (TypeError) or
# a QuerySet subclass that isn't returning Model
# instances (AttributeError), either in Django or a 3rd
# party. prefetch_related() doesn't make sense, so quit.
good_objects = False
break
if not good_objects:
break
# Descend down tree
# We assume that objects retrieved are homogeneous (which is the premise
# of prefetch_related), so what applies to the first object applies to all.
first_obj = obj_list[0]
to_attr = lookup.get_current_to_attr(level)[0]
prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(
first_obj, through_attr, to_attr
)
if not attr_found:
raise AttributeError(
"Cannot find '%s' on %s object, '%s' is an invalid "
"parameter to prefetch_related()"
% (
through_attr,
first_obj.__class__.__name__,
lookup.prefetch_through,
)
)
if level == len(through_attrs) - 1 and prefetcher is None:
# Last one, this *must* resolve to something that supports
# prefetching, otherwise there is no point adding it and the
# developer asking for it has made a mistake.
raise ValueError(
"'%s' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to "
"prefetch_related()." % lookup.prefetch_through
)
obj_to_fetch = None
if prefetcher is not None:
obj_to_fetch = [obj for obj in obj_list if not is_fetched(obj)]
if obj_to_fetch:
obj_list, additional_lookups = prefetch_one_level(
obj_to_fetch,
prefetcher,
lookup,
level,
)
# We need to ensure we don't keep adding lookups from the
# same relationships to stop infinite recursion. So, if we
# are already on an automatically added lookup, don't add
# the new lookups from relationships we've seen already.
if not (
prefetch_to in done_queries
and lookup in auto_lookups
and descriptor in followed_descriptors
):
done_queries[prefetch_to] = obj_list
new_lookups = normalize_prefetch_lookups(
reversed(additional_lookups), prefetch_to
)
auto_lookups.update(new_lookups)
all_lookups.extend(new_lookups)
followed_descriptors.add(descriptor)
else:
# Either a singly related object that has already been fetched
# (e.g. via select_related), or hopefully some other property
# that doesn't support prefetching but needs to be traversed.
# We replace the current list of parent objects with the list
# of related objects, filtering out empty or missing values so
# that we can continue with nullable or reverse relations.
new_obj_list = []
for obj in obj_list:
if through_attr in getattr(obj, "_prefetched_objects_cache", ()):
# If related objects have been prefetched, use the
# cache rather than the object's through_attr.
new_obj = list(obj._prefetched_objects_cache.get(through_attr))
else:
try:
new_obj = getattr(obj, through_attr)
except exceptions.ObjectDoesNotExist:
continue
if new_obj is None:
continue
# We special-case `list` rather than something more generic
# like `Iterable` because we don't want to accidentally match
# user models that define __iter__.
if isinstance(new_obj, list):
new_obj_list.extend(new_obj)
else:
new_obj_list.append(new_obj)
obj_list = new_obj_list
def get_prefetcher(instance, through_attr, to_attr):
"""
For the attribute 'through_attr' on the given instance, find
an object that has a get_prefetch_queryset().
Return a 4-tuple containing:
(the object with get_prefetch_queryset (or None),
the descriptor object representing this relationship (or None),
a boolean that is False if the attribute was not found at all,
a function that takes an instance and returns a boolean that is True if
the attribute has already been fetched for that instance)
"""
def has_to_attr_attribute(instance):
return hasattr(instance, to_attr)
prefetcher = None
is_fetched = has_to_attr_attribute
# For singly related objects, we have to avoid getting the attribute
# from the object, as this will trigger the query. So we first try
# on the class, in order to get the descriptor object.
rel_obj_descriptor = getattr(instance.__class__, through_attr, None)
if rel_obj_descriptor is None:
attr_found = hasattr(instance, through_attr)
else:
attr_found = True
if rel_obj_descriptor:
# singly related object, descriptor object has the
# get_prefetch_queryset() method.
if hasattr(rel_obj_descriptor, "get_prefetch_queryset"):
prefetcher = rel_obj_descriptor
is_fetched = rel_obj_descriptor.is_cached
else:
# descriptor doesn't support prefetching, so we go ahead and get
# the attribute on the instance rather than the class to
# support many related managers
rel_obj = getattr(instance, through_attr)
if hasattr(rel_obj, "get_prefetch_queryset"):
prefetcher = rel_obj
if through_attr != to_attr:
# Special case cached_property instances because hasattr
# triggers attribute computation and assignment.
if isinstance(
getattr(instance.__class__, to_attr, None), cached_property
):
def has_cached_property(instance):
return to_attr in instance.__dict__
is_fetched = has_cached_property
else:
def in_prefetched_cache(instance):
return through_attr in instance._prefetched_objects_cache
is_fetched = in_prefetched_cache
return prefetcher, rel_obj_descriptor, attr_found, is_fetched
def prefetch_one_level(instances, prefetcher, lookup, level):
"""
Helper function for prefetch_related_objects().
Run prefetches on all instances using the prefetcher object,
assigning results to relevant caches in instance.
Return the prefetched objects along with any additional prefetches that
must be done due to prefetch_related lookups found from default managers.
"""
# prefetcher must have a method get_prefetch_queryset() which takes a list
# of instances, and returns a tuple:
# (queryset of instances of self.model that are related to passed in instances,
# callable that gets value to be matched for returned instances,
# callable that gets value to be matched for passed in instances,
# boolean that is True for singly related objects,
# cache or field name to assign to,
# boolean that is True when the previous argument is a cache name vs a field name).
# The 'values to be matched' must be hashable as they will be used
# in a dictionary.
(
rel_qs,
rel_obj_attr,
instance_attr,
single,
cache_name,
is_descriptor,
) = prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level))
# We have to handle the possibility that the QuerySet we just got back
# contains some prefetch_related lookups. We don't want to trigger the
# prefetch_related functionality by evaluating the query. Rather, we need
# to merge in the prefetch_related lookups.
# Copy the lookups in case it is a Prefetch object which could be reused
# later (happens in nested prefetch_related).
additional_lookups = [
copy.copy(additional_lookup)
for additional_lookup in getattr(rel_qs, "_prefetch_related_lookups", ())
]
if additional_lookups:
# Don't need to clone because the manager should have given us a fresh
# instance, so we access an internal instead of using public interface
# for performance reasons.
rel_qs._prefetch_related_lookups = ()
all_related_objects = list(rel_qs)
rel_obj_cache = {}
for rel_obj in all_related_objects:
rel_attr_val = rel_obj_attr(rel_obj)
rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)
to_attr, as_attr = lookup.get_current_to_attr(level)
# Make sure `to_attr` does not conflict with a field.
if as_attr and instances:
# We assume that objects retrieved are homogeneous (which is the premise
# of prefetch_related), so what applies to the first object applies to all.
model = instances[0].__class__
try:
model._meta.get_field(to_attr)
except exceptions.FieldDoesNotExist:
pass
else:
msg = "to_attr={} conflicts with a field on the {} model."
raise ValueError(msg.format(to_attr, model.__name__))
# Whether or not we're prefetching the last part of the lookup.
leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level
for obj in instances:
instance_attr_val = instance_attr(obj)
vals = rel_obj_cache.get(instance_attr_val, [])
if single:
val = vals[0] if vals else None
if as_attr:
# A to_attr has been given for the prefetch.
setattr(obj, to_attr, val)
elif is_descriptor:
# cache_name points to a field name in obj.
# This field is a descriptor for a related object.
setattr(obj, cache_name, val)
else:
# No to_attr has been given for this prefetch operation and the
# cache_name does not point to a descriptor. Store the value of
# the field in the object's field cache.
obj._state.fields_cache[cache_name] = val
else:
if as_attr:
setattr(obj, to_attr, vals)
else:
manager = getattr(obj, to_attr)
if leaf and lookup.queryset is not None:
qs = manager._apply_rel_filters(lookup.queryset)
else:
qs = manager.get_queryset()
qs._result_cache = vals
# We don't want the individual qs doing prefetch_related now,
# since we have merged this into the current work.
qs._prefetch_done = True
obj._prefetched_objects_cache[cache_name] = qs
return all_related_objects, additional_lookups
class RelatedPopulator:
"""
RelatedPopulator is used for select_related() object instantiation.
The idea is that each select_related() model will be populated by a
different RelatedPopulator instance. The RelatedPopulator instances get
klass_info and select (computed in SQLCompiler) plus the used db as
input for initialization. That data is used to compute which columns
to use, how to instantiate the model, and how to populate the links
between the objects.
The actual creation of the objects is done in populate() method. This
method gets row and from_obj as input and populates the select_related()
model instance.
"""
def __init__(self, klass_info, select, db):
self.db = db
# Pre-compute needed attributes. The attributes are:
# - model_cls: the possibly deferred model class to instantiate
# - either:
# - cols_start, cols_end: usually the columns in the row are
# in the same order model_cls.__init__ expects them, so we
# can instantiate by model_cls(*row[cols_start:cols_end])
# - reorder_for_init: When select_related descends to a child
# class, then we want to reuse the already selected parent
# data. However, in this case the parent data isn't necessarily
# in the same order that Model.__init__ expects it to be, so
# we have to reorder the parent data. The reorder_for_init
# attribute contains a function used to reorder the field data
# in the order __init__ expects it.
# - pk_idx: the index of the primary key field in the reordered
# model data. Used to check if a related object exists at all.
# - init_list: the field attnames fetched from the database. For
# deferred models this isn't the same as all attnames of the
# model's fields.
# - related_populators: a list of RelatedPopulator instances if
# select_related() descends to related models from this model.
# - local_setter, remote_setter: Methods to set cached values on
# the object being populated and on the remote object. Usually
# these are Field.set_cached_value() methods.
select_fields = klass_info["select_fields"]
from_parent = klass_info["from_parent"]
if not from_parent:
self.cols_start = select_fields[0]
self.cols_end = select_fields[-1] + 1
self.init_list = [
f[0].target.attname for f in select[self.cols_start : self.cols_end]
]
self.reorder_for_init = None
else:
attname_indexes = {
select[idx][0].target.attname: idx for idx in select_fields
}
model_init_attnames = (
f.attname for f in klass_info["model"]._meta.concrete_fields
)
self.init_list = [
attname for attname in model_init_attnames if attname in attname_indexes
]
self.reorder_for_init = operator.itemgetter(
*[attname_indexes[attname] for attname in self.init_list]
)
self.model_cls = klass_info["model"]
self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
self.related_populators = get_related_populators(klass_info, select, self.db)
self.local_setter = klass_info["local_setter"]
self.remote_setter = klass_info["remote_setter"]
def populate(self, row, from_obj):
if self.reorder_for_init:
obj_data = self.reorder_for_init(row)
else:
obj_data = row[self.cols_start : self.cols_end]
if obj_data[self.pk_idx] is None:
obj = None
else:
obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
for rel_iter in self.related_populators:
rel_iter.populate(row, obj)
self.local_setter(from_obj, obj)
if obj is not None:
self.remote_setter(obj, from_obj)
def get_related_populators(klass_info, select, db):
iterators = []
related_klass_infos = klass_info.get("related_klass_infos", [])
for rel_klass_info in related_klass_infos:
rel_cls = RelatedPopulator(rel_klass_info, select, db)
iterators.append(rel_cls)
return iterators
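# Illustrative sketch (not part of Django): RelatedPopulator is what runs when
# a queryset uses select_related(), so a single result row can populate a
# chain of related instances. The model and field names below are hypothetical.
def _example_select_related(qs):
    book = qs.select_related("author__publisher").first()
    # One query was issued; both related objects are already cached.
    return book.author.publisher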
|
6602d26f94fc3d99cd9fb3f940fc5f79d6679d65dd61c2ed35db4c8ace40f8d6 | """
Various data structures used in query construction.
Factored out from django.db.models.query to avoid making the main module very
large and/or so that they can be used by other modules without getting into
circular import difficulties.
"""
import functools
import inspect
import logging
from collections import namedtuple
from django.core.exceptions import FieldError
from django.db import DEFAULT_DB_ALIAS, DatabaseError, connections
from django.db.models.constants import LOOKUP_SEP
from django.utils import tree
logger = logging.getLogger("django.db.models")
# PathInfo is used when converting lookups (fk__somecol). The contents
# describe the relation in Model terms (model Options and Fields for both
# sides of the relation). The join_field is the field backing the relation.
PathInfo = namedtuple(
"PathInfo",
"from_opts to_opts target_fields join_field m2m direct filtered_relation",
)
def subclasses(cls):
yield cls
for subclass in cls.__subclasses__():
yield from subclasses(subclass)
class Q(tree.Node):
"""
Encapsulate filters as objects that can then be combined logically (using
`&` and `|`).
"""
# Connection types
AND = "AND"
OR = "OR"
XOR = "XOR"
default = AND
conditional = True
def __init__(self, *args, _connector=None, _negated=False, **kwargs):
super().__init__(
children=[*args, *sorted(kwargs.items())],
connector=_connector,
negated=_negated,
)
def _combine(self, other, conn):
if getattr(other, "conditional", False) is False:
raise TypeError(other)
if not self:
return other.copy()
if not other and isinstance(other, Q):
return self.copy()
obj = self.create(connector=conn)
obj.add(self, conn)
obj.add(other, conn)
return obj
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
def __xor__(self, other):
return self._combine(other, self.XOR)
def __invert__(self):
obj = self.copy()
obj.negate()
return obj
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
# We must promote any new joins to left outer joins so that when Q is
# used as an expression, rows aren't filtered due to joins.
clause, joins = query._add_q(
self,
reuse,
allow_joins=allow_joins,
split_subq=False,
check_filterable=False,
summarize=summarize,
)
query.promote_joins(joins)
return clause
def flatten(self):
"""
Recursively yield this Q object and all subexpressions, in depth-first
order.
"""
yield self
for child in self.children:
if isinstance(child, tuple):
# Use the lookup.
child = child[1]
if hasattr(child, "flatten"):
yield from child.flatten()
else:
yield child
def check(self, against, using=DEFAULT_DB_ALIAS):
"""
Do a database query to check whether the expressions of the Q instance
match against the given expressions.
"""
# Avoid circular imports.
from django.db.models import BooleanField, Value
from django.db.models.functions import Coalesce
from django.db.models.sql import Query
from django.db.models.sql.constants import SINGLE
query = Query(None)
for name, value in against.items():
if not hasattr(value, "resolve_expression"):
value = Value(value)
query.add_annotation(value, name, select=False)
query.add_annotation(Value(1), "_check")
# This will raise a FieldError if a field is missing in "against".
if connections[using].features.supports_comparing_boolean_expr:
query.add_q(Q(Coalesce(self, True, output_field=BooleanField())))
else:
query.add_q(self)
compiler = query.get_compiler(using=using)
try:
return compiler.execute_sql(SINGLE) is not None
except DatabaseError as e:
logger.warning("Got a database error calling check() on %r: %s", self, e)
return True
def deconstruct(self):
path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
if path.startswith("django.db.models.query_utils"):
path = path.replace("django.db.models.query_utils", "django.db.models")
args = tuple(self.children)
kwargs = {}
if self.connector != self.default:
kwargs["_connector"] = self.connector
if self.negated:
kwargs["_negated"] = True
return path, args, kwargs
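# Illustrative sketch (not part of Django): Q nodes combine with `&`, `|`,
# `^`, and `~` into a tree that QuerySet.filter() accepts. `qs` stands for a
# hypothetical queryset and the field names are made up.
def _example_q_usage(qs):
    condition = (Q(rating__gte=4) | Q(featured=True)) & ~Q(archived=True)
    return qs.filter(condition)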
class DeferredAttribute:
"""
A wrapper for a deferred-loading field. When the value is read from this
object the first time, the query is executed.
"""
def __init__(self, field):
self.field = field
def __get__(self, instance, cls=None):
"""
Retrieve and cache the value from the datastore on the first lookup.
Return the cached value.
"""
if instance is None:
return self
data = instance.__dict__
field_name = self.field.attname
if field_name not in data:
# Let's see if the field is part of the parent chain. If so we
# might be able to reuse the already loaded value. Refs #18343.
val = self._check_parent_chain(instance)
if val is None:
instance.refresh_from_db(fields=[field_name])
else:
data[field_name] = val
return data[field_name]
def _check_parent_chain(self, instance):
"""
Check if the field value can be fetched from a parent field already
loaded in the instance. This can be done if the to-be-fetched field is
a primary key field.
"""
opts = instance._meta
link_field = opts.get_ancestor_link(self.field.model)
if self.field.primary_key and self.field != link_field:
return getattr(instance, link_field.attname)
return None
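# Illustrative sketch (not part of Django): fields omitted via defer()/only()
# are backed by DeferredAttribute, so the first attribute access triggers
# refresh_from_db() for just that field. `qs` and "body" are hypothetical.
def _example_deferred_access(qs):
    obj = qs.defer("body").first()  # "body" is not selected by this query.
    return obj.body  # First access loads the field with one extra query.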
class class_or_instance_method:
"""
Hook used in RegisterLookupMixin to return partial functions depending on
the caller type (instance or class of models.Field).
"""
def __init__(self, class_method, instance_method):
self.class_method = class_method
self.instance_method = instance_method
def __get__(self, instance, owner):
if instance is None:
return functools.partial(self.class_method, owner)
return functools.partial(self.instance_method, instance)
class RegisterLookupMixin:
def _get_lookup(self, lookup_name):
return self.get_lookups().get(lookup_name, None)
@functools.lru_cache(maxsize=None)
def get_class_lookups(cls):
class_lookups = [
parent.__dict__.get("class_lookups", {}) for parent in inspect.getmro(cls)
]
return cls.merge_dicts(class_lookups)
def get_instance_lookups(self):
class_lookups = self.get_class_lookups()
if instance_lookups := getattr(self, "instance_lookups", None):
return {**class_lookups, **instance_lookups}
return class_lookups
get_lookups = class_or_instance_method(get_class_lookups, get_instance_lookups)
get_class_lookups = classmethod(get_class_lookups)
def get_lookup(self, lookup_name):
from django.db.models.lookups import Lookup
found = self._get_lookup(lookup_name)
if found is None and hasattr(self, "output_field"):
return self.output_field.get_lookup(lookup_name)
if found is not None and not issubclass(found, Lookup):
return None
return found
def get_transform(self, lookup_name):
from django.db.models.lookups import Transform
found = self._get_lookup(lookup_name)
if found is None and hasattr(self, "output_field"):
return self.output_field.get_transform(lookup_name)
if found is not None and not issubclass(found, Transform):
return None
return found
@staticmethod
def merge_dicts(dicts):
"""
Merge dicts in reverse so that earlier dicts take precedence, e.g.
merge_dicts([a, b]) prefers the keys in 'a' over those in 'b'.
"""
merged = {}
for d in reversed(dicts):
merged.update(d)
return merged
@classmethod
def _clear_cached_class_lookups(cls):
for subclass in subclasses(cls):
subclass.get_class_lookups.cache_clear()
def register_class_lookup(cls, lookup, lookup_name=None):
if lookup_name is None:
lookup_name = lookup.lookup_name
if "class_lookups" not in cls.__dict__:
cls.class_lookups = {}
cls.class_lookups[lookup_name] = lookup
cls._clear_cached_class_lookups()
return lookup
def register_instance_lookup(self, lookup, lookup_name=None):
if lookup_name is None:
lookup_name = lookup.lookup_name
if "instance_lookups" not in self.__dict__:
self.instance_lookups = {}
self.instance_lookups[lookup_name] = lookup
return lookup
register_lookup = class_or_instance_method(
register_class_lookup, register_instance_lookup
)
register_class_lookup = classmethod(register_class_lookup)
def _unregister_class_lookup(cls, lookup, lookup_name=None):
"""
Remove given lookup from cls lookups. For use in tests only as it's
not thread-safe.
"""
if lookup_name is None:
lookup_name = lookup.lookup_name
del cls.class_lookups[lookup_name]
cls._clear_cached_class_lookups()
def _unregister_instance_lookup(self, lookup, lookup_name=None):
"""
Remove given lookup from instance lookups. For use in tests only as
it's not thread-safe.
"""
if lookup_name is None:
lookup_name = lookup.lookup_name
del self.instance_lookups[lookup_name]
_unregister_lookup = class_or_instance_method(
_unregister_class_lookup, _unregister_instance_lookup
)
_unregister_class_lookup = classmethod(_unregister_class_lookup)
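# Illustrative sketch (not part of Django): registering a custom lookup on a
# field class through RegisterLookupMixin.register_lookup. `NotEqual` is a
# hypothetical Lookup subclass written only for demonstration.
def _example_register_lookup():
    from django.db.models import CharField
    from django.db.models.lookups import Lookup

    class NotEqual(Lookup):
        lookup_name = "ne"

        def as_sql(self, compiler, connection):
            lhs, lhs_params = self.process_lhs(compiler, connection)
            rhs, rhs_params = self.process_rhs(compiler, connection)
            return "%s <> %s" % (lhs, rhs), [*lhs_params, *rhs_params]

    CharField.register_lookup(NotEqual)
    # CharField columns now accept filters such as .filter(name__ne="spam").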
def select_related_descend(field, restricted, requested, select_mask, reverse=False):
"""
Return True if this field should be used to descend deeper for
select_related() purposes. Used by both the query construction code
(compiler.get_related_selections()) and the model instance creation code
(compiler.klass_info).
Arguments:
* field - the field to be checked
* restricted - a boolean indicating whether the field list has been
manually restricted using a requested clause
* requested - The select_related() dictionary.
* select_mask - the dictionary of selected fields.
* reverse - boolean, True if we are checking a reverse select related
"""
if not field.remote_field:
return False
if field.remote_field.parent_link and not reverse:
return False
if restricted:
if reverse and field.related_query_name() not in requested:
return False
if not reverse and field.name not in requested:
return False
if not restricted and field.null:
return False
if (
restricted
and select_mask
and field.name in requested
and field not in select_mask
):
raise FieldError(
f"Field {field.model._meta.object_name}.{field.name} cannot be both "
"deferred and traversed using select_related at the same time."
)
return True
def refs_expression(lookup_parts, annotations):
"""
Check if the lookup_parts contains references to the given annotations set.
Because the LOOKUP_SEP is contained in the default annotation names, check
each prefix of the lookup_parts for a match.
"""
for n in range(1, len(lookup_parts) + 1):
level_n_lookup = LOOKUP_SEP.join(lookup_parts[0:n])
if annotations.get(level_n_lookup):
return level_n_lookup, lookup_parts[n:]
return None, ()
def check_rel_lookup_compatibility(model, target_opts, field):
"""
Check that the model is compatible with target_opts. Compatibility
is OK if:
1) model and opts match (where proxy inheritance is removed)
2) model is parent of opts' model or the other way around
"""
def check(opts):
return (
model._meta.concrete_model == opts.concrete_model
or opts.concrete_model in model._meta.get_parent_list()
or model in opts.get_parent_list()
)
# If the field is a primary key, then doing a query against the field's
# model is ok, too. Consider the case:
# class Restaurant(models.Model):
#     place = OneToOneField(Place, primary_key=True)
# Restaurant.objects.filter(pk__in=Restaurant.objects.all()).
# If we didn't have the primary key check, then pk__in (== place__in) would
# give Place's opts as the target opts, but Restaurant isn't compatible
# with that. This logic applies only to primary keys, as when doing __in=qs,
# we are going to turn this into __in=qs.values('pk') later on.
return check(target_opts) or (
getattr(field, "primary_key", False) and check(field.model._meta)
)
class FilteredRelation:
"""Specify custom filtering in the ON clause of SQL joins."""
def __init__(self, relation_name, *, condition=Q()):
if not relation_name:
raise ValueError("relation_name cannot be empty.")
self.relation_name = relation_name
self.alias = None
if not isinstance(condition, Q):
raise ValueError("condition argument must be a Q() instance.")
self.condition = condition
self.path = []
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return (
self.relation_name == other.relation_name
and self.alias == other.alias
and self.condition == other.condition
)
def clone(self):
clone = FilteredRelation(self.relation_name, condition=self.condition)
clone.alias = self.alias
clone.path = self.path[:]
return clone
def resolve_expression(self, *args, **kwargs):
"""
QuerySet.annotate() only accepts expression-like arguments
(with a resolve_expression() method).
"""
raise NotImplementedError("FilteredRelation.resolve_expression() is unused.")
def as_sql(self, compiler, connection):
# Resolve the condition in Join.filtered_relation.
query = compiler.query
where = query.build_filtered_relation_q(self.condition, reuse=set(self.path))
return compiler.compile(where)
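# Illustrative sketch (not part of Django): FilteredRelation pushes a
# condition into the JOIN's ON clause rather than the WHERE clause, so rows
# without a match survive the join. Relation and field names are hypothetical.
def _example_filtered_relation(qs):
    return qs.annotate(
        vegetarian_pizzas=FilteredRelation(
            "pizzas", condition=Q(pizzas__vegetarian=True)
        )
    ).filter(vegetarian_pizzas__name__icontains="mozzarella")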
|
23d203834a79312f4e5571832c2938b99421b7d6de13206f47c70f42cc2c57e3 | import copy
import datetime
import functools
import inspect
import warnings
from collections import defaultdict
from decimal import Decimal
from uuid import UUID
from django.core.exceptions import EmptyResultSet, FieldError, FullResultSet
from django.db import DatabaseError, NotSupportedError, connection
from django.db.models import fields
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import Q
from django.utils.deconstruct import deconstructible
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
class SQLiteNumericMixin:
"""
Some expressions with output_field=DecimalField() must be cast to
numeric to be properly filtered.
"""
def as_sqlite(self, compiler, connection, **extra_context):
sql, params = self.as_sql(compiler, connection, **extra_context)
try:
if self.output_field.get_internal_type() == "DecimalField":
sql = "CAST(%s AS NUMERIC)" % sql
except FieldError:
pass
return sql, params
class Combinable:
"""
Provide the ability to combine one or two objects with
some connector. For example F('foo') + F('bar').
"""
# Arithmetic connectors
ADD = "+"
SUB = "-"
MUL = "*"
DIV = "/"
POW = "^"
# The following is a quoted % operator - it is quoted because it can be
# used in strings that also have parameter substitution.
MOD = "%%"
# Bitwise operators - note that these are generated by .bitand() and
# .bitor(); the '&' and '|' operators are reserved for boolean logic.
BITAND = "&"
BITOR = "|"
BITLEFTSHIFT = "<<"
BITRIGHTSHIFT = ">>"
BITXOR = "#"
def _combine(self, other, connector, reversed):
if not hasattr(other, "resolve_expression"):
# everything must be resolvable to an expression
other = Value(other)
if reversed:
return CombinedExpression(other, connector, self)
return CombinedExpression(self, connector, other)
#############
# OPERATORS #
#############
def __neg__(self):
return self._combine(-1, self.MUL, False)
def __add__(self, other):
return self._combine(other, self.ADD, False)
def __sub__(self, other):
return self._combine(other, self.SUB, False)
def __mul__(self, other):
return self._combine(other, self.MUL, False)
def __truediv__(self, other):
return self._combine(other, self.DIV, False)
def __mod__(self, other):
return self._combine(other, self.MOD, False)
def __pow__(self, other):
return self._combine(other, self.POW, False)
def __and__(self, other):
if getattr(self, "conditional", False) and getattr(other, "conditional", False):
return Q(self) & Q(other)
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def bitand(self, other):
return self._combine(other, self.BITAND, False)
def bitleftshift(self, other):
return self._combine(other, self.BITLEFTSHIFT, False)
def bitrightshift(self, other):
return self._combine(other, self.BITRIGHTSHIFT, False)
def __xor__(self, other):
if getattr(self, "conditional", False) and getattr(other, "conditional", False):
return Q(self) ^ Q(other)
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def bitxor(self, other):
return self._combine(other, self.BITXOR, False)
def __or__(self, other):
if getattr(self, "conditional", False) and getattr(other, "conditional", False):
return Q(self) | Q(other)
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def bitor(self, other):
return self._combine(other, self.BITOR, False)
def __radd__(self, other):
return self._combine(other, self.ADD, True)
def __rsub__(self, other):
return self._combine(other, self.SUB, True)
def __rmul__(self, other):
return self._combine(other, self.MUL, True)
def __rtruediv__(self, other):
return self._combine(other, self.DIV, True)
def __rmod__(self, other):
return self._combine(other, self.MOD, True)
def __rpow__(self, other):
return self._combine(other, self.POW, True)
def __rand__(self, other):
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def __ror__(self, other):
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def __rxor__(self, other):
raise NotImplementedError(
"Use .bitand(), .bitor(), and .bitxor() for bitwise logical operations."
)
def __invert__(self):
return NegatedExpression(self)
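# Illustrative sketch (not part of Django): Combinable's operator overloads
# build CombinedExpression trees lazily; nothing touches the database until
# the expression is used in a query. Field names are hypothetical.
def _example_combinable(qs):
    return qs.annotate(total=F("price") * F("quantity"))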
class BaseExpression:
"""Base class for all query expressions."""
empty_result_set_value = NotImplemented
# aggregate specific fields
is_summary = False
_output_field_resolved_to_none = False
# Can the expression be used in a WHERE clause?
filterable = True
# Can the expression be used as a source expression in Window?
window_compatible = False
def __init__(self, output_field=None):
if output_field is not None:
self.output_field = output_field
def __getstate__(self):
state = self.__dict__.copy()
state.pop("convert_value", None)
return state
def get_db_converters(self, connection):
return (
[]
if self.convert_value is self._convert_value_noop
else [self.convert_value]
) + self.output_field.get_db_converters(connection)
def get_source_expressions(self):
return []
def set_source_expressions(self, exprs):
assert not exprs
def _parse_expressions(self, *expressions):
return [
arg
if hasattr(arg, "resolve_expression")
else (F(arg) if isinstance(arg, str) else Value(arg))
for arg in expressions
]
def as_sql(self, compiler, connection):
"""
Responsible for returning a (sql, [params]) tuple to be included
in the current query.
Different backends can provide their own implementation, by
providing an `as_{vendor}` method and patching the Expression:
```
def override_as_sql(self, compiler, connection):
# custom logic
return super().as_sql(compiler, connection)
setattr(Expression, 'as_' + connection.vendor, override_as_sql)
```
Arguments:
* compiler: the query compiler responsible for generating the query.
Must have a compile method, returning a (sql, [params]) tuple.
Calling compiler(value) will return a quoted `value`.
* connection: the database connection used for the current query.
Return: (sql, params)
where `sql` is a string containing ordered SQL placeholders to be
replaced with the elements of the list `params`.
"""
raise NotImplementedError("Subclasses must implement as_sql()")
@cached_property
def contains_aggregate(self):
return any(
expr and expr.contains_aggregate for expr in self.get_source_expressions()
)
@cached_property
def contains_over_clause(self):
return any(
expr and expr.contains_over_clause for expr in self.get_source_expressions()
)
@cached_property
def contains_column_references(self):
return any(
expr and expr.contains_column_references
for expr in self.get_source_expressions()
)
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
"""
Provide the chance to do any preprocessing or validation before being
added to the query.
Arguments:
* query: the backend query implementation
* allow_joins: boolean allowing or denying use of joins
in this query
* reuse: a set of reusable joins for multijoins
* summarize: a terminal aggregate clause
* for_save: whether this expression is about to be used in a save or update
Return: an Expression to be added to the query.
"""
c = self.copy()
c.is_summary = summarize
c.set_source_expressions(
[
expr.resolve_expression(query, allow_joins, reuse, summarize)
if expr
else None
for expr in c.get_source_expressions()
]
)
return c
@property
def conditional(self):
return isinstance(self.output_field, fields.BooleanField)
@property
def field(self):
return self.output_field
@cached_property
def output_field(self):
"""Return the output type of this expressions."""
output_field = self._resolve_output_field()
if output_field is None:
self._output_field_resolved_to_none = True
raise FieldError("Cannot resolve expression type, unknown output_field")
return output_field
@cached_property
def _output_field_or_none(self):
"""
Return the output field of this expression, or None if
_resolve_output_field() didn't return an output type.
"""
try:
return self.output_field
except FieldError:
if not self._output_field_resolved_to_none:
raise
def _resolve_output_field(self):
"""
Attempt to infer the output type of the expression.
As a guess, if the output fields of all source fields match then simply
infer the same type here.
If a source's output field resolves to None, exclude it from this check.
If all sources are None, then an error is raised higher up the stack in
the output_field property.
"""
# This guess is mostly a bad idea, but there is quite a lot of code
# (especially 3rd party Func subclasses) that depends on it, so we'd need
# a deprecation path to fix it.
sources_iter = (
source for source in self.get_source_fields() if source is not None
)
for output_field in sources_iter:
for source in sources_iter:
if not isinstance(output_field, source.__class__):
raise FieldError(
"Expression contains mixed types: %s, %s. You must "
"set output_field."
% (
output_field.__class__.__name__,
source.__class__.__name__,
)
)
return output_field
@staticmethod
def _convert_value_noop(value, expression, connection):
return value
@cached_property
def convert_value(self):
"""
Expressions provide their own converters because users have the option
of manually specifying the output_field which may be a different type
from the one the database returns.
"""
field = self.output_field
internal_type = field.get_internal_type()
if internal_type == "FloatField":
return (
lambda value, expression, connection: None
if value is None
else float(value)
)
elif internal_type.endswith("IntegerField"):
return (
lambda value, expression, connection: None
if value is None
else int(value)
)
elif internal_type == "DecimalField":
return (
lambda value, expression, connection: None
if value is None
else Decimal(value)
)
return self._convert_value_noop
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def get_transform(self, name):
return self.output_field.get_transform(name)
def relabeled_clone(self, change_map):
clone = self.copy()
clone.set_source_expressions(
[
e.relabeled_clone(change_map) if e is not None else None
for e in self.get_source_expressions()
]
)
return clone
def replace_expressions(self, replacements):
if replacement := replacements.get(self):
return replacement
clone = self.copy()
source_expressions = clone.get_source_expressions()
clone.set_source_expressions(
[
expr.replace_expressions(replacements) if expr else None
for expr in source_expressions
]
)
return clone
def get_refs(self):
refs = set()
for expr in self.get_source_expressions():
refs |= expr.get_refs()
return refs
def copy(self):
return copy.copy(self)
def prefix_references(self, prefix):
clone = self.copy()
clone.set_source_expressions(
[
F(f"{prefix}{expr.name}")
if isinstance(expr, F)
else expr.prefix_references(prefix)
for expr in self.get_source_expressions()
]
)
return clone
def get_group_by_cols(self):
if not self.contains_aggregate:
return [self]
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def get_source_fields(self):
"""Return the underlying field types used by this aggregate."""
return [e._output_field_or_none for e in self.get_source_expressions()]
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def reverse_ordering(self):
return self
def flatten(self):
"""
Recursively yield this expression and all subexpressions, in
depth-first order.
"""
yield self
for expr in self.get_source_expressions():
if expr:
if hasattr(expr, "flatten"):
yield from expr.flatten()
else:
yield expr
def select_format(self, compiler, sql, params):
"""
Custom format for select clauses. For example, EXISTS expressions need
to be wrapped in CASE WHEN on Oracle.
"""
if hasattr(self.output_field, "select_format"):
return self.output_field.select_format(compiler, sql, params)
return sql, params
@deconstructible
class Expression(BaseExpression, Combinable):
"""An expression that can be combined with other expressions."""
@cached_property
def identity(self):
constructor_signature = inspect.signature(self.__init__)
args, kwargs = self._constructor_args
signature = constructor_signature.bind_partial(*args, **kwargs)
signature.apply_defaults()
arguments = signature.arguments.items()
identity = [self.__class__]
for arg, value in arguments:
if isinstance(value, fields.Field):
if value.name and value.model:
value = (value.model._meta.label, value.name)
else:
value = type(value)
else:
value = make_hashable(value)
identity.append((arg, value))
return tuple(identity)
def __eq__(self, other):
if not isinstance(other, Expression):
return NotImplemented
return other.identity == self.identity
def __hash__(self):
return hash(self.identity)
# Type inference for CombinedExpression.output_field.
# Missing items will result in FieldError, by design.
#
# The current approach for NULL is based on lowest common denominator behavior
# i.e. if one of the supported databases is raising an error (rather than
# return NULL) for `val <op> NULL`, then Django raises FieldError.
NoneType = type(None)
_connector_combinations = [
# Numeric operations - operands of same type.
{
connector: [
(fields.IntegerField, fields.IntegerField, fields.IntegerField),
(fields.FloatField, fields.FloatField, fields.FloatField),
(fields.DecimalField, fields.DecimalField, fields.DecimalField),
]
for connector in (
Combinable.ADD,
Combinable.SUB,
Combinable.MUL,
# Behavior for DIV with integer arguments follows Postgres/SQLite,
# not MySQL/Oracle.
Combinable.DIV,
Combinable.MOD,
Combinable.POW,
)
},
# Numeric operations - operands of different type.
{
connector: [
(fields.IntegerField, fields.DecimalField, fields.DecimalField),
(fields.DecimalField, fields.IntegerField, fields.DecimalField),
(fields.IntegerField, fields.FloatField, fields.FloatField),
(fields.FloatField, fields.IntegerField, fields.FloatField),
]
for connector in (
Combinable.ADD,
Combinable.SUB,
Combinable.MUL,
Combinable.DIV,
Combinable.MOD,
)
},
# Bitwise operators.
{
connector: [
(fields.IntegerField, fields.IntegerField, fields.IntegerField),
]
for connector in (
Combinable.BITAND,
Combinable.BITOR,
Combinable.BITLEFTSHIFT,
Combinable.BITRIGHTSHIFT,
Combinable.BITXOR,
)
},
# Numeric with NULL.
{
connector: [
(field_type, NoneType, field_type),
(NoneType, field_type, field_type),
]
for connector in (
Combinable.ADD,
Combinable.SUB,
Combinable.MUL,
Combinable.DIV,
Combinable.MOD,
Combinable.POW,
)
for field_type in (fields.IntegerField, fields.DecimalField, fields.FloatField)
},
# Date/DateTimeField/DurationField/TimeField.
{
Combinable.ADD: [
# Date/DateTimeField.
(fields.DateField, fields.DurationField, fields.DateTimeField),
(fields.DateTimeField, fields.DurationField, fields.DateTimeField),
(fields.DurationField, fields.DateField, fields.DateTimeField),
(fields.DurationField, fields.DateTimeField, fields.DateTimeField),
# DurationField.
(fields.DurationField, fields.DurationField, fields.DurationField),
# TimeField.
(fields.TimeField, fields.DurationField, fields.TimeField),
(fields.DurationField, fields.TimeField, fields.TimeField),
],
},
{
Combinable.SUB: [
# Date/DateTimeField.
(fields.DateField, fields.DurationField, fields.DateTimeField),
(fields.DateTimeField, fields.DurationField, fields.DateTimeField),
(fields.DateField, fields.DateField, fields.DurationField),
(fields.DateField, fields.DateTimeField, fields.DurationField),
(fields.DateTimeField, fields.DateField, fields.DurationField),
(fields.DateTimeField, fields.DateTimeField, fields.DurationField),
# DurationField.
(fields.DurationField, fields.DurationField, fields.DurationField),
# TimeField.
(fields.TimeField, fields.DurationField, fields.TimeField),
(fields.TimeField, fields.TimeField, fields.DurationField),
],
},
]
_connector_combinators = defaultdict(list)
def register_combinable_fields(lhs, connector, rhs, result):
"""
Register combinable types:
lhs <connector> rhs -> result
e.g.
register_combinable_fields(
IntegerField, Combinable.ADD, FloatField, FloatField
)
"""
_connector_combinators[connector].append((lhs, rhs, result))
for d in _connector_combinations:
for connector, field_types in d.items():
for lhs, rhs, result in field_types:
register_combinable_fields(lhs, connector, rhs, result)
@functools.lru_cache(maxsize=128)
def _resolve_combined_type(connector, lhs_type, rhs_type):
combinators = _connector_combinators.get(connector, ())
for combinator_lhs_type, combinator_rhs_type, combined_type in combinators:
if issubclass(lhs_type, combinator_lhs_type) and issubclass(
rhs_type, combinator_rhs_type
):
return combined_type
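# Illustrative sketch (not part of Django): third-party fields can extend the
# inference table above. The pairing below is an assumption chosen only to
# show the call shape, not a combination Django registers itself.
def _example_register_combinable():
    register_combinable_fields(
        fields.IntegerField, Combinable.MUL, fields.DurationField, fields.DurationField
    )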
class CombinedExpression(SQLiteNumericMixin, Expression):
def __init__(self, lhs, connector, rhs, output_field=None):
super().__init__(output_field=output_field)
self.connector = connector
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, self)
def __str__(self):
return "{} {} {}".format(self.lhs, self.connector, self.rhs)
def get_source_expressions(self):
return [self.lhs, self.rhs]
def set_source_expressions(self, exprs):
self.lhs, self.rhs = exprs
def _resolve_output_field(self):
# We avoid using super() here for reasons given in
# Expression._resolve_output_field()
combined_type = _resolve_combined_type(
self.connector,
type(self.lhs._output_field_or_none),
type(self.rhs._output_field_or_none),
)
if combined_type is None:
raise FieldError(
f"Cannot infer type of {self.connector!r} expression involving these "
f"types: {self.lhs.output_field.__class__.__name__}, "
f"{self.rhs.output_field.__class__.__name__}. You must set "
f"output_field."
)
return combined_type()
def as_sql(self, compiler, connection):
expressions = []
expression_params = []
sql, params = compiler.compile(self.lhs)
expressions.append(sql)
expression_params.extend(params)
sql, params = compiler.compile(self.rhs)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = "(%s)"
sql = connection.ops.combine_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
lhs = self.lhs.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
rhs = self.rhs.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
if not isinstance(self, (DurationExpression, TemporalSubtraction)):
try:
lhs_type = lhs.output_field.get_internal_type()
except (AttributeError, FieldError):
lhs_type = None
try:
rhs_type = rhs.output_field.get_internal_type()
except (AttributeError, FieldError):
rhs_type = None
if "DurationField" in {lhs_type, rhs_type} and lhs_type != rhs_type:
return DurationExpression(
self.lhs, self.connector, self.rhs
).resolve_expression(
query,
allow_joins,
reuse,
summarize,
for_save,
)
datetime_fields = {"DateField", "DateTimeField", "TimeField"}
if (
self.connector == self.SUB
and lhs_type in datetime_fields
and lhs_type == rhs_type
):
return TemporalSubtraction(self.lhs, self.rhs).resolve_expression(
query,
allow_joins,
reuse,
summarize,
for_save,
)
c = self.copy()
c.is_summary = summarize
c.lhs = lhs
c.rhs = rhs
return c
class DurationExpression(CombinedExpression):
def compile(self, side, compiler, connection):
try:
output = side.output_field
except FieldError:
pass
else:
if output.get_internal_type() == "DurationField":
sql, params = compiler.compile(side)
return connection.ops.format_for_duration_arithmetic(sql), params
return compiler.compile(side)
def as_sql(self, compiler, connection):
if connection.features.has_native_duration_field:
return super().as_sql(compiler, connection)
connection.ops.check_expression_support(self)
expressions = []
expression_params = []
sql, params = self.compile(self.lhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
sql, params = self.compile(self.rhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = "(%s)"
sql = connection.ops.combine_duration_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def as_sqlite(self, compiler, connection, **extra_context):
sql, params = self.as_sql(compiler, connection, **extra_context)
if self.connector in {Combinable.MUL, Combinable.DIV}:
try:
lhs_type = self.lhs.output_field.get_internal_type()
rhs_type = self.rhs.output_field.get_internal_type()
except (AttributeError, FieldError):
pass
else:
allowed_fields = {
"DecimalField",
"DurationField",
"FloatField",
"IntegerField",
}
if lhs_type not in allowed_fields or rhs_type not in allowed_fields:
raise DatabaseError(
f"Invalid arguments for operator {self.connector}."
)
return sql, params
class TemporalSubtraction(CombinedExpression):
output_field = fields.DurationField()
def __init__(self, lhs, rhs):
super().__init__(lhs, self.SUB, rhs)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
lhs = compiler.compile(self.lhs)
rhs = compiler.compile(self.rhs)
return connection.ops.subtract_temporals(
self.lhs.output_field.get_internal_type(), lhs, rhs
)
@deconstructible(path="django.db.models.F")
class F(Combinable):
"""An object capable of resolving references to existing query objects."""
def __init__(self, name):
"""
Arguments:
* name: the name of the field this expression references
"""
self.name = name
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.name)
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
return query.resolve_ref(self.name, allow_joins, reuse, summarize)
def replace_expressions(self, replacements):
return replacements.get(self, self)
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def __eq__(self, other):
return self.__class__ == other.__class__ and self.name == other.name
def __hash__(self):
return hash(self.name)
def copy(self):
return copy.copy(self)
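# Illustrative sketch (not part of Django): because F resolves to a column
# reference, updates can be computed entirely in the database. The field name
# "views" is hypothetical.
def _example_f_update(qs):
    return qs.update(views=F("views") + 1)  # UPDATE ... SET views = views + 1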
class ResolvedOuterRef(F):
"""
An object that contains a reference to an outer query.
In this case, the reference to the outer query has been resolved because
the inner query has been used as a subquery.
"""
contains_aggregate = False
contains_over_clause = False
def as_sql(self, *args, **kwargs):
raise ValueError(
"This queryset contains a reference to an outer query and may "
"only be used in a subquery."
)
def resolve_expression(self, *args, **kwargs):
col = super().resolve_expression(*args, **kwargs)
# FIXME: Rename possibly_multivalued to multivalued and fix detection
# for non-multivalued JOINs (e.g. foreign key fields). This should take
# into account only many-to-many and one-to-many relationships.
col.possibly_multivalued = LOOKUP_SEP in self.name
return col
def relabeled_clone(self, relabels):
return self
def get_group_by_cols(self):
return []
class OuterRef(F):
contains_aggregate = False
def resolve_expression(self, *args, **kwargs):
if isinstance(self.name, self.__class__):
return self.name
return ResolvedOuterRef(self.name)
def relabeled_clone(self, relabels):
return self
@deconstructible(path="django.db.models.Func")
class Func(SQLiteNumericMixin, Expression):
"""An SQL function call."""
function = None
template = "%(function)s(%(expressions)s)"
arg_joiner = ", "
arity = None # The number of arguments the function accepts.
def __init__(self, *expressions, output_field=None, **extra):
if self.arity is not None and len(expressions) != self.arity:
raise TypeError(
"'%s' takes exactly %s %s (%s given)"
% (
self.__class__.__name__,
self.arity,
"argument" if self.arity == 1 else "arguments",
len(expressions),
)
)
super().__init__(output_field=output_field)
self.source_expressions = self._parse_expressions(*expressions)
self.extra = extra
def __repr__(self):
args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
extra = {**self.extra, **self._get_repr_options()}
if extra:
extra = ", ".join(
str(key) + "=" + str(val) for key, val in sorted(extra.items())
)
return "{}({}, {})".format(self.__class__.__name__, args, extra)
return "{}({})".format(self.__class__.__name__, args)
def _get_repr_options(self):
"""Return a dict of extra __init__() options to include in the repr."""
return {}
def get_source_expressions(self):
return self.source_expressions
def set_source_expressions(self, exprs):
self.source_expressions = exprs
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
for pos, arg in enumerate(c.source_expressions):
c.source_expressions[pos] = arg.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def as_sql(
self,
compiler,
connection,
function=None,
template=None,
arg_joiner=None,
**extra_context,
):
connection.ops.check_expression_support(self)
sql_parts = []
params = []
for arg in self.source_expressions:
try:
arg_sql, arg_params = compiler.compile(arg)
except EmptyResultSet:
empty_result_set_value = getattr(
arg, "empty_result_set_value", NotImplemented
)
if empty_result_set_value is NotImplemented:
raise
arg_sql, arg_params = compiler.compile(Value(empty_result_set_value))
except FullResultSet:
arg_sql, arg_params = compiler.compile(Value(True))
sql_parts.append(arg_sql)
params.extend(arg_params)
data = {**self.extra, **extra_context}
# Use the first supplied value in this order: the parameter to this
# method, a value supplied in __init__()'s **extra (the value in
# `data`), or the value defined on the class.
if function is not None:
data["function"] = function
else:
data.setdefault("function", self.function)
template = template or data.get("template", self.template)
arg_joiner = arg_joiner or data.get("arg_joiner", self.arg_joiner)
data["expressions"] = data["field"] = arg_joiner.join(sql_parts)
return template % data, params
def copy(self):
copy = super().copy()
copy.source_expressions = self.source_expressions[:]
copy.extra = self.extra.copy()
return copy
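# Illustrative sketch (not part of Django): the smallest useful Func subclass
# just names the SQL function; `arity` enforces the argument count. This
# assumes the backend provides LOWER().
class _ExampleLower(Func):
    function = "LOWER"
    arity = 1
# Usage: qs.annotate(name_lower=_ExampleLower("name")) renders LOWER("name").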
@deconstructible(path="django.db.models.Value")
class Value(SQLiteNumericMixin, Expression):
"""Represent a wrapped value as a node within an expression."""
# Provide a default value for `for_save` in order to allow unresolved
# instances to be compiled until a decision is taken in #25425.
for_save = False
def __init__(self, value, output_field=None):
"""
Arguments:
* value: the value this expression represents. The value will be
added into the sql parameter list and properly quoted.
* output_field: an instance of the model field type that this
expression will return, such as IntegerField() or CharField().
"""
super().__init__(output_field=output_field)
self.value = value
def __repr__(self):
return f"{self.__class__.__name__}({self.value!r})"
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
val = self.value
output_field = self._output_field_or_none
if output_field is not None:
if self.for_save:
val = output_field.get_db_prep_save(val, connection=connection)
else:
val = output_field.get_db_prep_value(val, connection=connection)
if hasattr(output_field, "get_placeholder"):
return output_field.get_placeholder(val, compiler, connection), [val]
if val is None:
# cx_Oracle does not always convert None to the appropriate
# NULL type (like in case expressions using numbers), so we
# use a literal SQL NULL
return "NULL", []
return "%s", [val]
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = super().resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.for_save = for_save
return c
def get_group_by_cols(self):
return []
def _resolve_output_field(self):
if isinstance(self.value, str):
return fields.CharField()
if isinstance(self.value, bool):
return fields.BooleanField()
if isinstance(self.value, int):
return fields.IntegerField()
if isinstance(self.value, float):
return fields.FloatField()
if isinstance(self.value, datetime.datetime):
return fields.DateTimeField()
if isinstance(self.value, datetime.date):
return fields.DateField()
if isinstance(self.value, datetime.time):
return fields.TimeField()
if isinstance(self.value, datetime.timedelta):
return fields.DurationField()
if isinstance(self.value, Decimal):
return fields.DecimalField()
if isinstance(self.value, bytes):
return fields.BinaryField()
if isinstance(self.value, UUID):
return fields.UUIDField()
@property
def empty_result_set_value(self):
return self.value
class RawSQL(Expression):
def __init__(self, sql, params, output_field=None):
if output_field is None:
output_field = fields.Field()
self.sql, self.params = sql, params
super().__init__(output_field=output_field)
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params)
def as_sql(self, compiler, connection):
return "(%s)" % self.sql, self.params
def get_group_by_cols(self):
return [self]
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
# Resolve parents fields used in raw SQL.
if query.model:
for parent in query.model._meta.get_parent_list():
for parent_field in parent._meta.local_fields:
_, column_name = parent_field.get_attname_column()
if column_name.lower() in self.sql.lower():
query.resolve_ref(
parent_field.name, allow_joins, reuse, summarize
)
break
return super().resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
class Star(Expression):
def __repr__(self):
return "'*'"
def as_sql(self, compiler, connection):
return "*", []
class Col(Expression):
contains_column_references = True
possibly_multivalued = False
def __init__(self, alias, target, output_field=None):
if output_field is None:
output_field = target
super().__init__(output_field=output_field)
self.alias, self.target = alias, target
def __repr__(self):
alias, target = self.alias, self.target
identifiers = (alias, str(target)) if alias else (str(target),)
return "{}({})".format(self.__class__.__name__, ", ".join(identifiers))
def as_sql(self, compiler, connection):
alias, column = self.alias, self.target.column
identifiers = (alias, column) if alias else (column,)
sql = ".".join(map(compiler.quote_name_unless_alias, identifiers))
return sql, []
def relabeled_clone(self, relabels):
if self.alias is None:
return self
return self.__class__(
relabels.get(self.alias, self.alias), self.target, self.output_field
)
def get_group_by_cols(self):
return [self]
def get_db_converters(self, connection):
if self.target == self.output_field:
return self.output_field.get_db_converters(connection)
return self.output_field.get_db_converters(
connection
) + self.target.get_db_converters(connection)
class Ref(Expression):
"""
Reference to a column alias of the query. For example, Ref('sum_cost') in
qs.annotate(sum_cost=Sum('cost')) query.
"""
def __init__(self, refs, source):
super().__init__()
self.refs, self.source = refs, source
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source)
def get_source_expressions(self):
return [self.source]
def set_source_expressions(self, exprs):
(self.source,) = exprs
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
# The sub-expression `source` has already been resolved, as this is
# just a reference to the name of `source`.
return self
def get_refs(self):
return {self.refs}
def relabeled_clone(self, relabels):
return self
def as_sql(self, compiler, connection):
return connection.ops.quote_name(self.refs), []
def get_group_by_cols(self):
return [self]
class ExpressionList(Func):
"""
An expression containing multiple expressions. Can be used to provide a
list of expressions as an argument to another expression, like a partition
clause.
"""
template = "%(expressions)s"
def __init__(self, *expressions, **extra):
if not expressions:
raise ValueError(
"%s requires at least one expression." % self.__class__.__name__
)
super().__init__(*expressions, **extra)
def __str__(self):
return self.arg_joiner.join(str(arg) for arg in self.source_expressions)
def as_sqlite(self, compiler, connection, **extra_context):
# Casting to numeric is unnecessary.
return self.as_sql(compiler, connection, **extra_context)
class OrderByList(Func):
template = "ORDER BY %(expressions)s"
def __init__(self, *expressions, **extra):
expressions = (
(
OrderBy(F(expr[1:]), descending=True)
if isinstance(expr, str) and expr[0] == "-"
else expr
)
for expr in expressions
)
super().__init__(*expressions, **extra)
def as_sql(self, *args, **kwargs):
if not self.source_expressions:
return "", ()
return super().as_sql(*args, **kwargs)
def get_group_by_cols(self):
group_by_cols = []
for order_by in self.get_source_expressions():
group_by_cols.extend(order_by.get_group_by_cols())
return group_by_cols
@deconstructible(path="django.db.models.ExpressionWrapper")
class ExpressionWrapper(SQLiteNumericMixin, Expression):
"""
An expression that can wrap another expression so that it can provide
extra context to the inner expression, such as the output_field.
"""
def __init__(self, expression, output_field):
super().__init__(output_field=output_field)
self.expression = expression
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def get_group_by_cols(self):
if isinstance(self.expression, Expression):
expression = self.expression.copy()
expression.output_field = self.output_field
return expression.get_group_by_cols()
# For non-expressions e.g. an SQL WHERE clause, the entire
# `expression` must be included in the GROUP BY clause.
return super().get_group_by_cols()
def as_sql(self, compiler, connection):
return compiler.compile(self.expression)
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.expression)
class NegatedExpression(ExpressionWrapper):
"""The logical negation of a conditional expression."""
def __init__(self, expression):
super().__init__(expression, output_field=fields.BooleanField())
def __invert__(self):
return self.expression.copy()
def as_sql(self, compiler, connection):
try:
sql, params = super().as_sql(compiler, connection)
except EmptyResultSet:
features = compiler.connection.features
if not features.supports_boolean_expr_in_select_clause:
return "1=1", ()
return compiler.compile(Value(True))
ops = compiler.connection.ops
# Some database backends (e.g. Oracle) don't allow EXISTS() and filters
# to be compared to another expression unless they're wrapped in a CASE
# WHEN.
if not ops.conditional_expression_supported_in_where_clause(self.expression):
return f"CASE WHEN {sql} = 0 THEN 1 ELSE 0 END", params
return f"NOT {sql}", params
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
resolved = super().resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
if not getattr(resolved.expression, "conditional", False):
raise TypeError("Cannot negate non-conditional expressions.")
return resolved
def select_format(self, compiler, sql, params):
# Wrap boolean expressions with a CASE WHEN expression if a database
# backend (e.g. Oracle) doesn't support boolean expression in SELECT or
# GROUP BY list.
expression_supported_in_where_clause = (
compiler.connection.ops.conditional_expression_supported_in_where_clause
)
if (
not compiler.connection.features.supports_boolean_expr_in_select_clause
# Avoid double wrapping.
and expression_supported_in_where_clause(self.expression)
):
sql = "CASE WHEN {} THEN 1 ELSE 0 END".format(sql)
return sql, params
@deconstructible(path="django.db.models.When")
class When(Expression):
template = "WHEN %(condition)s THEN %(result)s"
# This isn't a complete conditional expression; it must be used in Case().
conditional = False
def __init__(self, condition=None, then=None, **lookups):
if lookups:
if condition is None:
condition, lookups = Q(**lookups), None
elif getattr(condition, "conditional", False):
condition, lookups = Q(condition, **lookups), None
if condition is None or not getattr(condition, "conditional", False) or lookups:
raise TypeError(
"When() supports a Q object, a boolean expression, or lookups "
"as a condition."
)
if isinstance(condition, Q) and not condition:
raise ValueError("An empty Q() can't be used as a When() condition.")
super().__init__(output_field=None)
self.condition = condition
self.result = self._parse_expressions(then)[0]
def __str__(self):
return "WHEN %r THEN %r" % (self.condition, self.result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return [self.condition, self.result]
def set_source_expressions(self, exprs):
self.condition, self.result = exprs
def get_source_fields(self):
# We're only interested in the fields of the result expressions.
return [self.result._output_field_or_none]
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
if hasattr(c.condition, "resolve_expression"):
c.condition = c.condition.resolve_expression(
query, allow_joins, reuse, summarize, False
)
c.result = c.result.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = extra_context
sql_params = []
condition_sql, condition_params = compiler.compile(self.condition)
template_params["condition"] = condition_sql
result_sql, result_params = compiler.compile(self.result)
template_params["result"] = result_sql
template = template or self.template
return template % template_params, (
*sql_params,
*condition_params,
*result_params,
)
def get_group_by_cols(self):
# This is not a complete expression and cannot be used in GROUP BY.
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
@deconstructible(path="django.db.models.Case")
class Case(SQLiteNumericMixin, Expression):
"""
An SQL searched CASE expression:
CASE
WHEN n > 0
THEN 'positive'
WHEN n < 0
THEN 'negative'
ELSE 'zero'
END
"""
template = "CASE %(cases)s ELSE %(default)s END"
case_joiner = " "
def __init__(self, *cases, default=None, output_field=None, **extra):
if not all(isinstance(case, When) for case in cases):
raise TypeError("Positional arguments must all be When objects.")
super().__init__(output_field)
self.cases = list(cases)
self.default = self._parse_expressions(default)[0]
self.extra = extra
def __str__(self):
return "CASE %s, ELSE %r" % (
", ".join(str(c) for c in self.cases),
self.default,
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return self.cases + [self.default]
def set_source_expressions(self, exprs):
*self.cases, self.default = exprs
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
for pos, case in enumerate(c.cases):
c.cases[pos] = case.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
c.default = c.default.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def copy(self):
c = super().copy()
c.cases = c.cases[:]
return c
def as_sql(
self, compiler, connection, template=None, case_joiner=None, **extra_context
):
connection.ops.check_expression_support(self)
if not self.cases:
return compiler.compile(self.default)
template_params = {**self.extra, **extra_context}
case_parts = []
sql_params = []
default_sql, default_params = compiler.compile(self.default)
for case in self.cases:
try:
case_sql, case_params = compiler.compile(case)
except EmptyResultSet:
continue
except FullResultSet:
default_sql, default_params = compiler.compile(case.result)
break
case_parts.append(case_sql)
sql_params.extend(case_params)
if not case_parts:
return default_sql, default_params
case_joiner = case_joiner or self.case_joiner
template_params["cases"] = case_joiner.join(case_parts)
template_params["default"] = default_sql
sql_params.extend(default_params)
template = template or template_params.get("template", self.template)
sql = template % template_params
if self._output_field_or_none is not None:
sql = connection.ops.unification_cast_sql(self.output_field) % sql
return sql, sql_params
def get_group_by_cols(self):
if not self.cases:
return self.default.get_group_by_cols()
return super().get_group_by_cols()
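# Illustrative sketch (not part of Django): Case() evaluates its When()
# branches in order and falls back to `default`. The account_type values and
# field names are hypothetical.
def _example_case(qs):
    return qs.annotate(
        discount=Case(
            When(account_type="gold", then=Value(0.2)),
            When(account_type="silver", then=Value(0.1)),
            default=Value(0.0),
            output_field=fields.FloatField(),
        )
    )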
class Subquery(BaseExpression, Combinable):
"""
An explicit subquery. It may contain OuterRef() references to the outer
query which will be resolved when it is applied to that query.
"""
template = "(%(subquery)s)"
contains_aggregate = False
empty_result_set_value = None
def __init__(self, queryset, output_field=None, **extra):
# Allow the usage of both QuerySet and sql.Query objects.
self.query = getattr(queryset, "query", queryset).clone()
self.query.subquery = True
self.extra = extra
super().__init__(output_field)
def get_source_expressions(self):
return [self.query]
def set_source_expressions(self, exprs):
self.query = exprs[0]
def _resolve_output_field(self):
return self.query.output_field
def copy(self):
clone = super().copy()
clone.query = clone.query.clone()
return clone
@property
def external_aliases(self):
return self.query.external_aliases
def get_external_cols(self):
return self.query.get_external_cols()
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = {**self.extra, **extra_context}
subquery_sql, sql_params = self.query.as_sql(compiler, connection)
template_params["subquery"] = subquery_sql[1:-1]
template = template or template_params.get("template", self.template)
sql = template % template_params
return sql, sql_params
def get_group_by_cols(self):
return self.query.get_group_by_cols(wrapper=self)
class Exists(Subquery):
template = "EXISTS(%(subquery)s)"
output_field = fields.BooleanField()
def __init__(self, queryset, **kwargs):
super().__init__(queryset, **kwargs)
self.query = self.query.exists()
def select_format(self, compiler, sql, params):
# Wrap EXISTS() with a CASE WHEN expression if a database backend
# (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP
# BY list.
if not compiler.connection.features.supports_boolean_expr_in_select_clause:
sql = "CASE WHEN {} THEN 1 ELSE 0 END".format(sql)
return sql, params
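# Illustrative sketch (not part of Django): OuterRef stays unresolved until
# the inner queryset is embedded with Subquery() or Exists(). Model and field
# names are hypothetical.
def _example_subquery(post_qs, comment_qs):
    newest = comment_qs.filter(post=OuterRef("pk")).order_by("-created")
    return post_qs.annotate(
        newest_comment=Subquery(newest.values("body")[:1]),
        has_comments=Exists(newest),
    )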
@deconstructible(path="django.db.models.OrderBy")
class OrderBy(Expression):
template = "%(expression)s %(ordering)s"
conditional = False
def __init__(self, expression, descending=False, nulls_first=None, nulls_last=None):
if nulls_first and nulls_last:
raise ValueError("nulls_first and nulls_last are mutually exclusive")
if nulls_first is False or nulls_last is False:
# When the deprecation ends, replace with:
# raise ValueError(
# "nulls_first and nulls_last values must be True or None."
# )
warnings.warn(
"Passing nulls_first=False or nulls_last=False is deprecated, use None "
"instead.",
RemovedInDjango50Warning,
stacklevel=2,
)
self.nulls_first = nulls_first
self.nulls_last = nulls_last
self.descending = descending
if not hasattr(expression, "resolve_expression"):
raise ValueError("expression must be an expression type")
self.expression = expression
def __repr__(self):
return "{}({}, descending={})".format(
self.__class__.__name__, self.expression, self.descending
)
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def as_sql(self, compiler, connection, template=None, **extra_context):
template = template or self.template
if connection.features.supports_order_by_nulls_modifier:
if self.nulls_last:
template = "%s NULLS LAST" % template
elif self.nulls_first:
template = "%s NULLS FIRST" % template
else:
if self.nulls_last and not (
self.descending and connection.features.order_by_nulls_first
):
template = "%%(expression)s IS NULL, %s" % template
elif self.nulls_first and not (
not self.descending and connection.features.order_by_nulls_first
):
template = "%%(expression)s IS NOT NULL, %s" % template
connection.ops.check_expression_support(self)
expression_sql, params = compiler.compile(self.expression)
placeholders = {
"expression": expression_sql,
"ordering": "DESC" if self.descending else "ASC",
**extra_context,
}
params *= template.count("%(expression)s")
return (template % placeholders).rstrip(), params
def as_oracle(self, compiler, connection):
# Oracle doesn't allow ORDER BY EXISTS() or filters unless it's wrapped
# in a CASE WHEN.
if connection.ops.conditional_expression_supported_in_where_clause(
self.expression
):
copy = self.copy()
copy.expression = Case(
When(self.expression, then=True),
default=False,
)
return copy.as_sql(compiler, connection)
return self.as_sql(compiler, connection)
def get_group_by_cols(self):
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def reverse_ordering(self):
self.descending = not self.descending
if self.nulls_first:
self.nulls_last = True
self.nulls_first = None
elif self.nulls_last:
self.nulls_first = True
self.nulls_last = None
return self
def asc(self):
self.descending = False
def desc(self):
self.descending = True
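# Illustrative sketch, not part of this module: OrderBy instances are
# normally created via F().asc()/F().desc() (F() is defined earlier in this
# module). ``published`` is a hypothetical nullable field; on backends
# without a NULLS LAST modifier, as_sql() above falls back to the
# "%(expression)s IS NULL, ..." prefix form.
def _orderby_nulls_sketch(queryset):
    return queryset.order_by(F("published").desc(nulls_last=True))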
class Window(SQLiteNumericMixin, Expression):
template = "%(expression)s OVER (%(window)s)"
# Although the main expression may either be an aggregate or an
# expression with an aggregate function, the GROUP BY that will
# be introduced in the query as a result is not desired.
contains_aggregate = False
contains_over_clause = True
def __init__(
self,
expression,
partition_by=None,
order_by=None,
frame=None,
output_field=None,
):
self.partition_by = partition_by
self.order_by = order_by
self.frame = frame
if not getattr(expression, "window_compatible", False):
raise ValueError(
"Expression '%s' isn't compatible with OVER clauses."
% expression.__class__.__name__
)
if self.partition_by is not None:
if not isinstance(self.partition_by, (tuple, list)):
self.partition_by = (self.partition_by,)
self.partition_by = ExpressionList(*self.partition_by)
if self.order_by is not None:
if isinstance(self.order_by, (list, tuple)):
self.order_by = OrderByList(*self.order_by)
elif isinstance(self.order_by, (BaseExpression, str)):
self.order_by = OrderByList(self.order_by)
else:
raise ValueError(
"Window.order_by must be either a string reference to a "
"field, an expression, or a list or tuple of them."
)
super().__init__(output_field=output_field)
self.source_expression = self._parse_expressions(expression)[0]
def _resolve_output_field(self):
return self.source_expression.output_field
def get_source_expressions(self):
return [self.source_expression, self.partition_by, self.order_by, self.frame]
def set_source_expressions(self, exprs):
self.source_expression, self.partition_by, self.order_by, self.frame = exprs
def as_sql(self, compiler, connection, template=None):
connection.ops.check_expression_support(self)
if not connection.features.supports_over_clause:
raise NotSupportedError("This backend does not support window expressions.")
expr_sql, params = compiler.compile(self.source_expression)
window_sql, window_params = [], ()
if self.partition_by is not None:
sql_expr, sql_params = self.partition_by.as_sql(
compiler=compiler,
connection=connection,
template="PARTITION BY %(expressions)s",
)
window_sql.append(sql_expr)
window_params += tuple(sql_params)
if self.order_by is not None:
order_sql, order_params = compiler.compile(self.order_by)
window_sql.append(order_sql)
window_params += tuple(order_params)
if self.frame:
frame_sql, frame_params = compiler.compile(self.frame)
window_sql.append(frame_sql)
window_params += tuple(frame_params)
template = template or self.template
return (
template % {"expression": expr_sql, "window": " ".join(window_sql).strip()},
(*params, *window_params),
)
def as_sqlite(self, compiler, connection):
if isinstance(self.output_field, fields.DecimalField):
# Casting to numeric must be outside of the window expression.
copy = self.copy()
source_expressions = copy.get_source_expressions()
source_expressions[0].output_field = fields.FloatField()
copy.set_source_expressions(source_expressions)
return super(Window, copy).as_sqlite(compiler, connection)
return self.as_sql(compiler, connection)
def __str__(self):
return "{} OVER ({}{}{})".format(
str(self.source_expression),
"PARTITION BY " + str(self.partition_by) if self.partition_by else "",
str(self.order_by or ""),
str(self.frame or ""),
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_group_by_cols(self):
group_by_cols = []
if self.partition_by:
group_by_cols.extend(self.partition_by.get_group_by_cols())
if self.order_by is not None:
group_by_cols.extend(self.order_by.get_group_by_cols())
return group_by_cols
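# Illustrative sketch, not part of this module: rank rows within a
# partition. ``employee_model`` is hypothetical, with ``department`` and
# ``salary`` fields; Rank() is a window function from
# django.db.models.functions.
def _window_rank_sketch(employee_model):
    from django.db.models.functions import Rank

    return employee_model.objects.annotate(
        pay_rank=Window(
            expression=Rank(),
            partition_by=F("department"),
            order_by=F("salary").desc(),
        )
    )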
class WindowFrame(Expression):
"""
    Model the frame clause in window expressions. There are two types of
    frame clauses, which are subclasses; however, all processing and
    validation (by no means intended to be complete) is done here. Thus,
    providing an end for a frame is optional (the default is UNBOUNDED
    FOLLOWING, which is the last row in the frame).
"""
template = "%(frame_type)s BETWEEN %(start)s AND %(end)s"
def __init__(self, start=None, end=None):
self.start = Value(start)
self.end = Value(end)
def set_source_expressions(self, exprs):
self.start, self.end = exprs
def get_source_expressions(self):
return [self.start, self.end]
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
start, end = self.window_frame_start_end(
connection, self.start.value, self.end.value
)
return (
self.template
% {
"frame_type": self.frame_type,
"start": start,
"end": end,
},
[],
)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_group_by_cols(self):
return []
def __str__(self):
if self.start.value is not None and self.start.value < 0:
start = "%d %s" % (abs(self.start.value), connection.ops.PRECEDING)
elif self.start.value is not None and self.start.value == 0:
start = connection.ops.CURRENT_ROW
else:
start = connection.ops.UNBOUNDED_PRECEDING
if self.end.value is not None and self.end.value > 0:
end = "%d %s" % (self.end.value, connection.ops.FOLLOWING)
elif self.end.value is not None and self.end.value == 0:
end = connection.ops.CURRENT_ROW
else:
end = connection.ops.UNBOUNDED_FOLLOWING
return self.template % {
"frame_type": self.frame_type,
"start": start,
"end": end,
}
def window_frame_start_end(self, connection, start, end):
raise NotImplementedError("Subclasses must implement window_frame_start_end().")
class RowRange(WindowFrame):
frame_type = "ROWS"
def window_frame_start_end(self, connection, start, end):
return connection.ops.window_frame_rows_start_end(start, end)
class ValueRange(WindowFrame):
frame_type = "RANGE"
def window_frame_start_end(self, connection, start, end):
return connection.ops.window_frame_range_start_end(start, end)
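# Illustrative sketch, not part of this module: a running total over a ROWS
# frame. RowRange(start=None, end=0) renders as "ROWS BETWEEN UNBOUNDED
# PRECEDING AND CURRENT ROW". ``entry_model`` is hypothetical, with
# ``amount`` and ``date`` fields.
def _running_total_sketch(entry_model):
    from django.db.models import Sum

    return entry_model.objects.annotate(
        running_total=Window(
            expression=Sum("amount"),
            order_by=F("date").asc(),
            frame=RowRange(start=None, end=0),
        )
    )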
|
f698a574b250a894b5536b94e1a3a228fee176881665632cf7e324fcef01d46d | import itertools
import math
from django.core.exceptions import EmptyResultSet
from django.db.models.expressions import Case, Expression, Func, Value, When
from django.db.models.fields import (
BooleanField,
CharField,
DateTimeField,
Field,
IntegerField,
UUIDField,
)
from django.db.models.query_utils import RegisterLookupMixin
from django.utils.datastructures import OrderedSet
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
class Lookup(Expression):
lookup_name = None
prepare_rhs = True
can_use_none_as_rhs = False
def __init__(self, lhs, rhs):
self.lhs, self.rhs = lhs, rhs
self.rhs = self.get_prep_lookup()
self.lhs = self.get_prep_lhs()
if hasattr(self.lhs, "get_bilateral_transforms"):
bilateral_transforms = self.lhs.get_bilateral_transforms()
else:
bilateral_transforms = []
if bilateral_transforms:
# Warn the user as soon as possible if they are trying to apply
# a bilateral transformation on a nested QuerySet: that won't work.
from django.db.models.sql.query import Query # avoid circular import
if isinstance(rhs, Query):
raise NotImplementedError(
"Bilateral transformations on nested querysets are not implemented."
)
self.bilateral_transforms = bilateral_transforms
def apply_bilateral_transforms(self, value):
for transform in self.bilateral_transforms:
value = transform(value)
return value
def __repr__(self):
return f"{self.__class__.__name__}({self.lhs!r}, {self.rhs!r})"
def batch_process_rhs(self, compiler, connection, rhs=None):
if rhs is None:
rhs = self.rhs
if self.bilateral_transforms:
sqls, sqls_params = [], []
for p in rhs:
value = Value(p, output_field=self.lhs.output_field)
value = self.apply_bilateral_transforms(value)
value = value.resolve_expression(compiler.query)
sql, sql_params = compiler.compile(value)
sqls.append(sql)
sqls_params.extend(sql_params)
else:
_, params = self.get_db_prep_lookup(rhs, connection)
sqls, sqls_params = ["%s"] * len(params), params
return sqls, sqls_params
def get_source_expressions(self):
if self.rhs_is_direct_value():
return [self.lhs]
return [self.lhs, self.rhs]
def set_source_expressions(self, new_exprs):
if len(new_exprs) == 1:
self.lhs = new_exprs[0]
else:
self.lhs, self.rhs = new_exprs
def get_prep_lookup(self):
if not self.prepare_rhs or hasattr(self.rhs, "resolve_expression"):
return self.rhs
if hasattr(self.lhs, "output_field"):
if hasattr(self.lhs.output_field, "get_prep_value"):
return self.lhs.output_field.get_prep_value(self.rhs)
elif self.rhs_is_direct_value():
return Value(self.rhs)
return self.rhs
def get_prep_lhs(self):
if hasattr(self.lhs, "resolve_expression"):
return self.lhs
return Value(self.lhs)
def get_db_prep_lookup(self, value, connection):
return ("%s", [value])
def process_lhs(self, compiler, connection, lhs=None):
lhs = lhs or self.lhs
if hasattr(lhs, "resolve_expression"):
lhs = lhs.resolve_expression(compiler.query)
sql, params = compiler.compile(lhs)
if isinstance(lhs, Lookup):
# Wrapped in parentheses to respect operator precedence.
sql = f"({sql})"
return sql, params
def process_rhs(self, compiler, connection):
value = self.rhs
if self.bilateral_transforms:
if self.rhs_is_direct_value():
                # Do not call get_db_prep_lookup here as the value will be
                # transformed before being used for the lookup.
value = Value(value, output_field=self.lhs.output_field)
value = self.apply_bilateral_transforms(value)
value = value.resolve_expression(compiler.query)
if hasattr(value, "as_sql"):
sql, params = compiler.compile(value)
# Ensure expression is wrapped in parentheses to respect operator
# precedence but avoid double wrapping as it can be misinterpreted
# on some backends (e.g. subqueries on SQLite).
if sql and sql[0] != "(":
sql = "(%s)" % sql
return sql, params
else:
return self.get_db_prep_lookup(value, connection)
def rhs_is_direct_value(self):
return not hasattr(self.rhs, "as_sql")
def get_group_by_cols(self):
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def as_oracle(self, compiler, connection):
# Oracle doesn't allow EXISTS() and filters to be compared to another
# expression unless they're wrapped in a CASE WHEN.
wrapped = False
exprs = []
for expr in (self.lhs, self.rhs):
if connection.ops.conditional_expression_supported_in_where_clause(expr):
expr = Case(When(expr, then=True), default=False)
wrapped = True
exprs.append(expr)
lookup = type(self)(*exprs) if wrapped else self
return lookup.as_sql(compiler, connection)
@cached_property
def output_field(self):
return BooleanField()
@property
def identity(self):
return self.__class__, self.lhs, self.rhs
def __eq__(self, other):
if not isinstance(other, Lookup):
return NotImplemented
return self.identity == other.identity
def __hash__(self):
return hash(make_hashable(self.identity))
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
c = self.copy()
c.is_summary = summarize
c.lhs = self.lhs.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
if hasattr(self.rhs, "resolve_expression"):
c.rhs = self.rhs.resolve_expression(
query, allow_joins, reuse, summarize, for_save
)
return c
def select_format(self, compiler, sql, params):
# Wrap filters with a CASE WHEN expression if a database backend
# (e.g. Oracle) doesn't support boolean expression in SELECT or GROUP
# BY list.
if not compiler.connection.features.supports_boolean_expr_in_select_clause:
sql = f"CASE WHEN {sql} THEN 1 ELSE 0 END"
return sql, params
class Transform(RegisterLookupMixin, Func):
"""
RegisterLookupMixin() is first so that get_lookup() and get_transform()
first examine self and then check output_field.
"""
bilateral = False
arity = 1
@property
def lhs(self):
return self.get_source_expressions()[0]
def get_bilateral_transforms(self):
if hasattr(self.lhs, "get_bilateral_transforms"):
bilateral_transforms = self.lhs.get_bilateral_transforms()
else:
bilateral_transforms = []
if self.bilateral:
bilateral_transforms.append(self.__class__)
return bilateral_transforms
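# Illustrative sketch, not part of this module: a bilateral transform is
# applied to *both* sides of a lookup. After registering the subclass below
# (the pattern from the Django docs), ``name__upper="foo"`` compiles to
# roughly UPPER("name") = UPPER('foo'). Upper is a real Transform from
# django.db.models.functions.
def _register_bilateral_upper():
    from django.db.models.functions import Upper

    class UpperBilateral(Upper):
        bilateral = True

    CharField.register_lookup(UpperBilateral)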
class BuiltinLookup(Lookup):
def process_lhs(self, compiler, connection, lhs=None):
lhs_sql, params = super().process_lhs(compiler, connection, lhs)
field_internal_type = self.lhs.output_field.get_internal_type()
db_type = self.lhs.output_field.db_type(connection=connection)
lhs_sql = connection.ops.field_cast_sql(db_type, field_internal_type) % lhs_sql
lhs_sql = (
connection.ops.lookup_cast(self.lookup_name, field_internal_type) % lhs_sql
)
return lhs_sql, list(params)
def as_sql(self, compiler, connection):
lhs_sql, params = self.process_lhs(compiler, connection)
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
params.extend(rhs_params)
rhs_sql = self.get_rhs_op(connection, rhs_sql)
return "%s %s" % (lhs_sql, rhs_sql), params
def get_rhs_op(self, connection, rhs):
return connection.operators[self.lookup_name] % rhs
class FieldGetDbPrepValueMixin:
"""
Some lookups require Field.get_db_prep_value() to be called on their
inputs.
"""
get_db_prep_lookup_value_is_iterable = False
def get_db_prep_lookup(self, value, connection):
# For relational fields, use the 'target_field' attribute of the
# output_field.
field = getattr(self.lhs.output_field, "target_field", None)
get_db_prep_value = (
getattr(field, "get_db_prep_value", None)
or self.lhs.output_field.get_db_prep_value
)
return (
"%s",
[get_db_prep_value(v, connection, prepared=True) for v in value]
if self.get_db_prep_lookup_value_is_iterable
else [get_db_prep_value(value, connection, prepared=True)],
)
class FieldGetDbPrepValueIterableMixin(FieldGetDbPrepValueMixin):
"""
Some lookups require Field.get_db_prep_value() to be called on each value
in an iterable.
"""
get_db_prep_lookup_value_is_iterable = True
def get_prep_lookup(self):
if hasattr(self.rhs, "resolve_expression"):
return self.rhs
prepared_values = []
for rhs_value in self.rhs:
if hasattr(rhs_value, "resolve_expression"):
# An expression will be handled by the database but can coexist
# alongside real values.
pass
elif self.prepare_rhs and hasattr(self.lhs.output_field, "get_prep_value"):
rhs_value = self.lhs.output_field.get_prep_value(rhs_value)
prepared_values.append(rhs_value)
return prepared_values
def process_rhs(self, compiler, connection):
if self.rhs_is_direct_value():
# rhs should be an iterable of values. Use batch_process_rhs()
# to prepare/transform those values.
return self.batch_process_rhs(compiler, connection)
else:
return super().process_rhs(compiler, connection)
def resolve_expression_parameter(self, compiler, connection, sql, param):
params = [param]
if hasattr(param, "resolve_expression"):
param = param.resolve_expression(compiler.query)
if hasattr(param, "as_sql"):
sql, params = compiler.compile(param)
return sql, params
def batch_process_rhs(self, compiler, connection, rhs=None):
pre_processed = super().batch_process_rhs(compiler, connection, rhs)
# The params list may contain expressions which compile to a
# sql/param pair. Zip them to get sql and param pairs that refer to the
# same argument and attempt to replace them with the result of
# compiling the param step.
sql, params = zip(
*(
self.resolve_expression_parameter(compiler, connection, sql, param)
for sql, param in zip(*pre_processed)
)
)
params = itertools.chain.from_iterable(params)
return sql, tuple(params)
class PostgresOperatorLookup(Lookup):
"""Lookup defined by operators on PostgreSQL."""
postgres_operator = None
def as_postgresql(self, compiler, connection):
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
params = tuple(lhs_params) + tuple(rhs_params)
return "%s %s %s" % (lhs, self.postgres_operator, rhs), params
@Field.register_lookup
class Exact(FieldGetDbPrepValueMixin, BuiltinLookup):
lookup_name = "exact"
def get_prep_lookup(self):
from django.db.models.sql.query import Query # avoid circular import
if isinstance(self.rhs, Query):
if self.rhs.has_limit_one():
if not self.rhs.has_select_fields:
self.rhs.clear_select_clause()
self.rhs.add_fields(["pk"])
else:
raise ValueError(
"The QuerySet value for an exact lookup must be limited to "
"one result using slicing."
)
return super().get_prep_lookup()
def as_sql(self, compiler, connection):
# Avoid comparison against direct rhs if lhs is a boolean value. That
# turns "boolfield__exact=True" into "WHERE boolean_field" instead of
# "WHERE boolean_field = True" when allowed.
if (
isinstance(self.rhs, bool)
and getattr(self.lhs, "conditional", False)
and connection.ops.conditional_expression_supported_in_where_clause(
self.lhs
)
):
lhs_sql, params = self.process_lhs(compiler, connection)
template = "%s" if self.rhs else "NOT %s"
return template % lhs_sql, params
return super().as_sql(compiler, connection)
@Field.register_lookup
class IExact(BuiltinLookup):
lookup_name = "iexact"
prepare_rhs = False
def process_rhs(self, qn, connection):
rhs, params = super().process_rhs(qn, connection)
if params:
params[0] = connection.ops.prep_for_iexact_query(params[0])
return rhs, params
@Field.register_lookup
class GreaterThan(FieldGetDbPrepValueMixin, BuiltinLookup):
lookup_name = "gt"
@Field.register_lookup
class GreaterThanOrEqual(FieldGetDbPrepValueMixin, BuiltinLookup):
lookup_name = "gte"
@Field.register_lookup
class LessThan(FieldGetDbPrepValueMixin, BuiltinLookup):
lookup_name = "lt"
@Field.register_lookup
class LessThanOrEqual(FieldGetDbPrepValueMixin, BuiltinLookup):
lookup_name = "lte"
class IntegerFieldFloatRounding:
"""
Allow floats to work as query values for IntegerField. Without this, the
decimal portion of the float would always be discarded.
"""
def get_prep_lookup(self):
if isinstance(self.rhs, float):
self.rhs = math.ceil(self.rhs)
return super().get_prep_lookup()
@IntegerField.register_lookup
class IntegerGreaterThanOrEqual(IntegerFieldFloatRounding, GreaterThanOrEqual):
pass
@IntegerField.register_lookup
class IntegerLessThan(IntegerFieldFloatRounding, LessThan):
pass
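# Illustrative sketch, not part of this module: with the rounding mixin
# above, a float bound on an integer column is rounded up, so
# ``id__gte=2.5`` compiles as ``id >= 3`` and ``id__lt=2.5`` as ``id < 3``,
# matching the mathematical meaning of a 2.5 boundary over integers.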
@Field.register_lookup
class In(FieldGetDbPrepValueIterableMixin, BuiltinLookup):
lookup_name = "in"
def get_prep_lookup(self):
from django.db.models.sql.query import Query # avoid circular import
if isinstance(self.rhs, Query):
self.rhs.clear_ordering(clear_default=True)
if not self.rhs.has_select_fields:
self.rhs.clear_select_clause()
self.rhs.add_fields(["pk"])
return super().get_prep_lookup()
def process_rhs(self, compiler, connection):
db_rhs = getattr(self.rhs, "_db", None)
if db_rhs is not None and db_rhs != connection.alias:
raise ValueError(
"Subqueries aren't allowed across different databases. Force "
"the inner query to be evaluated using `list(inner_query)`."
)
if self.rhs_is_direct_value():
# Remove None from the list as NULL is never equal to anything.
try:
rhs = OrderedSet(self.rhs)
rhs.discard(None)
except TypeError: # Unhashable items in self.rhs
rhs = [r for r in self.rhs if r is not None]
if not rhs:
raise EmptyResultSet
# rhs should be an iterable; use batch_process_rhs() to
# prepare/transform those values.
sqls, sqls_params = self.batch_process_rhs(compiler, connection, rhs)
placeholder = "(" + ", ".join(sqls) + ")"
return (placeholder, sqls_params)
return super().process_rhs(compiler, connection)
def get_rhs_op(self, connection, rhs):
return "IN %s" % rhs
def as_sql(self, compiler, connection):
max_in_list_size = connection.ops.max_in_list_size()
if (
self.rhs_is_direct_value()
and max_in_list_size
and len(self.rhs) > max_in_list_size
):
return self.split_parameter_list_as_sql(compiler, connection)
return super().as_sql(compiler, connection)
def split_parameter_list_as_sql(self, compiler, connection):
# This is a special case for databases which limit the number of
# elements which can appear in an 'IN' clause.
max_in_list_size = connection.ops.max_in_list_size()
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.batch_process_rhs(compiler, connection)
in_clause_elements = ["("]
params = []
for offset in range(0, len(rhs_params), max_in_list_size):
if offset > 0:
in_clause_elements.append(" OR ")
in_clause_elements.append("%s IN (" % lhs)
params.extend(lhs_params)
sqls = rhs[offset : offset + max_in_list_size]
sqls_params = rhs_params[offset : offset + max_in_list_size]
param_group = ", ".join(sqls)
in_clause_elements.append(param_group)
in_clause_elements.append(")")
params.extend(sqls_params)
in_clause_elements.append(")")
return "".join(in_clause_elements), params
class PatternLookup(BuiltinLookup):
param_pattern = "%%%s%%"
prepare_rhs = False
def get_rhs_op(self, connection, rhs):
# Assume we are in startswith. We need to produce SQL like:
# col LIKE %s, ['thevalue%']
        # For Python values we can (and should) do that directly in Python,
        # but if the value is, for example, a reference to another column,
        # then we need to add the % pattern match to the lookup with
        # something like
# col LIKE othercol || '%%'
# So, for Python values we don't need any special pattern, but for
# SQL reference values or SQL transformations we need the correct
# pattern added.
if hasattr(self.rhs, "as_sql") or self.bilateral_transforms:
pattern = connection.pattern_ops[self.lookup_name].format(
connection.pattern_esc
)
return pattern.format(rhs)
else:
return super().get_rhs_op(connection, rhs)
def process_rhs(self, qn, connection):
rhs, params = super().process_rhs(qn, connection)
if self.rhs_is_direct_value() and params and not self.bilateral_transforms:
params[0] = self.param_pattern % connection.ops.prep_for_like_query(
params[0]
)
return rhs, params
@Field.register_lookup
class Contains(PatternLookup):
lookup_name = "contains"
@Field.register_lookup
class IContains(Contains):
lookup_name = "icontains"
@Field.register_lookup
class StartsWith(PatternLookup):
lookup_name = "startswith"
param_pattern = "%s%%"
@Field.register_lookup
class IStartsWith(StartsWith):
lookup_name = "istartswith"
@Field.register_lookup
class EndsWith(PatternLookup):
lookup_name = "endswith"
param_pattern = "%%%s"
@Field.register_lookup
class IEndsWith(EndsWith):
lookup_name = "iendswith"
@Field.register_lookup
class Range(FieldGetDbPrepValueIterableMixin, BuiltinLookup):
lookup_name = "range"
def get_rhs_op(self, connection, rhs):
return "BETWEEN %s AND %s" % (rhs[0], rhs[1])
@Field.register_lookup
class IsNull(BuiltinLookup):
lookup_name = "isnull"
prepare_rhs = False
def as_sql(self, compiler, connection):
if not isinstance(self.rhs, bool):
raise ValueError(
"The QuerySet value for an isnull lookup must be True or False."
)
sql, params = compiler.compile(self.lhs)
if self.rhs:
return "%s IS NULL" % sql, params
else:
return "%s IS NOT NULL" % sql, params
@Field.register_lookup
class Regex(BuiltinLookup):
lookup_name = "regex"
prepare_rhs = False
def as_sql(self, compiler, connection):
if self.lookup_name in connection.operators:
return super().as_sql(compiler, connection)
else:
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
sql_template = connection.ops.regex_lookup(self.lookup_name)
return sql_template % (lhs, rhs), lhs_params + rhs_params
@Field.register_lookup
class IRegex(Regex):
lookup_name = "iregex"
class YearLookup(Lookup):
def year_lookup_bounds(self, connection, year):
from django.db.models.functions import ExtractIsoYear
iso_year = isinstance(self.lhs, ExtractIsoYear)
output_field = self.lhs.lhs.output_field
if isinstance(output_field, DateTimeField):
bounds = connection.ops.year_lookup_bounds_for_datetime_field(
year,
iso_year=iso_year,
)
else:
bounds = connection.ops.year_lookup_bounds_for_date_field(
year,
iso_year=iso_year,
)
return bounds
def as_sql(self, compiler, connection):
# Avoid the extract operation if the rhs is a direct value to allow
# indexes to be used.
if self.rhs_is_direct_value():
# Skip the extract part by directly using the originating field,
# that is self.lhs.lhs.
lhs_sql, params = self.process_lhs(compiler, connection, self.lhs.lhs)
rhs_sql, _ = self.process_rhs(compiler, connection)
rhs_sql = self.get_direct_rhs_sql(connection, rhs_sql)
start, finish = self.year_lookup_bounds(connection, self.rhs)
params.extend(self.get_bound_params(start, finish))
return "%s %s" % (lhs_sql, rhs_sql), params
return super().as_sql(compiler, connection)
def get_direct_rhs_sql(self, connection, rhs):
return connection.operators[self.lookup_name] % rhs
def get_bound_params(self, start, finish):
raise NotImplementedError(
"subclasses of YearLookup must provide a get_bound_params() method"
)
class YearExact(YearLookup, Exact):
def get_direct_rhs_sql(self, connection, rhs):
return "BETWEEN %s AND %s"
def get_bound_params(self, start, finish):
return (start, finish)
class YearGt(YearLookup, GreaterThan):
def get_bound_params(self, start, finish):
return (finish,)
class YearGte(YearLookup, GreaterThanOrEqual):
def get_bound_params(self, start, finish):
return (start,)
class YearLt(YearLookup, LessThan):
def get_bound_params(self, start, finish):
return (start,)
class YearLte(YearLookup, LessThanOrEqual):
def get_bound_params(self, start, finish):
return (finish,)
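# Illustrative sketch, not part of this module: for a direct value, the year
# lookups above compare the originating column against precomputed bounds so
# an index on the column stays usable. ``created__year=2005`` on a
# DateTimeField compiles to roughly
#     created BETWEEN '2005-01-01 00:00:00' AND '2005-12-31 23:59:59.999999'
# instead of EXTRACT('year' FROM created) = 2005.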
class UUIDTextMixin:
"""
Strip hyphens from a value when filtering a UUIDField on backends without
a native datatype for UUID.
"""
def process_rhs(self, qn, connection):
if not connection.features.has_native_uuid_field:
from django.db.models.functions import Replace
if self.rhs_is_direct_value():
self.rhs = Value(self.rhs)
self.rhs = Replace(
self.rhs, Value("-"), Value(""), output_field=CharField()
)
rhs, params = super().process_rhs(qn, connection)
return rhs, params
@UUIDField.register_lookup
class UUIDIExact(UUIDTextMixin, IExact):
pass
@UUIDField.register_lookup
class UUIDContains(UUIDTextMixin, Contains):
pass
@UUIDField.register_lookup
class UUIDIContains(UUIDTextMixin, IContains):
pass
@UUIDField.register_lookup
class UUIDStartsWith(UUIDTextMixin, StartsWith):
pass
@UUIDField.register_lookup
class UUIDIStartsWith(UUIDTextMixin, IStartsWith):
pass
@UUIDField.register_lookup
class UUIDEndsWith(UUIDTextMixin, EndsWith):
pass
@UUIDField.register_lookup
class UUIDIEndsWith(UUIDTextMixin, IEndsWith):
pass
|
f6a4e300316a71681556e97dfaeeba7a140685f32d8afc30f05cf39a65cbd6e0 | import datetime
import decimal
import functools
import logging
import time
from contextlib import contextmanager
from django.db import NotSupportedError
from django.utils.crypto import md5
from django.utils.dateparse import parse_time
logger = logging.getLogger("django.db.backends")
class CursorWrapper:
def __init__(self, cursor, db):
self.cursor = cursor
self.db = db
WRAP_ERROR_ATTRS = frozenset(["fetchone", "fetchmany", "fetchall", "nextset"])
def __getattr__(self, attr):
cursor_attr = getattr(self.cursor, attr)
if attr in CursorWrapper.WRAP_ERROR_ATTRS:
return self.db.wrap_database_errors(cursor_attr)
else:
return cursor_attr
def __iter__(self):
with self.db.wrap_database_errors:
yield from self.cursor
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# Close instead of passing through to avoid backend-specific behavior
# (#17671). Catch errors liberally because errors in cleanup code
# aren't useful.
try:
self.close()
except self.db.Database.Error:
pass
# The following methods cannot be implemented in __getattr__, because the
# code must run when the method is invoked, not just when it is accessed.
def callproc(self, procname, params=None, kparams=None):
# Keyword parameters for callproc aren't supported in PEP 249, but the
# database driver may support them (e.g. cx_Oracle).
if kparams is not None and not self.db.features.supports_callproc_kwargs:
raise NotSupportedError(
"Keyword parameters for callproc are not supported on this "
"database backend."
)
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
if params is None and kparams is None:
return self.cursor.callproc(procname)
elif kparams is None:
return self.cursor.callproc(procname, params)
else:
params = params or ()
return self.cursor.callproc(procname, params, kparams)
def execute(self, sql, params=None):
return self._execute_with_wrappers(
sql, params, many=False, executor=self._execute
)
def executemany(self, sql, param_list):
return self._execute_with_wrappers(
sql, param_list, many=True, executor=self._executemany
)
def _execute_with_wrappers(self, sql, params, many, executor):
context = {"connection": self.db, "cursor": self}
for wrapper in reversed(self.db.execute_wrappers):
executor = functools.partial(wrapper, executor)
return executor(sql, params, many, context)
def _execute(self, sql, params, *ignored_wrapper_args):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
if params is None:
# params default might be backend specific.
return self.cursor.execute(sql)
else:
return self.cursor.execute(sql, params)
def _executemany(self, sql, param_list, *ignored_wrapper_args):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
return self.cursor.executemany(sql, param_list)
class CursorDebugWrapper(CursorWrapper):
# XXX callproc isn't instrumented at this time.
def execute(self, sql, params=None):
with self.debug_sql(sql, params, use_last_executed_query=True):
return super().execute(sql, params)
def executemany(self, sql, param_list):
with self.debug_sql(sql, param_list, many=True):
return super().executemany(sql, param_list)
@contextmanager
def debug_sql(
self, sql=None, params=None, use_last_executed_query=False, many=False
):
start = time.monotonic()
try:
yield
finally:
stop = time.monotonic()
duration = stop - start
if use_last_executed_query:
sql = self.db.ops.last_executed_query(self.cursor, sql, params)
try:
times = len(params) if many else ""
except TypeError:
# params could be an iterator.
times = "?"
self.db.queries_log.append(
{
"sql": "%s times: %s" % (times, sql) if many else sql,
"time": "%.3f" % duration,
}
)
logger.debug(
"(%.3f) %s; args=%s; alias=%s",
duration,
sql,
params,
self.db.alias,
extra={
"duration": duration,
"sql": sql,
"params": params,
"alias": self.db.alias,
},
)
@contextmanager
def debug_transaction(connection, sql):
start = time.monotonic()
try:
yield
finally:
if connection.queries_logged:
stop = time.monotonic()
duration = stop - start
connection.queries_log.append(
{
"sql": "%s" % sql,
"time": "%.3f" % duration,
}
)
logger.debug(
"(%.3f) %s; args=%s; alias=%s",
duration,
sql,
None,
connection.alias,
extra={
"duration": duration,
"sql": sql,
"alias": connection.alias,
},
)
def split_tzname_delta(tzname):
"""
Split a time zone name into a 3-tuple of (name, sign, offset).
"""
for sign in ["+", "-"]:
if sign in tzname:
name, offset = tzname.rsplit(sign, 1)
if offset and parse_time(offset):
return name, sign, offset
return tzname, None, None
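# Illustrative sketch, not part of this module: expected splits for a few
# representative inputs (the offset must parse as HH:MM to count).
def _split_tzname_delta_examples():
    assert split_tzname_delta("America/Sao_Paulo") == ("America/Sao_Paulo", None, None)
    assert split_tzname_delta("UTC+05:30") == ("UTC", "+", "05:30")
    assert split_tzname_delta("Etc/GMT-1") == ("Etc/GMT-1", None, None)  # "1" isn't HH:MM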
###############################################
# Converters from database (string) to Python #
###############################################
def typecast_date(s):
return (
datetime.date(*map(int, s.split("-"))) if s else None
) # return None if s is null
def typecast_time(s): # does NOT store time zone information
if not s:
return None
hour, minutes, seconds = s.split(":")
if "." in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split(".")
else:
microseconds = "0"
return datetime.time(
int(hour), int(minutes), int(seconds), int((microseconds + "000000")[:6])
)
def typecast_timestamp(s): # does NOT store time zone information
# "2005-07-29 15:48:00.590358-05"
# "2005-07-29 09:56:00-05"
if not s:
return None
if " " not in s:
return typecast_date(s)
d, t = s.split()
# Remove timezone information.
if "-" in t:
t, _ = t.split("-", 1)
elif "+" in t:
t, _ = t.split("+", 1)
dates = d.split("-")
times = t.split(":")
seconds = times[2]
if "." in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split(".")
else:
microseconds = "0"
return datetime.datetime(
int(dates[0]),
int(dates[1]),
int(dates[2]),
int(times[0]),
int(times[1]),
int(seconds),
int((microseconds + "000000")[:6]),
)
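# Illustrative sketch, not part of this module: typical conversions. As the
# comments above note, any trailing UTC offset is discarded.
def _typecast_examples():
    assert typecast_date("2005-07-29") == datetime.date(2005, 7, 29)
    assert typecast_time("15:48:00.590358") == datetime.time(15, 48, 0, 590358)
    assert typecast_timestamp("2005-07-29 09:56:00-05") == datetime.datetime(
        2005, 7, 29, 9, 56
    )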
###############################################
# Converters from Python to database (string) #
###############################################
def split_identifier(identifier):
"""
Split an SQL identifier into a two element tuple of (namespace, name).
    The identifier could be a table, column, or sequence name that might be
    prefixed by a namespace.
"""
try:
namespace, name = identifier.split('"."')
except ValueError:
namespace, name = "", identifier
return namespace.strip('"'), name.strip('"')
def truncate_name(identifier, length=None, hash_len=4):
"""
Shorten an SQL identifier to a repeatable mangled version with the given
length.
    If a quote-stripped name contains a namespace, e.g. USERNAME"."TABLE,
truncate the table portion only.
"""
namespace, name = split_identifier(identifier)
if length is None or len(name) <= length:
return identifier
digest = names_digest(name, length=hash_len)
return "%s%s%s" % (
'%s"."' % namespace if namespace else "",
name[: length - hash_len],
digest,
)
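# Illustrative sketch, not part of this module: the result keeps the first
# ``length - hash_len`` characters of the name and appends a ``hash_len``
# character digest, so it is short, unique-ish, and repeatable.
def _truncate_name_examples():
    short = truncate_name("a_very_long_identifier", length=10)
    assert len(short) == 10
    assert short.startswith("a_very")  # 10 - 4 leading characters survive
    assert short == truncate_name("a_very_long_identifier", length=10)
    assert truncate_name("short", length=10) == "short"  # already fits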
def names_digest(*args, length):
"""
    Generate a short MD5-based digest of a set of arguments that can be used
    to shorten identifying names.
"""
h = md5(usedforsecurity=False)
for arg in args:
h.update(arg.encode())
return h.hexdigest()[:length]
def format_number(value, max_digits, decimal_places):
"""
Format a number into a string with the requisite number of digits and
decimal places.
"""
if value is None:
return None
context = decimal.getcontext().copy()
if max_digits is not None:
context.prec = max_digits
if decimal_places is not None:
value = value.quantize(
decimal.Decimal(1).scaleb(-decimal_places), context=context
)
else:
context.traps[decimal.Rounded] = 1
value = context.create_decimal(value)
return "{:f}".format(value)
def strip_quotes(table_name):
"""
Strip quotes off of quoted table names to make them safe for use in index
names, sequence names, etc. For example '"USER"."TABLE"' (an Oracle naming
scheme) becomes 'USER"."TABLE'.
"""
has_quotes = table_name.startswith('"') and table_name.endswith('"')
return table_name[1:-1] if has_quotes else table_name
|
ca5d38fc0fd71426f488e330d840beed1167a9551d7a68da93a3224fb0971931 | import functools
import inspect
from functools import partial
from django import forms
from django.apps import apps
from django.conf import SettingsReference, settings
from django.core import checks, exceptions
from django.db import connection, router
from django.db.backends import utils
from django.db.models import Q
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import CASCADE, SET_DEFAULT, SET_NULL
from django.db.models.query_utils import PathInfo
from django.db.models.utils import make_model_tuple
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from . import Field
from .mixins import FieldCacheMixin
from .related_descriptors import (
ForeignKeyDeferredAttribute,
ForwardManyToOneDescriptor,
ForwardOneToOneDescriptor,
ManyToManyDescriptor,
ReverseManyToOneDescriptor,
ReverseOneToOneDescriptor,
)
from .related_lookups import (
RelatedExact,
RelatedGreaterThan,
RelatedGreaterThanOrEqual,
RelatedIn,
RelatedIsNull,
RelatedLessThan,
RelatedLessThanOrEqual,
)
from .reverse_related import ForeignObjectRel, ManyToManyRel, ManyToOneRel, OneToOneRel
RECURSIVE_RELATIONSHIP_CONSTANT = "self"
def resolve_relation(scope_model, relation):
"""
Transform relation into a model or fully-qualified model string of the form
"app_label.ModelName", relative to scope_model.
The relation argument can be:
* RECURSIVE_RELATIONSHIP_CONSTANT, i.e. the string "self", in which case
the model argument will be returned.
* A bare model name without an app_label, in which case scope_model's
app_label will be prepended.
* An "app_label.ModelName" string.
* A model class, which will be returned unchanged.
"""
# Check for recursive relations
if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
relation = scope_model
# Look for an "app.Model" relation
if isinstance(relation, str):
if "." not in relation:
relation = "%s.%s" % (scope_model._meta.app_label, relation)
return relation
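# Illustrative sketch, not part of this module: given a hypothetical
# ``Author`` model in an app labelled "library":
#     resolve_relation(Author, "self")       -> Author (the class itself)
#     resolve_relation(Author, "Book")       -> "library.Book"
#     resolve_relation(Author, "shop.Order") -> "shop.Order"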
def lazy_related_operation(function, model, *related_models, **kwargs):
"""
Schedule `function` to be called once `model` and all `related_models`
have been imported and registered with the app registry. `function` will
be called with the newly-loaded model classes as its positional arguments,
plus any optional keyword arguments.
The `model` argument must be a model class. Each subsequent positional
argument is another model, or a reference to another model - see
`resolve_relation()` for the various forms these may take. Any relative
references will be resolved relative to `model`.
This is a convenience wrapper for `Apps.lazy_model_operation` - the app
registry model used is the one found in `model._meta.apps`.
"""
models = [model] + [resolve_relation(model, rel) for rel in related_models]
model_keys = (make_model_tuple(m) for m in models)
apps = model._meta.apps
return apps.lazy_model_operation(partial(function, **kwargs), *model_keys)
class RelatedField(FieldCacheMixin, Field):
"""Base class that all relational fields inherit from."""
# Field flags
one_to_many = False
one_to_one = False
many_to_many = False
many_to_one = False
def __init__(
self,
related_name=None,
related_query_name=None,
limit_choices_to=None,
**kwargs,
):
self._related_name = related_name
self._related_query_name = related_query_name
self._limit_choices_to = limit_choices_to
super().__init__(**kwargs)
@cached_property
def related_model(self):
# Can't cache this property until all the models are loaded.
apps.check_models_ready()
return self.remote_field.model
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_related_name_is_valid(),
*self._check_related_query_name_is_valid(),
*self._check_relation_model_exists(),
*self._check_referencing_to_swapped_model(),
*self._check_clashes(),
]
def _check_related_name_is_valid(self):
import keyword
related_name = self.remote_field.related_name
if related_name is None:
return []
is_valid_id = (
not keyword.iskeyword(related_name) and related_name.isidentifier()
)
if not (is_valid_id or related_name.endswith("+")):
return [
checks.Error(
"The name '%s' is invalid related_name for field %s.%s"
% (
self.remote_field.related_name,
self.model._meta.object_name,
self.name,
),
hint=(
"Related name must be a valid Python identifier or end with a "
"'+'"
),
obj=self,
id="fields.E306",
)
]
return []
def _check_related_query_name_is_valid(self):
if self.remote_field.is_hidden():
return []
rel_query_name = self.related_query_name()
errors = []
if rel_query_name.endswith("_"):
errors.append(
checks.Error(
"Reverse query name '%s' must not end with an underscore."
% rel_query_name,
hint=(
"Add or change a related_name or related_query_name "
"argument for this field."
),
obj=self,
id="fields.E308",
)
)
if LOOKUP_SEP in rel_query_name:
errors.append(
checks.Error(
"Reverse query name '%s' must not contain '%s'."
% (rel_query_name, LOOKUP_SEP),
hint=(
"Add or change a related_name or related_query_name "
"argument for this field."
),
obj=self,
id="fields.E309",
)
)
return errors
def _check_relation_model_exists(self):
rel_is_missing = self.remote_field.model not in self.opts.apps.get_models()
rel_is_string = isinstance(self.remote_field.model, str)
model_name = (
self.remote_field.model
if rel_is_string
else self.remote_field.model._meta.object_name
)
if rel_is_missing and (
rel_is_string or not self.remote_field.model._meta.swapped
):
return [
checks.Error(
"Field defines a relation with model '%s', which is either "
"not installed, or is abstract." % model_name,
obj=self,
id="fields.E300",
)
]
return []
def _check_referencing_to_swapped_model(self):
if (
self.remote_field.model not in self.opts.apps.get_models()
and not isinstance(self.remote_field.model, str)
and self.remote_field.model._meta.swapped
):
return [
checks.Error(
"Field defines a relation with the model '%s', which has "
"been swapped out." % self.remote_field.model._meta.label,
hint="Update the relation to point at 'settings.%s'."
% self.remote_field.model._meta.swappable,
obj=self,
id="fields.E301",
)
]
return []
def _check_clashes(self):
"""Check accessor and reverse query name clashes."""
from django.db.models.base import ModelBase
errors = []
opts = self.model._meta
        # self.remote_field.model may be a string instead of a model. Skip if
        # the model name is not resolved.
if not isinstance(self.remote_field.model, ModelBase):
return []
# Consider that we are checking field `Model.foreign` and the models
# are:
#
# class Target(models.Model):
# model = models.IntegerField()
# model_set = models.IntegerField()
#
# class Model(models.Model):
# foreign = models.ForeignKey(Target)
# m2m = models.ManyToManyField(Target)
# rel_opts.object_name == "Target"
rel_opts = self.remote_field.model._meta
# If the field doesn't install a backward relation on the target model
# (so `is_hidden` returns True), then there are no clashes to check
# and we can skip these fields.
rel_is_hidden = self.remote_field.is_hidden()
        rel_name = self.remote_field.get_accessor_name()  # i.e. "model_set"
        rel_query_name = self.related_query_name()  # i.e. "model"
# i.e. "app_label.Model.field".
field_name = "%s.%s" % (opts.label, self.name)
# Check clashes between accessor or reverse query name of `field`
# and any other field name -- i.e. accessor for Model.foreign is
# model_set and it clashes with Target.model_set.
potential_clashes = rel_opts.fields + rel_opts.many_to_many
for clash_field in potential_clashes:
# i.e. "app_label.Target.model_set".
clash_name = "%s.%s" % (rel_opts.label, clash_field.name)
if not rel_is_hidden and clash_field.name == rel_name:
errors.append(
checks.Error(
f"Reverse accessor '{rel_opts.object_name}.{rel_name}' "
f"for '{field_name}' clashes with field name "
f"'{clash_name}'.",
hint=(
"Rename field '%s', or add/change a related_name "
"argument to the definition for field '%s'."
)
% (clash_name, field_name),
obj=self,
id="fields.E302",
)
)
if clash_field.name == rel_query_name:
errors.append(
checks.Error(
"Reverse query name for '%s' clashes with field name '%s'."
% (field_name, clash_name),
hint=(
"Rename field '%s', or add/change a related_name "
"argument to the definition for field '%s'."
)
% (clash_name, field_name),
obj=self,
id="fields.E303",
)
)
# Check clashes between accessors/reverse query names of `field` and
        # any other field accessor -- i.e. Model.foreign accessor clashes with
# Model.m2m accessor.
potential_clashes = (r for r in rel_opts.related_objects if r.field is not self)
for clash_field in potential_clashes:
# i.e. "app_label.Model.m2m".
clash_name = "%s.%s" % (
clash_field.related_model._meta.label,
clash_field.field.name,
)
if not rel_is_hidden and clash_field.get_accessor_name() == rel_name:
errors.append(
checks.Error(
f"Reverse accessor '{rel_opts.object_name}.{rel_name}' "
f"for '{field_name}' clashes with reverse accessor for "
f"'{clash_name}'.",
hint=(
"Add or change a related_name argument "
"to the definition for '%s' or '%s'."
)
% (field_name, clash_name),
obj=self,
id="fields.E304",
)
)
if clash_field.get_accessor_name() == rel_query_name:
errors.append(
checks.Error(
"Reverse query name for '%s' clashes with reverse query name "
"for '%s'." % (field_name, clash_name),
hint=(
"Add or change a related_name argument "
"to the definition for '%s' or '%s'."
)
% (field_name, clash_name),
obj=self,
id="fields.E305",
)
)
return errors
def db_type(self, connection):
        # By default a related field will not have a column, as it relates to
        # columns from another table.
return None
def contribute_to_class(self, cls, name, private_only=False, **kwargs):
super().contribute_to_class(cls, name, private_only=private_only, **kwargs)
self.opts = cls._meta
if not cls._meta.abstract:
if self.remote_field.related_name:
related_name = self.remote_field.related_name
else:
related_name = self.opts.default_related_name
if related_name:
related_name %= {
"class": cls.__name__.lower(),
"model_name": cls._meta.model_name.lower(),
"app_label": cls._meta.app_label.lower(),
}
self.remote_field.related_name = related_name
if self.remote_field.related_query_name:
related_query_name = self.remote_field.related_query_name % {
"class": cls.__name__.lower(),
"app_label": cls._meta.app_label.lower(),
}
self.remote_field.related_query_name = related_query_name
def resolve_related_class(model, related, field):
field.remote_field.model = related
field.do_related_class(related, model)
lazy_related_operation(
resolve_related_class, cls, self.remote_field.model, field=self
)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self._limit_choices_to:
kwargs["limit_choices_to"] = self._limit_choices_to
if self._related_name is not None:
kwargs["related_name"] = self._related_name
if self._related_query_name is not None:
kwargs["related_query_name"] = self._related_query_name
return name, path, args, kwargs
def get_forward_related_filter(self, obj):
"""
        Return the keyword arguments that, when supplied to
        self.model.objects.filter(), would select all instances related through
this field to the remote obj. This is used to build the querysets
returned by related descriptors. obj is an instance of
self.related_field.model.
"""
return {
"%s__%s" % (self.name, rh_field.name): getattr(obj, rh_field.attname)
for _, rh_field in self.related_fields
}
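    # Illustrative sketch, not part of this module: for a single-column
    # ForeignKey named ``author`` whose target pk is ``id``, the method above
    # returns roughly {"author__id": obj.id}, ready to be passed to
    # self.model.objects.filter().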
def get_reverse_related_filter(self, obj):
"""
Complement to get_forward_related_filter(). Return the keyword
        arguments that, when passed to self.related_field.model.objects.filter(),
        select all instances of self.related_field.model related through
this field to obj. obj is an instance of self.model.
"""
base_q = Q.create(
[
(rh_field.attname, getattr(obj, lh_field.attname))
for lh_field, rh_field in self.related_fields
]
)
descriptor_filter = self.get_extra_descriptor_filter(obj)
if isinstance(descriptor_filter, dict):
return base_q & Q(**descriptor_filter)
elif descriptor_filter:
return base_q & descriptor_filter
return base_q
@property
def swappable_setting(self):
"""
Get the setting that this is powered from for swapping, or None
if it's not swapped in / marked with swappable=False.
"""
if self.swappable:
# Work out string form of "to"
if isinstance(self.remote_field.model, str):
to_string = self.remote_field.model
else:
to_string = self.remote_field.model._meta.label
return apps.get_swappable_settings_name(to_string)
return None
def set_attributes_from_rel(self):
self.name = self.name or (
self.remote_field.model._meta.model_name
+ "_"
+ self.remote_field.model._meta.pk.name
)
if self.verbose_name is None:
self.verbose_name = self.remote_field.model._meta.verbose_name
self.remote_field.set_field_name()
def do_related_class(self, other, cls):
self.set_attributes_from_rel()
self.contribute_to_related_class(other, self.remote_field)
def get_limit_choices_to(self):
"""
Return ``limit_choices_to`` for this model field.
If it is a callable, it will be invoked and the result will be
returned.
"""
if callable(self.remote_field.limit_choices_to):
return self.remote_field.limit_choices_to()
return self.remote_field.limit_choices_to
def formfield(self, **kwargs):
"""
Pass ``limit_choices_to`` to the field being constructed.
        Only pass it if the field type supports related fields.
        This is similar to the strategy used to pass the ``queryset`` to the
        field being constructed.
"""
defaults = {}
if hasattr(self.remote_field, "get_related_field"):
# If this is a callable, do not invoke it here. Just pass
# it in the defaults for when the form class will later be
# instantiated.
limit_choices_to = self.remote_field.limit_choices_to
defaults.update(
{
"limit_choices_to": limit_choices_to,
}
)
defaults.update(kwargs)
return super().formfield(**defaults)
def related_query_name(self):
"""
Define the name that can be used to identify this related object in a
table-spanning query.
"""
return (
self.remote_field.related_query_name
or self.remote_field.related_name
or self.opts.model_name
)
@property
def target_field(self):
"""
When filtering against this relation, return the field on the remote
model against which the filtering should happen.
"""
target_fields = self.path_infos[-1].target_fields
if len(target_fields) > 1:
raise exceptions.FieldError(
"The relation has multiple target fields, but only single target field "
"was asked for"
)
return target_fields[0]
def get_cache_name(self):
return self.name
class ForeignObject(RelatedField):
"""
Abstraction of the ForeignKey relation to support multi-column relations.
"""
# Field flags
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
requires_unique_target = True
related_accessor_class = ReverseManyToOneDescriptor
forward_related_accessor_class = ForwardManyToOneDescriptor
rel_class = ForeignObjectRel
def __init__(
self,
to,
on_delete,
from_fields,
to_fields,
rel=None,
related_name=None,
related_query_name=None,
limit_choices_to=None,
parent_link=False,
swappable=True,
**kwargs,
):
if rel is None:
rel = self.rel_class(
self,
to,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
parent_link=parent_link,
on_delete=on_delete,
)
super().__init__(
rel=rel,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
**kwargs,
)
self.from_fields = from_fields
self.to_fields = to_fields
self.swappable = swappable
def __copy__(self):
obj = super().__copy__()
# Remove any cached PathInfo values.
obj.__dict__.pop("path_infos", None)
obj.__dict__.pop("reverse_path_infos", None)
return obj
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_to_fields_exist(),
*self._check_unique_target(),
]
def _check_to_fields_exist(self):
# Skip nonexistent models.
if isinstance(self.remote_field.model, str):
return []
errors = []
for to_field in self.to_fields:
if to_field:
try:
self.remote_field.model._meta.get_field(to_field)
except exceptions.FieldDoesNotExist:
errors.append(
checks.Error(
"The to_field '%s' doesn't exist on the related "
"model '%s'."
% (to_field, self.remote_field.model._meta.label),
obj=self,
id="fields.E312",
)
)
return errors
def _check_unique_target(self):
rel_is_string = isinstance(self.remote_field.model, str)
if rel_is_string or not self.requires_unique_target:
return []
try:
self.foreign_related_fields
except exceptions.FieldDoesNotExist:
return []
if not self.foreign_related_fields:
return []
unique_foreign_fields = {
frozenset([f.name])
for f in self.remote_field.model._meta.get_fields()
if getattr(f, "unique", False)
}
unique_foreign_fields.update(
{frozenset(ut) for ut in self.remote_field.model._meta.unique_together}
)
unique_foreign_fields.update(
{
frozenset(uc.fields)
for uc in self.remote_field.model._meta.total_unique_constraints
}
)
foreign_fields = {f.name for f in self.foreign_related_fields}
has_unique_constraint = any(u <= foreign_fields for u in unique_foreign_fields)
if not has_unique_constraint and len(self.foreign_related_fields) > 1:
field_combination = ", ".join(
"'%s'" % rel_field.name for rel_field in self.foreign_related_fields
)
model_name = self.remote_field.model.__name__
return [
checks.Error(
"No subset of the fields %s on model '%s' is unique."
% (field_combination, model_name),
hint=(
"Mark a single field as unique=True or add a set of "
"fields to a unique constraint (via unique_together "
"or a UniqueConstraint (without condition) in the "
"model Meta.constraints)."
),
obj=self,
id="fields.E310",
)
]
elif not has_unique_constraint:
field_name = self.foreign_related_fields[0].name
model_name = self.remote_field.model.__name__
return [
checks.Error(
"'%s.%s' must be unique because it is referenced by "
"a foreign key." % (model_name, field_name),
hint=(
"Add unique=True to this field or add a "
"UniqueConstraint (without condition) in the model "
"Meta.constraints."
),
obj=self,
id="fields.E311",
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
kwargs["on_delete"] = self.remote_field.on_delete
kwargs["from_fields"] = self.from_fields
kwargs["to_fields"] = self.to_fields
if self.remote_field.parent_link:
kwargs["parent_link"] = self.remote_field.parent_link
if isinstance(self.remote_field.model, str):
if "." in self.remote_field.model:
app_label, model_name = self.remote_field.model.split(".")
kwargs["to"] = "%s.%s" % (app_label, model_name.lower())
else:
kwargs["to"] = self.remote_field.model.lower()
else:
kwargs["to"] = self.remote_field.model._meta.label_lower
# If swappable is True, then see if we're actually pointing to the target
# of a swap.
swappable_setting = self.swappable_setting
if swappable_setting is not None:
# If it's already a settings reference, error
if hasattr(kwargs["to"], "setting_name"):
if kwargs["to"].setting_name != swappable_setting:
raise ValueError(
"Cannot deconstruct a ForeignKey pointing to a model "
"that is swapped in place of more than one model (%s and %s)"
% (kwargs["to"].setting_name, swappable_setting)
)
# Set it
kwargs["to"] = SettingsReference(
kwargs["to"],
swappable_setting,
)
return name, path, args, kwargs
def resolve_related_fields(self):
if not self.from_fields or len(self.from_fields) != len(self.to_fields):
raise ValueError(
"Foreign Object from and to fields must be the same non-zero length"
)
if isinstance(self.remote_field.model, str):
raise ValueError(
"Related model %r cannot be resolved" % self.remote_field.model
)
related_fields = []
for index in range(len(self.from_fields)):
from_field_name = self.from_fields[index]
to_field_name = self.to_fields[index]
from_field = (
self
if from_field_name == RECURSIVE_RELATIONSHIP_CONSTANT
else self.opts.get_field(from_field_name)
)
to_field = (
self.remote_field.model._meta.pk
if to_field_name is None
else self.remote_field.model._meta.get_field(to_field_name)
)
related_fields.append((from_field, to_field))
return related_fields
@cached_property
def related_fields(self):
return self.resolve_related_fields()
@cached_property
def reverse_related_fields(self):
return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields]
@cached_property
def local_related_fields(self):
return tuple(lhs_field for lhs_field, rhs_field in self.related_fields)
@cached_property
def foreign_related_fields(self):
return tuple(
rhs_field for lhs_field, rhs_field in self.related_fields if rhs_field
)
def get_local_related_value(self, instance):
return self.get_instance_value_for_fields(instance, self.local_related_fields)
def get_foreign_related_value(self, instance):
return self.get_instance_value_for_fields(instance, self.foreign_related_fields)
@staticmethod
def get_instance_value_for_fields(instance, fields):
ret = []
opts = instance._meta
for field in fields:
# Gotcha: in some cases (like fixture loading) a model can have
# different values in parent_ptr_id and parent's id. So, use
# instance.pk (that is, parent_ptr_id) when asked for instance.id.
if field.primary_key:
possible_parent_link = opts.get_ancestor_link(field.model)
if (
not possible_parent_link
or possible_parent_link.primary_key
or possible_parent_link.model._meta.abstract
):
ret.append(instance.pk)
continue
ret.append(getattr(instance, field.attname))
return tuple(ret)
def get_attname_column(self):
attname, column = super().get_attname_column()
return attname, None
def get_joining_columns(self, reverse_join=False):
source = self.reverse_related_fields if reverse_join else self.related_fields
return tuple(
(lhs_field.column, rhs_field.column) for lhs_field, rhs_field in source
)
def get_reverse_joining_columns(self):
return self.get_joining_columns(reverse_join=True)
def get_extra_descriptor_filter(self, instance):
"""
Return an extra filter condition for related object fetching when the
user accesses 'instance.fieldname'; that is, the extra filter is used
in the field's descriptor.
The filter should be either a dict usable in a .filter(**kwargs) call
or a Q object. The condition will be ANDed together with the relation's
joining columns.
A parallel method is get_extra_restriction() which is used in
JOIN and subquery conditions.
"""
return {}
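# A minimal sketch (not part of Django) of how a subclass might override
# this hook; "is_active" is a hypothetical column on the remote model:
#
#     def get_extra_descriptor_filter(self, instance):
#         return {"is_active": True}
#
# With that override, 'instance.fieldname' would only fetch related rows
# where is_active=True, ANDed with the usual join columns.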
def get_extra_restriction(self, alias, related_alias):
"""
Return a pair condition used for joining and subquery pushdown. The
condition is something that responds to the as_sql(compiler, connection)
method.
Note that currently referring to both 'alias' and 'related_alias'
will not work in some conditions, like subquery pushdown.
A parallel method is get_extra_descriptor_filter() which is used in
instance.fieldname related object fetching.
"""
return None
def get_path_info(self, filtered_relation=None):
"""Get path from this field to the related model."""
opts = self.remote_field.model._meta
from_opts = self.model._meta
return [
PathInfo(
from_opts=from_opts,
to_opts=opts,
target_fields=self.foreign_related_fields,
join_field=self,
m2m=False,
direct=True,
filtered_relation=filtered_relation,
)
]
@cached_property
def path_infos(self):
return self.get_path_info()
def get_reverse_path_info(self, filtered_relation=None):
"""Get path from the related model to this field's model."""
opts = self.model._meta
from_opts = self.remote_field.model._meta
return [
PathInfo(
from_opts=from_opts,
to_opts=opts,
target_fields=(opts.pk,),
join_field=self.remote_field,
m2m=not self.unique,
direct=False,
filtered_relation=filtered_relation,
)
]
@cached_property
def reverse_path_infos(self):
return self.get_reverse_path_info()
@classmethod
@functools.lru_cache(maxsize=None)
def get_class_lookups(cls):
bases = inspect.getmro(cls)
bases = bases[: bases.index(ForeignObject) + 1]
class_lookups = [parent.__dict__.get("class_lookups", {}) for parent in bases]
return cls.merge_dicts(class_lookups)
def contribute_to_class(self, cls, name, private_only=False, **kwargs):
super().contribute_to_class(cls, name, private_only=private_only, **kwargs)
setattr(cls, self.name, self.forward_related_accessor_class(self))
def contribute_to_related_class(self, cls, related):
# Internal FK's - i.e., those with a related name ending with '+' -
# and swapped models don't get a related descriptor.
if (
not self.remote_field.is_hidden()
and not related.related_model._meta.swapped
):
setattr(
cls._meta.concrete_model,
related.get_accessor_name(),
self.related_accessor_class(related),
)
# While 'limit_choices_to' might be a callable, simply pass
# it along for later evaluation - it's too early to apply it
# now, at model load time.
if self.remote_field.limit_choices_to:
cls._meta.related_fkey_lookups.append(
self.remote_field.limit_choices_to
)
ForeignObject.register_lookup(RelatedIn)
ForeignObject.register_lookup(RelatedExact)
ForeignObject.register_lookup(RelatedLessThan)
ForeignObject.register_lookup(RelatedGreaterThan)
ForeignObject.register_lookup(RelatedGreaterThanOrEqual)
ForeignObject.register_lookup(RelatedLessThanOrEqual)
ForeignObject.register_lookup(RelatedIsNull)
class ForeignKey(ForeignObject):
"""
Provide a many-to-one relation by adding a column to the local model
to hold the remote value.
By default, ForeignKey will target the pk of the remote model, but this
behavior can be changed by using the ``to_field`` argument.
"""
descriptor_class = ForeignKeyDeferredAttribute
# Field flags
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
rel_class = ManyToOneRel
empty_strings_allowed = False
default_error_messages = {
"invalid": _("%(model)s instance with %(field)s %(value)r does not exist.")
}
description = _("Foreign Key (type determined by related field)")
def __init__(
self,
to,
on_delete,
related_name=None,
related_query_name=None,
limit_choices_to=None,
parent_link=False,
to_field=None,
db_constraint=True,
**kwargs,
):
try:
to._meta.model_name
except AttributeError:
if not isinstance(to, str):
raise TypeError(
"%s(%r) is invalid. First parameter to ForeignKey must be "
"either a model, a model name, or the string %r"
% (
self.__class__.__name__,
to,
RECURSIVE_RELATIONSHIP_CONSTANT,
)
)
else:
# For backwards compatibility purposes, we need to *try* to set
# the to_field during FK construction. It won't be guaranteed to
# be correct until contribute_to_class is called. Refs #12190.
to_field = to_field or (to._meta.pk and to._meta.pk.name)
if not callable(on_delete):
raise TypeError("on_delete must be callable.")
kwargs["rel"] = self.rel_class(
self,
to,
to_field,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
parent_link=parent_link,
on_delete=on_delete,
)
kwargs.setdefault("db_index", True)
super().__init__(
to,
on_delete,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
from_fields=[RECURSIVE_RELATIONSHIP_CONSTANT],
to_fields=[to_field],
**kwargs,
)
self.db_constraint = db_constraint
def __class_getitem__(cls, *args, **kwargs):
return cls
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_on_delete(),
*self._check_unique(),
]
def _check_on_delete(self):
on_delete = getattr(self.remote_field, "on_delete", None)
if on_delete == SET_NULL and not self.null:
return [
checks.Error(
"Field specifies on_delete=SET_NULL, but cannot be null.",
hint=(
"Set null=True argument on the field, or change the on_delete "
"rule."
),
obj=self,
id="fields.E320",
)
]
elif on_delete == SET_DEFAULT and not self.has_default():
return [
checks.Error(
"Field specifies on_delete=SET_DEFAULT, but has no default value.",
hint="Set a default value, or change the on_delete rule.",
obj=self,
id="fields.E321",
)
]
else:
return []
def _check_unique(self, **kwargs):
return (
[
checks.Warning(
"Setting unique=True on a ForeignKey has the same effect as using "
"a OneToOneField.",
hint=(
"ForeignKey(unique=True) is usually better served by a "
"OneToOneField."
),
obj=self,
id="fields.W342",
)
]
if self.unique
else []
)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs["to_fields"]
del kwargs["from_fields"]
# Handle the simpler arguments
if self.db_index:
del kwargs["db_index"]
else:
kwargs["db_index"] = False
if self.db_constraint is not True:
kwargs["db_constraint"] = self.db_constraint
# Rel needs more work.
to_meta = getattr(self.remote_field.model, "_meta", None)
if self.remote_field.field_name and (
not to_meta
or (to_meta.pk and self.remote_field.field_name != to_meta.pk.name)
):
kwargs["to_field"] = self.remote_field.field_name
return name, path, args, kwargs
def to_python(self, value):
return self.target_field.to_python(value)
@property
def target_field(self):
return self.foreign_related_fields[0]
def validate(self, value, model_instance):
if self.remote_field.parent_link:
return
super().validate(value, model_instance)
if value is None:
return
using = router.db_for_read(self.remote_field.model, instance=model_instance)
qs = self.remote_field.model._base_manager.using(using).filter(
**{self.remote_field.field_name: value}
)
qs = qs.complex_filter(self.get_limit_choices_to())
if not qs.exists():
raise exceptions.ValidationError(
self.error_messages["invalid"],
code="invalid",
params={
"model": self.remote_field.model._meta.verbose_name,
"pk": value,
"field": self.remote_field.field_name,
"value": value,
}, # 'pk' is included for backwards compatibility
)
def resolve_related_fields(self):
related_fields = super().resolve_related_fields()
for from_field, to_field in related_fields:
if (
to_field
and to_field.model != self.remote_field.model._meta.concrete_model
):
raise exceptions.FieldError(
"'%s.%s' refers to field '%s' which is not local to model "
"'%s'."
% (
self.model._meta.label,
self.name,
to_field.name,
self.remote_field.model._meta.concrete_model._meta.label,
)
)
return related_fields
def get_attname(self):
return "%s_id" % self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_default(self):
"""Return the to_field if the default value is an object."""
field_default = super().get_default()
if isinstance(field_default, self.remote_field.model):
return getattr(field_default, self.target_field.attname)
return field_default
def get_db_prep_save(self, value, connection):
if value is None or (
value == ""
and (
not self.target_field.empty_strings_allowed
or connection.features.interprets_empty_strings_as_nulls
)
):
return None
else:
return self.target_field.get_db_prep_save(value, connection=connection)
def get_db_prep_value(self, value, connection, prepared=False):
return self.target_field.get_db_prep_value(value, connection, prepared)
def get_prep_value(self, value):
return self.target_field.get_prep_value(value)
def contribute_to_related_class(self, cls, related):
super().contribute_to_related_class(cls, related)
if self.remote_field.field_name is None:
self.remote_field.field_name = cls._meta.pk.name
def formfield(self, *, using=None, **kwargs):
if isinstance(self.remote_field.model, str):
raise ValueError(
"Cannot create form field for %r yet, because "
"its related model %r has not been loaded yet"
% (self.name, self.remote_field.model)
)
return super().formfield(
**{
"form_class": forms.ModelChoiceField,
"queryset": self.remote_field.model._default_manager.using(using),
"to_field_name": self.remote_field.field_name,
**kwargs,
"blank": self.blank,
}
)
def db_check(self, connection):
return None
def db_type(self, connection):
return self.target_field.rel_db_type(connection=connection)
def db_parameters(self, connection):
target_db_parameters = self.target_field.db_parameters(connection)
return {
"type": self.db_type(connection),
"check": self.db_check(connection),
"collation": target_db_parameters.get("collation"),
}
def convert_empty_strings(self, value, expression, connection):
if (not value) and isinstance(value, str):
return None
return value
def get_db_converters(self, connection):
converters = super().get_db_converters(connection)
if connection.features.interprets_empty_strings_as_nulls:
converters += [self.convert_empty_strings]
return converters
def get_col(self, alias, output_field=None):
if output_field is None:
output_field = self.target_field
while isinstance(output_field, ForeignKey):
output_field = output_field.target_field
if output_field is self:
raise ValueError("Cannot resolve output_field.")
return super().get_col(alias, output_field)
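# A minimal usage sketch (hypothetical models, not part of this module):
# a ForeignKey targeting a non-pk column via to_field. The referenced
# column must be unique.
#
#     class Country(models.Model):
#         iso_code = models.CharField(max_length=2, unique=True)
#
#     class City(models.Model):
#         country = models.ForeignKey(
#             Country,
#             on_delete=models.CASCADE,
#             to_field="iso_code",  # store the ISO code, not Country.pk
#         )
#
# City instances then carry a "country_id" attribute (see get_attname())
# holding the referenced iso_code value.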
class OneToOneField(ForeignKey):
"""
A OneToOneField is essentially the same as a ForeignKey, with the exception
that it always carries a "unique" constraint with it and the reverse
relation always returns the object pointed to (since there will only ever
be one), rather than returning a list.
"""
# Field flags
many_to_many = False
many_to_one = False
one_to_many = False
one_to_one = True
related_accessor_class = ReverseOneToOneDescriptor
forward_related_accessor_class = ForwardOneToOneDescriptor
rel_class = OneToOneRel
description = _("One-to-one relationship")
def __init__(self, to, on_delete, to_field=None, **kwargs):
kwargs["unique"] = True
super().__init__(to, on_delete, to_field=to_field, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if "unique" in kwargs:
del kwargs["unique"]
return name, path, args, kwargs
def formfield(self, **kwargs):
if self.remote_field.parent_link:
return None
return super().formfield(**kwargs)
def save_form_data(self, instance, data):
if isinstance(data, self.remote_field.model):
setattr(instance, self.name, data)
else:
setattr(instance, self.attname, data)
# The remote field object must be cleared, otherwise Model.save()
# will reassign attname using the related object's pk.
if data is None:
setattr(instance, self.name, data)
def _check_unique(self, **kwargs):
# Override ForeignKey since check isn't applicable here.
return []
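# Usage sketch (hypothetical models): unlike ForeignKey, the reverse
# accessor returns a single object rather than a manager.
#
#     class Place(models.Model):
#         name = models.CharField(max_length=50)
#
#     class Restaurant(models.Model):
#         place = models.OneToOneField(Place, on_delete=models.CASCADE)
#
#     place.restaurant  # a single Restaurant instance, not a queryset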
def create_many_to_many_intermediary_model(field, klass):
from django.db import models
def set_managed(model, related, through):
through._meta.managed = model._meta.managed or related._meta.managed
to_model = resolve_relation(klass, field.remote_field.model)
name = "%s_%s" % (klass._meta.object_name, field.name)
lazy_related_operation(set_managed, klass, to_model, name)
to = make_model_tuple(to_model)[1]
from_ = klass._meta.model_name
if to == from_:
to = "to_%s" % to
from_ = "from_%s" % from_
meta = type(
"Meta",
(),
{
"db_table": field._get_m2m_db_table(klass._meta),
"auto_created": klass,
"app_label": klass._meta.app_label,
"db_tablespace": klass._meta.db_tablespace,
"unique_together": (from_, to),
"verbose_name": _("%(from)s-%(to)s relationship")
% {"from": from_, "to": to},
"verbose_name_plural": _("%(from)s-%(to)s relationships")
% {"from": from_, "to": to},
"apps": field.model._meta.apps,
},
)
# Construct and return the new class.
return type(
name,
(models.Model,),
{
"Meta": meta,
"__module__": klass.__module__,
from_: models.ForeignKey(
klass,
related_name="%s+" % name,
db_tablespace=field.db_tablespace,
db_constraint=field.remote_field.db_constraint,
on_delete=CASCADE,
),
to: models.ForeignKey(
to_model,
related_name="%s+" % name,
db_tablespace=field.db_tablespace,
db_constraint=field.remote_field.db_constraint,
on_delete=CASCADE,
),
},
)
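# For reference, given a hypothetical Article model with
# tags = ManyToManyField(Tag), the factory above generates a through model
# roughly equivalent to this hand-written sketch:
#
#     class Article_tags(models.Model):
#         article = models.ForeignKey(Article, on_delete=models.CASCADE)
#         tag = models.ForeignKey(Tag, on_delete=models.CASCADE)
#
#         class Meta:
#             db_table = "app_article_tags"
#             unique_together = ("article", "tag")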
class ManyToManyField(RelatedField):
"""
Provide a many-to-many relation by using an intermediary model that
holds two ForeignKey fields pointed at the two sides of the relation.
Unless a ``through`` model was provided, ManyToManyField will use the
create_many_to_many_intermediary_model factory to automatically generate
the intermediary model.
"""
# Field flags
many_to_many = True
many_to_one = False
one_to_many = False
one_to_one = False
rel_class = ManyToManyRel
description = _("Many-to-many relationship")
def __init__(
self,
to,
related_name=None,
related_query_name=None,
limit_choices_to=None,
symmetrical=None,
through=None,
through_fields=None,
db_constraint=True,
db_table=None,
swappable=True,
**kwargs,
):
try:
to._meta
except AttributeError:
if not isinstance(to, str):
raise TypeError(
"%s(%r) is invalid. First parameter to ManyToManyField "
"must be either a model, a model name, or the string %r"
% (
self.__class__.__name__,
to,
RECURSIVE_RELATIONSHIP_CONSTANT,
)
)
if symmetrical is None:
symmetrical = to == RECURSIVE_RELATIONSHIP_CONSTANT
if through is not None and db_table is not None:
raise ValueError(
"Cannot specify a db_table if an intermediary model is used."
)
kwargs["rel"] = self.rel_class(
self,
to,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
symmetrical=symmetrical,
through=through,
through_fields=through_fields,
db_constraint=db_constraint,
)
self.has_null_arg = "null" in kwargs
super().__init__(
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
**kwargs,
)
self.db_table = db_table
self.swappable = swappable
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_unique(**kwargs),
*self._check_relationship_model(**kwargs),
*self._check_ignored_options(**kwargs),
*self._check_table_uniqueness(**kwargs),
]
def _check_unique(self, **kwargs):
if self.unique:
return [
checks.Error(
"ManyToManyFields cannot be unique.",
obj=self,
id="fields.E330",
)
]
return []
def _check_ignored_options(self, **kwargs):
warnings = []
if self.has_null_arg:
warnings.append(
checks.Warning(
"null has no effect on ManyToManyField.",
obj=self,
id="fields.W340",
)
)
if self._validators:
warnings.append(
checks.Warning(
"ManyToManyField does not support validators.",
obj=self,
id="fields.W341",
)
)
if self.remote_field.symmetrical and self._related_name:
warnings.append(
checks.Warning(
"related_name has no effect on ManyToManyField "
'with a symmetrical relationship, e.g. to "self".',
obj=self,
id="fields.W345",
)
)
return warnings
def _check_relationship_model(self, from_model=None, **kwargs):
if hasattr(self.remote_field.through, "_meta"):
qualified_model_name = "%s.%s" % (
self.remote_field.through._meta.app_label,
self.remote_field.through.__name__,
)
else:
qualified_model_name = self.remote_field.through
errors = []
if self.remote_field.through not in self.opts.apps.get_models(
include_auto_created=True
):
# The relationship model is not installed.
errors.append(
checks.Error(
"Field specifies a many-to-many relation through model "
"'%s', which has not been installed." % qualified_model_name,
obj=self,
id="fields.E331",
)
)
else:
assert from_model is not None, (
"ManyToManyField with intermediate "
"tables cannot be checked if you don't pass the model "
"where the field is attached to."
)
# Set some useful local variables
to_model = resolve_relation(from_model, self.remote_field.model)
from_model_name = from_model._meta.object_name
if isinstance(to_model, str):
to_model_name = to_model
else:
to_model_name = to_model._meta.object_name
relationship_model_name = self.remote_field.through._meta.object_name
self_referential = from_model == to_model
# Count foreign keys in intermediate model
if self_referential:
seen_self = sum(
from_model == getattr(field.remote_field, "model", None)
for field in self.remote_field.through._meta.fields
)
if seen_self > 2 and not self.remote_field.through_fields:
errors.append(
checks.Error(
"The model is used as an intermediate model by "
"'%s', but it has more than two foreign keys "
"to '%s', which is ambiguous. You must specify "
"which two foreign keys Django should use via the "
"through_fields keyword argument."
% (self, from_model_name),
hint=(
"Use through_fields to specify which two foreign keys "
"Django should use."
),
obj=self.remote_field.through,
id="fields.E333",
)
)
else:
# Count foreign keys in relationship model
seen_from = sum(
from_model == getattr(field.remote_field, "model", None)
for field in self.remote_field.through._meta.fields
)
seen_to = sum(
to_model == getattr(field.remote_field, "model", None)
for field in self.remote_field.through._meta.fields
)
if seen_from > 1 and not self.remote_field.through_fields:
errors.append(
checks.Error(
(
"The model is used as an intermediate model by "
"'%s', but it has more than one foreign key "
"from '%s', which is ambiguous. You must specify "
"which foreign key Django should use via the "
"through_fields keyword argument."
)
% (self, from_model_name),
hint=(
"If you want to create a recursive relationship, "
'use ManyToManyField("%s", through="%s").'
)
% (
RECURSIVE_RELATIONSHIP_CONSTANT,
relationship_model_name,
),
obj=self,
id="fields.E334",
)
)
if seen_to > 1 and not self.remote_field.through_fields:
errors.append(
checks.Error(
"The model is used as an intermediate model by "
"'%s', but it has more than one foreign key "
"to '%s', which is ambiguous. You must specify "
"which foreign key Django should use via the "
"through_fields keyword argument." % (self, to_model_name),
hint=(
"If you want to create a recursive relationship, "
'use ManyToManyField("%s", through="%s").'
)
% (
RECURSIVE_RELATIONSHIP_CONSTANT,
relationship_model_name,
),
obj=self,
id="fields.E335",
)
)
if seen_from == 0 or seen_to == 0:
errors.append(
checks.Error(
"The model is used as an intermediate model by "
"'%s', but it does not have a foreign key to '%s' or '%s'."
% (self, from_model_name, to_model_name),
obj=self.remote_field.through,
id="fields.E336",
)
)
# Validate `through_fields`.
if self.remote_field.through_fields is not None:
# Validate that we're given an iterable of at least two items
# and that none of them is "falsy".
if not (
len(self.remote_field.through_fields) >= 2
and self.remote_field.through_fields[0]
and self.remote_field.through_fields[1]
):
errors.append(
checks.Error(
"Field specifies 'through_fields' but does not provide "
"the names of the two link fields that should be used "
"for the relation through model '%s'." % qualified_model_name,
hint=(
"Make sure you specify 'through_fields' as "
"through_fields=('field1', 'field2')"
),
obj=self,
id="fields.E337",
)
)
# Validate the given through fields -- they should be actual
# fields on the through model, and also be foreign keys to the
# expected models.
else:
assert from_model is not None, (
"ManyToManyField with intermediate "
"tables cannot be checked if you don't pass the model "
"where the field is attached to."
)
source, through, target = (
from_model,
self.remote_field.through,
self.remote_field.model,
)
source_field_name, target_field_name = self.remote_field.through_fields[
:2
]
for field_name, related_model in (
(source_field_name, source),
(target_field_name, target),
):
possible_field_names = []
for f in through._meta.fields:
if (
hasattr(f, "remote_field")
and getattr(f.remote_field, "model", None) == related_model
):
possible_field_names.append(f.name)
if possible_field_names:
hint = (
"Did you mean one of the following foreign keys to '%s': "
"%s?"
% (
related_model._meta.object_name,
", ".join(possible_field_names),
)
)
else:
hint = None
try:
field = through._meta.get_field(field_name)
except exceptions.FieldDoesNotExist:
errors.append(
checks.Error(
"The intermediary model '%s' has no field '%s'."
% (qualified_model_name, field_name),
hint=hint,
obj=self,
id="fields.E338",
)
)
else:
if not (
hasattr(field, "remote_field")
and getattr(field.remote_field, "model", None)
== related_model
):
errors.append(
checks.Error(
"'%s.%s' is not a foreign key to '%s'."
% (
through._meta.object_name,
field_name,
related_model._meta.object_name,
),
hint=hint,
obj=self,
id="fields.E339",
)
)
return errors
def _check_table_uniqueness(self, **kwargs):
if (
isinstance(self.remote_field.through, str)
or not self.remote_field.through._meta.managed
):
return []
registered_tables = {
model._meta.db_table: model
for model in self.opts.apps.get_models(include_auto_created=True)
if model != self.remote_field.through and model._meta.managed
}
m2m_db_table = self.m2m_db_table()
model = registered_tables.get(m2m_db_table)
# The second condition allows multiple m2m relations on a model if
# some point to a through model that proxies another through model.
if (
model
and model._meta.concrete_model
!= self.remote_field.through._meta.concrete_model
):
if model._meta.auto_created:
def _get_field_name(model):
for field in model._meta.auto_created._meta.many_to_many:
if field.remote_field.through is model:
return field.name
opts = model._meta.auto_created._meta
clashing_obj = "%s.%s" % (opts.label, _get_field_name(model))
else:
clashing_obj = model._meta.label
if settings.DATABASE_ROUTERS:
error_class, error_id = checks.Warning, "fields.W344"
error_hint = (
"You have configured settings.DATABASE_ROUTERS. Verify "
"that the table of %r is correctly routed to a separate "
"database." % clashing_obj
)
else:
error_class, error_id = checks.Error, "fields.E340"
error_hint = None
return [
error_class(
"The field's intermediary table '%s' clashes with the "
"table name of '%s'." % (m2m_db_table, clashing_obj),
obj=self,
hint=error_hint,
id=error_id,
)
]
return []
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
# Handle the simpler arguments.
if self.db_table is not None:
kwargs["db_table"] = self.db_table
if self.remote_field.db_constraint is not True:
kwargs["db_constraint"] = self.remote_field.db_constraint
# Lowercase model names as they should be treated as case-insensitive.
if isinstance(self.remote_field.model, str):
if "." in self.remote_field.model:
app_label, model_name = self.remote_field.model.split(".")
kwargs["to"] = "%s.%s" % (app_label, model_name.lower())
else:
kwargs["to"] = self.remote_field.model.lower()
else:
kwargs["to"] = self.remote_field.model._meta.label_lower
if getattr(self.remote_field, "through", None) is not None:
if isinstance(self.remote_field.through, str):
kwargs["through"] = self.remote_field.through
elif not self.remote_field.through._meta.auto_created:
kwargs["through"] = self.remote_field.through._meta.label
# If swappable is True, then see if we're actually pointing to the target
# of a swap.
swappable_setting = self.swappable_setting
if swappable_setting is not None:
# If it's already a settings reference, error.
if hasattr(kwargs["to"], "setting_name"):
if kwargs["to"].setting_name != swappable_setting:
raise ValueError(
"Cannot deconstruct a ManyToManyField pointing to a "
"model that is swapped in place of more than one model "
"(%s and %s)" % (kwargs["to"].setting_name, swappable_setting)
)
kwargs["to"] = SettingsReference(
kwargs["to"],
swappable_setting,
)
return name, path, args, kwargs
def _get_path_info(self, direct=False, filtered_relation=None):
"""Called by both direct and indirect m2m traversal."""
int_model = self.remote_field.through
linkfield1 = int_model._meta.get_field(self.m2m_field_name())
linkfield2 = int_model._meta.get_field(self.m2m_reverse_field_name())
if direct:
join1infos = linkfield1.reverse_path_infos
if filtered_relation:
join2infos = linkfield2.get_path_info(filtered_relation)
else:
join2infos = linkfield2.path_infos
else:
join1infos = linkfield2.reverse_path_infos
if filtered_relation:
join2infos = linkfield1.get_path_info(filtered_relation)
else:
join2infos = linkfield1.path_infos
# Get join infos between the last model of join 1 and the first model
# of join 2. Assume the only reason these may differ is due to model
# inheritance.
join1_final = join1infos[-1].to_opts
join2_initial = join2infos[0].from_opts
if join1_final is join2_initial:
intermediate_infos = []
elif issubclass(join1_final.model, join2_initial.model):
intermediate_infos = join1_final.get_path_to_parent(join2_initial.model)
else:
intermediate_infos = join2_initial.get_path_from_parent(join1_final.model)
return [*join1infos, *intermediate_infos, *join2infos]
def get_path_info(self, filtered_relation=None):
return self._get_path_info(direct=True, filtered_relation=filtered_relation)
@cached_property
def path_infos(self):
return self.get_path_info()
def get_reverse_path_info(self, filtered_relation=None):
return self._get_path_info(direct=False, filtered_relation=filtered_relation)
@cached_property
def reverse_path_infos(self):
return self.get_reverse_path_info()
def _get_m2m_db_table(self, opts):
"""
Function that can be curried to provide the m2m table name for this
relation.
"""
if self.remote_field.through is not None:
return self.remote_field.through._meta.db_table
elif self.db_table:
return self.db_table
else:
m2m_table_name = "%s_%s" % (utils.strip_quotes(opts.db_table), self.name)
return utils.truncate_name(m2m_table_name, connection.ops.max_name_length())
def _get_m2m_attr(self, related, attr):
"""
Function that can be curried to provide the source accessor or DB
column name for the m2m table.
"""
cache_attr = "_m2m_%s_cache" % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
if self.remote_field.through_fields is not None:
link_field_name = self.remote_field.through_fields[0]
else:
link_field_name = None
for f in self.remote_field.through._meta.fields:
if (
f.is_relation
and f.remote_field.model == related.related_model
and (link_field_name is None or link_field_name == f.name)
):
setattr(self, cache_attr, getattr(f, attr))
return getattr(self, cache_attr)
def _get_m2m_reverse_attr(self, related, attr):
"""
Function that can be curried to provide the related accessor or DB
column name for the m2m table.
"""
cache_attr = "_m2m_reverse_%s_cache" % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
found = False
if self.remote_field.through_fields is not None:
link_field_name = self.remote_field.through_fields[1]
else:
link_field_name = None
for f in self.remote_field.through._meta.fields:
if f.is_relation and f.remote_field.model == related.model:
if link_field_name is None and related.related_model == related.model:
# If this is an m2m-intermediate to self,
# the first foreign key you find will be
# the source column. Keep searching for
# the second foreign key.
if found:
setattr(self, cache_attr, getattr(f, attr))
break
else:
found = True
elif link_field_name is None or link_field_name == f.name:
setattr(self, cache_attr, getattr(f, attr))
break
return getattr(self, cache_attr)
def contribute_to_class(self, cls, name, **kwargs):
# To support multiple relations to self, it's useful to have a non-None
# related name on symmetrical relations for internal reasons. The
# concept doesn't make a lot of sense externally ("you want me to
# specify *what* on my non-reversible relation?!"), so we set it up
# automatically. The funky name reduces the chance of an accidental
# clash.
if self.remote_field.symmetrical and (
self.remote_field.model == RECURSIVE_RELATIONSHIP_CONSTANT
or self.remote_field.model == cls._meta.object_name
):
self.remote_field.related_name = "%s_rel_+" % name
elif self.remote_field.is_hidden():
# If the backwards relation is disabled, replace the original
# related_name with one generated from the m2m field name. Django
# still uses backwards relations internally and we need to avoid
# clashes between multiple m2m fields with related_name == '+'.
self.remote_field.related_name = "_%s_%s_%s_+" % (
cls._meta.app_label,
cls.__name__.lower(),
name,
)
super().contribute_to_class(cls, name, **kwargs)
# The intermediate m2m model is not auto created if:
# 1) There is a manually specified intermediate,
# 2) The class owning the m2m field is abstract, or
# 3) The class owning the m2m field has been swapped out.
if not cls._meta.abstract:
if self.remote_field.through:
def resolve_through_model(_, model, field):
field.remote_field.through = model
lazy_related_operation(
resolve_through_model, cls, self.remote_field.through, field=self
)
elif not cls._meta.swapped:
self.remote_field.through = create_many_to_many_intermediary_model(
self, cls
)
# Add the descriptor for the m2m relation.
setattr(cls, self.name, ManyToManyDescriptor(self.remote_field, reverse=False))
# Set up the accessor for the m2m table name for the relation.
self.m2m_db_table = partial(self._get_m2m_db_table, cls._meta)
def contribute_to_related_class(self, cls, related):
# Internal M2Ms (i.e., those with a related name ending with '+')
# and swapped models don't get a related descriptor.
if (
not self.remote_field.is_hidden()
and not related.related_model._meta.swapped
):
setattr(
cls,
related.get_accessor_name(),
ManyToManyDescriptor(self.remote_field, reverse=True),
)
# Set up the accessors for the column names on the m2m table.
self.m2m_column_name = partial(self._get_m2m_attr, related, "column")
self.m2m_reverse_name = partial(self._get_m2m_reverse_attr, related, "column")
self.m2m_field_name = partial(self._get_m2m_attr, related, "name")
self.m2m_reverse_field_name = partial(
self._get_m2m_reverse_attr, related, "name"
)
get_m2m_rel = partial(self._get_m2m_attr, related, "remote_field")
self.m2m_target_field_name = lambda: get_m2m_rel().field_name
get_m2m_reverse_rel = partial(
self._get_m2m_reverse_attr, related, "remote_field"
)
self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name
def set_attributes_from_rel(self):
pass
def value_from_object(self, obj):
return [] if obj.pk is None else list(getattr(obj, self.attname).all())
def save_form_data(self, instance, data):
getattr(instance, self.attname).set(data)
def formfield(self, *, using=None, **kwargs):
defaults = {
"form_class": forms.ModelMultipleChoiceField,
"queryset": self.remote_field.model._default_manager.using(using),
**kwargs,
}
# If initial is passed in, it's a list of related objects, but the
# MultipleChoiceField takes a list of IDs.
if defaults.get("initial") is not None:
initial = defaults["initial"]
if callable(initial):
initial = initial()
defaults["initial"] = [i.pk for i in initial]
return super().formfield(**defaults)
def db_check(self, connection):
return None
def db_type(self, connection):
# A ManyToManyField is not represented by a single column,
# so return None.
return None
def db_parameters(self, connection):
return {"type": None, "check": None}
f6936d00886653d3b4005a0952e78156d8ed9ffc49e877c290866d3568930453
import json
from django import forms
from django.core import checks, exceptions
from django.db import NotSupportedError, connections, router
from django.db.models import lookups
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import TextField
from django.db.models.lookups import (
FieldGetDbPrepValueMixin,
PostgresOperatorLookup,
Transform,
)
from django.utils.translation import gettext_lazy as _
from . import Field
from .mixins import CheckFieldDefaultMixin
__all__ = ["JSONField"]
class JSONField(CheckFieldDefaultMixin, Field):
empty_strings_allowed = False
description = _("A JSON object")
default_error_messages = {
"invalid": _("Value must be valid JSON."),
}
_default_hint = ("dict", "{}")
def __init__(
self,
verbose_name=None,
name=None,
encoder=None,
decoder=None,
**kwargs,
):
if encoder and not callable(encoder):
raise ValueError("The encoder parameter must be a callable object.")
if decoder and not callable(decoder):
raise ValueError("The decoder parameter must be a callable object.")
self.encoder = encoder
self.decoder = decoder
super().__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super().check(**kwargs)
databases = kwargs.get("databases") or []
errors.extend(self._check_supported(databases))
return errors
def _check_supported(self, databases):
errors = []
for db in databases:
if not router.allow_migrate_model(db, self.model):
continue
connection = connections[db]
if (
self.model._meta.required_db_vendor
and self.model._meta.required_db_vendor != connection.vendor
):
continue
if not (
"supports_json_field" in self.model._meta.required_db_features
or connection.features.supports_json_field
):
errors.append(
checks.Error(
"%s does not support JSONFields." % connection.display_name,
obj=self.model,
id="fields.E180",
)
)
return errors
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
if self.encoder is not None:
kwargs["encoder"] = self.encoder
if self.decoder is not None:
kwargs["decoder"] = self.decoder
return name, path, args, kwargs
def from_db_value(self, value, expression, connection):
if value is None:
return value
# Some backends (SQLite at least) extract non-string values in their
# native SQL datatypes.
if isinstance(expression, KeyTransform) and not isinstance(value, str):
return value
try:
return json.loads(value, cls=self.decoder)
except json.JSONDecodeError:
return value
def get_internal_type(self):
return "JSONField"
def get_db_prep_value(self, value, connection, prepared=False):
if hasattr(value, "as_sql"):
return value
return connection.ops.adapt_json_value(value, self.encoder)
def get_db_prep_save(self, value, connection):
if value is None:
return value
return self.get_db_prep_value(value, connection)
def get_transform(self, name):
transform = super().get_transform(name)
if transform:
return transform
return KeyTransformFactory(name)
def validate(self, value, model_instance):
super().validate(value, model_instance)
try:
json.dumps(value, cls=self.encoder)
except TypeError:
raise exceptions.ValidationError(
self.error_messages["invalid"],
code="invalid",
params={"value": value},
)
def value_to_string(self, obj):
return self.value_from_object(obj)
def formfield(self, **kwargs):
return super().formfield(
**{
"form_class": forms.JSONField,
"encoder": self.encoder,
"decoder": self.decoder,
**kwargs,
}
)
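# Usage sketch (hypothetical model): key lookups on a JSONField resolve
# through get_transform() above into KeyTransform chains.
#
#     class Dog(models.Model):
#         data = models.JSONField(null=True)
#
#     Dog.objects.filter(data__breed="collie")      # single key
#     Dog.objects.filter(data__owner__name="Bob")   # nested keys
#     Dog.objects.filter(data__has_key="breed")     # HasKey lookup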
def compile_json_path(key_transforms, include_root=True):
path = ["$"] if include_root else []
for key_transform in key_transforms:
try:
num = int(key_transform)
except ValueError: # non-integer
path.append(".")
path.append(json.dumps(key_transform))
else:
path.append("[%s]" % num)
return "".join(path)
class DataContains(FieldGetDbPrepValueMixin, PostgresOperatorLookup):
lookup_name = "contains"
postgres_operator = "@>"
def as_sql(self, compiler, connection):
if not connection.features.supports_json_field_contains:
raise NotSupportedError(
"contains lookup is not supported on this database backend."
)
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
params = tuple(lhs_params) + tuple(rhs_params)
return "JSON_CONTAINS(%s, %s)" % (lhs, rhs), params
class ContainedBy(FieldGetDbPrepValueMixin, PostgresOperatorLookup):
lookup_name = "contained_by"
postgres_operator = "<@"
def as_sql(self, compiler, connection):
if not connection.features.supports_json_field_contains:
raise NotSupportedError(
"contained_by lookup is not supported on this database backend."
)
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
params = tuple(rhs_params) + tuple(lhs_params)
return "JSON_CONTAINS(%s, %s)" % (rhs, lhs), params
class HasKeyLookup(PostgresOperatorLookup):
logical_operator = None
def compile_json_path_final_key(self, key_transform):
# Compile the final key without interpreting ints as array elements.
return ".%s" % json.dumps(key_transform)
def as_sql(self, compiler, connection, template=None):
# Process JSON path from the left-hand side.
if isinstance(self.lhs, KeyTransform):
lhs, lhs_params, lhs_key_transforms = self.lhs.preprocess_lhs(
compiler, connection
)
lhs_json_path = compile_json_path(lhs_key_transforms)
else:
lhs, lhs_params = self.process_lhs(compiler, connection)
lhs_json_path = "$"
sql = template % lhs
# Process JSON path from the right-hand side.
rhs = self.rhs
rhs_params = []
if not isinstance(rhs, (list, tuple)):
rhs = [rhs]
for key in rhs:
if isinstance(key, KeyTransform):
*_, rhs_key_transforms = key.preprocess_lhs(compiler, connection)
else:
rhs_key_transforms = [key]
*rhs_key_transforms, final_key = rhs_key_transforms
rhs_json_path = compile_json_path(rhs_key_transforms, include_root=False)
rhs_json_path += self.compile_json_path_final_key(final_key)
rhs_params.append(lhs_json_path + rhs_json_path)
# Add condition for each key.
if self.logical_operator:
sql = "(%s)" % self.logical_operator.join([sql] * len(rhs_params))
return sql, tuple(lhs_params) + tuple(rhs_params)
def as_mysql(self, compiler, connection):
return self.as_sql(
compiler, connection, template="JSON_CONTAINS_PATH(%s, 'one', %%s)"
)
def as_oracle(self, compiler, connection):
sql, params = self.as_sql(
compiler, connection, template="JSON_EXISTS(%s, '%%s')"
)
# Add paths directly into SQL because path expressions cannot be passed
# as bind variables on Oracle.
return sql % tuple(params), []
def as_postgresql(self, compiler, connection):
if isinstance(self.rhs, KeyTransform):
*_, rhs_key_transforms = self.rhs.preprocess_lhs(compiler, connection)
for key in rhs_key_transforms[:-1]:
self.lhs = KeyTransform(key, self.lhs)
self.rhs = rhs_key_transforms[-1]
return super().as_postgresql(compiler, connection)
def as_sqlite(self, compiler, connection):
return self.as_sql(
compiler, connection, template="JSON_TYPE(%s, %%s) IS NOT NULL"
)
class HasKey(HasKeyLookup):
lookup_name = "has_key"
postgres_operator = "?"
prepare_rhs = False
class HasKeys(HasKeyLookup):
lookup_name = "has_keys"
postgres_operator = "?&"
logical_operator = " AND "
def get_prep_lookup(self):
return [str(item) for item in self.rhs]
class HasAnyKeys(HasKeys):
lookup_name = "has_any_keys"
postgres_operator = "?|"
logical_operator = " OR "
class HasKeyOrArrayIndex(HasKey):
def compile_json_path_final_key(self, key_transform):
return compile_json_path([key_transform], include_root=False)
class CaseInsensitiveMixin:
"""
Mixin to allow case-insensitive comparison of JSON values on MySQL.
MySQL handles strings used in JSON context using the utf8mb4_bin collation.
Because utf8mb4_bin is a binary collation, comparison of JSON values is
case-sensitive.
"""
def process_lhs(self, compiler, connection):
lhs, lhs_params = super().process_lhs(compiler, connection)
if connection.vendor == "mysql":
return "LOWER(%s)" % lhs, lhs_params
return lhs, lhs_params
def process_rhs(self, compiler, connection):
rhs, rhs_params = super().process_rhs(compiler, connection)
if connection.vendor == "mysql":
return "LOWER(%s)" % rhs, rhs_params
return rhs, rhs_params
class JSONExact(lookups.Exact):
can_use_none_as_rhs = True
def process_rhs(self, compiler, connection):
rhs, rhs_params = super().process_rhs(compiler, connection)
# Treat None lookup values as null.
if rhs == "%s" and rhs_params == [None]:
rhs_params = ["null"]
if connection.vendor == "mysql":
func = ["JSON_EXTRACT(%s, '$')"] * len(rhs_params)
rhs %= tuple(func)
return rhs, rhs_params
class JSONIContains(CaseInsensitiveMixin, lookups.IContains):
pass
JSONField.register_lookup(DataContains)
JSONField.register_lookup(ContainedBy)
JSONField.register_lookup(HasKey)
JSONField.register_lookup(HasKeys)
JSONField.register_lookup(HasAnyKeys)
JSONField.register_lookup(JSONExact)
JSONField.register_lookup(JSONIContains)
class KeyTransform(Transform):
postgres_operator = "->"
postgres_nested_operator = "#>"
def __init__(self, key_name, *args, **kwargs):
super().__init__(*args, **kwargs)
self.key_name = str(key_name)
def preprocess_lhs(self, compiler, connection):
key_transforms = [self.key_name]
previous = self.lhs
while isinstance(previous, KeyTransform):
key_transforms.insert(0, previous.key_name)
previous = previous.lhs
lhs, params = compiler.compile(previous)
if connection.vendor == "oracle":
# Escape string-formatting.
key_transforms = [key.replace("%", "%%") for key in key_transforms]
return lhs, params, key_transforms
def as_mysql(self, compiler, connection):
lhs, params, key_transforms = self.preprocess_lhs(compiler, connection)
json_path = compile_json_path(key_transforms)
return "JSON_EXTRACT(%s, %%s)" % lhs, tuple(params) + (json_path,)
def as_oracle(self, compiler, connection):
lhs, params, key_transforms = self.preprocess_lhs(compiler, connection)
json_path = compile_json_path(key_transforms)
return (
"COALESCE(JSON_QUERY(%s, '%s'), JSON_VALUE(%s, '%s'))"
% ((lhs, json_path) * 2)
), tuple(params) * 2
def as_postgresql(self, compiler, connection):
lhs, params, key_transforms = self.preprocess_lhs(compiler, connection)
if len(key_transforms) > 1:
sql = "(%s %s %%s)" % (lhs, self.postgres_nested_operator)
return sql, tuple(params) + (key_transforms,)
try:
lookup = int(self.key_name)
except ValueError:
lookup = self.key_name
return "(%s %s %%s)" % (lhs, self.postgres_operator), tuple(params) + (lookup,)
def as_sqlite(self, compiler, connection):
lhs, params, key_transforms = self.preprocess_lhs(compiler, connection)
json_path = compile_json_path(key_transforms)
datatype_values = ",".join(
[repr(datatype) for datatype in connection.ops.jsonfield_datatype_values]
)
return (
"(CASE WHEN JSON_TYPE(%s, %%s) IN (%s) "
"THEN JSON_TYPE(%s, %%s) ELSE JSON_EXTRACT(%s, %%s) END)"
) % (lhs, datatype_values, lhs, lhs), (tuple(params) + (json_path,)) * 3
class KeyTextTransform(KeyTransform):
postgres_operator = "->>"
postgres_nested_operator = "#>>"
output_field = TextField()
def as_mysql(self, compiler, connection):
if connection.mysql_is_mariadb:
# MariaDB doesn't support -> and ->> operators (see MDEV-13594).
sql, params = super().as_mysql(compiler, connection)
return "JSON_UNQUOTE(%s)" % sql, params
else:
lhs, params, key_transforms = self.preprocess_lhs(compiler, connection)
json_path = compile_json_path(key_transforms)
return "(%s ->> %%s)" % lhs, tuple(params) + (json_path,)
@classmethod
def from_lookup(cls, lookup):
transform, *keys = lookup.split(LOOKUP_SEP)
if not keys:
raise ValueError("Lookup must contain key or index transforms.")
for key in keys:
transform = cls(key, transform)
return transform
KT = KeyTextTransform.from_lookup
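# Usage sketch: KT builds a KeyTextTransform chain from a lookup string,
# which is handy for annotating text values out of a JSONField
# (Dog is a hypothetical model with a "data" JSONField):
#
#     from django.db.models.fields.json import KT
#
#     Dog.objects.annotate(breed=KT("data__breed")).filter(breed="collie")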
class KeyTransformTextLookupMixin:
"""
Mixin for combining with lookups that expect a text lhs from a JSONField
key lookup. On PostgreSQL, this makes use of the ->> operator instead of
casting key values to text and performing the lookup on the resulting
representation.
"""
def __init__(self, key_transform, *args, **kwargs):
if not isinstance(key_transform, KeyTransform):
raise TypeError(
"Transform should be an instance of KeyTransform in order to "
"use this lookup."
)
key_text_transform = KeyTextTransform(
key_transform.key_name,
*key_transform.source_expressions,
**key_transform.extra,
)
super().__init__(key_text_transform, *args, **kwargs)
class KeyTransformIsNull(lookups.IsNull):
# key__isnull=False is the same as has_key='key'
def as_oracle(self, compiler, connection):
sql, params = HasKeyOrArrayIndex(
self.lhs.lhs,
self.lhs.key_name,
).as_oracle(compiler, connection)
if not self.rhs:
return sql, params
# Column doesn't have a key or IS NULL.
lhs, lhs_params, _ = self.lhs.preprocess_lhs(compiler, connection)
return "(NOT %s OR %s IS NULL)" % (sql, lhs), tuple(params) + tuple(lhs_params)
def as_sqlite(self, compiler, connection):
template = "JSON_TYPE(%s, %%s) IS NULL"
if not self.rhs:
template = "JSON_TYPE(%s, %%s) IS NOT NULL"
return HasKeyOrArrayIndex(self.lhs.lhs, self.lhs.key_name).as_sql(
compiler,
connection,
template=template,
)
class KeyTransformIn(lookups.In):
def resolve_expression_parameter(self, compiler, connection, sql, param):
sql, params = super().resolve_expression_parameter(
compiler,
connection,
sql,
param,
)
if (
not hasattr(param, "as_sql")
and not connection.features.has_native_json_field
):
if connection.vendor == "oracle":
value = json.loads(param)
sql = "%s(JSON_OBJECT('value' VALUE %%s FORMAT JSON), '$.value')"
if isinstance(value, (list, dict)):
sql %= "JSON_QUERY"
else:
sql %= "JSON_VALUE"
elif connection.vendor == "mysql" or (
connection.vendor == "sqlite"
and params[0] not in connection.ops.jsonfield_datatype_values
):
sql = "JSON_EXTRACT(%s, '$')"
if connection.vendor == "mysql" and connection.mysql_is_mariadb:
sql = "JSON_UNQUOTE(%s)" % sql
return sql, params
class KeyTransformExact(JSONExact):
def process_rhs(self, compiler, connection):
if isinstance(self.rhs, KeyTransform):
return super(lookups.Exact, self).process_rhs(compiler, connection)
rhs, rhs_params = super().process_rhs(compiler, connection)
if connection.vendor == "oracle":
func = []
sql = "%s(JSON_OBJECT('value' VALUE %%s FORMAT JSON), '$.value')"
for value in rhs_params:
value = json.loads(value)
if isinstance(value, (list, dict)):
func.append(sql % "JSON_QUERY")
else:
func.append(sql % "JSON_VALUE")
rhs %= tuple(func)
elif connection.vendor == "sqlite":
func = []
for value in rhs_params:
if value in connection.ops.jsonfield_datatype_values:
func.append("%s")
else:
func.append("JSON_EXTRACT(%s, '$')")
rhs %= tuple(func)
return rhs, rhs_params
def as_oracle(self, compiler, connection):
rhs, rhs_params = super().process_rhs(compiler, connection)
if rhs_params == ["null"]:
# Field has key and it's NULL.
has_key_expr = HasKeyOrArrayIndex(self.lhs.lhs, self.lhs.key_name)
has_key_sql, has_key_params = has_key_expr.as_oracle(compiler, connection)
is_null_expr = self.lhs.get_lookup("isnull")(self.lhs, True)
is_null_sql, is_null_params = is_null_expr.as_sql(compiler, connection)
return (
"%s AND %s" % (has_key_sql, is_null_sql),
tuple(has_key_params) + tuple(is_null_params),
)
return super().as_sql(compiler, connection)
class KeyTransformIExact(
CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IExact
):
pass
class KeyTransformIContains(
CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IContains
):
pass
class KeyTransformStartsWith(KeyTransformTextLookupMixin, lookups.StartsWith):
pass
class KeyTransformIStartsWith(
CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IStartsWith
):
pass
class KeyTransformEndsWith(KeyTransformTextLookupMixin, lookups.EndsWith):
pass
class KeyTransformIEndsWith(
CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IEndsWith
):
pass
class KeyTransformRegex(KeyTransformTextLookupMixin, lookups.Regex):
pass
class KeyTransformIRegex(
CaseInsensitiveMixin, KeyTransformTextLookupMixin, lookups.IRegex
):
pass
class KeyTransformNumericLookupMixin:
def process_rhs(self, compiler, connection):
rhs, rhs_params = super().process_rhs(compiler, connection)
if not connection.features.has_native_json_field:
rhs_params = [json.loads(value) for value in rhs_params]
return rhs, rhs_params
class KeyTransformLt(KeyTransformNumericLookupMixin, lookups.LessThan):
pass
class KeyTransformLte(KeyTransformNumericLookupMixin, lookups.LessThanOrEqual):
pass
class KeyTransformGt(KeyTransformNumericLookupMixin, lookups.GreaterThan):
pass
class KeyTransformGte(KeyTransformNumericLookupMixin, lookups.GreaterThanOrEqual):
pass
KeyTransform.register_lookup(KeyTransformIn)
KeyTransform.register_lookup(KeyTransformExact)
KeyTransform.register_lookup(KeyTransformIExact)
KeyTransform.register_lookup(KeyTransformIsNull)
KeyTransform.register_lookup(KeyTransformIContains)
KeyTransform.register_lookup(KeyTransformStartsWith)
KeyTransform.register_lookup(KeyTransformIStartsWith)
KeyTransform.register_lookup(KeyTransformEndsWith)
KeyTransform.register_lookup(KeyTransformIEndsWith)
KeyTransform.register_lookup(KeyTransformRegex)
KeyTransform.register_lookup(KeyTransformIRegex)
KeyTransform.register_lookup(KeyTransformLt)
KeyTransform.register_lookup(KeyTransformLte)
KeyTransform.register_lookup(KeyTransformGt)
KeyTransform.register_lookup(KeyTransformGte)
class KeyTransformFactory:
def __init__(self, key_name):
self.key_name = key_name
def __call__(self, *args, **kwargs):
return KeyTransform(self.key_name, *args, **kwargs)
674f3487dd4dd047a7fc1341dd80d32373ea2f99f807b4e5f80afd94a95b9d1e
"""
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
import copy
import difflib
import functools
import sys
from collections import Counter, namedtuple
from collections.abc import Iterator, Mapping
from itertools import chain, count, product
from string import ascii_uppercase
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections
from django.db.models.aggregates import Count
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import (
BaseExpression,
Col,
Exists,
F,
OuterRef,
Ref,
ResolvedOuterRef,
Value,
)
from django.db.models.fields import Field
from django.db.models.fields.related_lookups import MultiColSource
from django.db.models.lookups import Lookup
from django.db.models.query_utils import (
Q,
check_rel_lookup_compatibility,
refs_expression,
)
from django.db.models.sql.constants import INNER, LOUTER, ORDER_DIR, SINGLE
from django.db.models.sql.datastructures import BaseTable, Empty, Join, MultiJoin
from django.db.models.sql.where import AND, OR, ExtraWhere, NothingNode, WhereNode
from django.utils.functional import cached_property
from django.utils.regex_helper import _lazy_re_compile
from django.utils.tree import Node
__all__ = ["Query", "RawQuery"]
# Quotation marks ('"`[]), whitespace characters, semicolons, or inline
# SQL comments are forbidden in column aliases.
FORBIDDEN_ALIAS_PATTERN = _lazy_re_compile(r"['`\"\]\[;\s]|--|/\*|\*/")
# Inspired by
# https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS
EXPLAIN_OPTIONS_PATTERN = _lazy_re_compile(r"[\w\-]+")
def get_field_names_from_opts(opts):
if opts is None:
return set()
return set(
chain.from_iterable(
(f.name, f.attname) if f.concrete else (f.name,) for f in opts.get_fields()
)
)
def get_children_from_q(q):
for child in q.children:
if isinstance(child, Node):
yield from get_children_from_q(child)
else:
yield child
JoinInfo = namedtuple(
"JoinInfo",
("final_field", "targets", "opts", "joins", "path", "transform_function"),
)
class RawQuery:
"""A single raw SQL query."""
def __init__(self, sql, using, params=()):
self.params = params
self.sql = sql
self.using = using
self.cursor = None
# Mirror some properties of a normal query so that
# the compiler can be used to process results.
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.extra_select = {}
self.annotation_select = {}
def chain(self, using):
return self.clone(using)
def clone(self, using):
return RawQuery(self.sql, using, params=self.params)
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.identifier_converter
return [converter(column_meta[0]) for column_meta in self.cursor.description]
def __iter__(self):
# Always execute a new query for a new iterator.
# This could be optimized with a cache at the expense of RAM.
self._execute_query()
if not connections[self.using].features.can_use_chunked_reads:
# If the database can't use chunked reads we need to make sure we
# evaluate the entire query up front.
result = list(self.cursor)
else:
result = self.cursor
return iter(result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
@property
def params_type(self):
if self.params is None:
return None
return dict if isinstance(self.params, Mapping) else tuple
def __str__(self):
if self.params_type is None:
return self.sql
return self.sql % self.params_type(self.params)
def _execute_query(self):
connection = connections[self.using]
# Adapt parameters to the database, as much as possible considering
# that the target type isn't known. See #17755.
params_type = self.params_type
adapter = connection.ops.adapt_unknown_value
if params_type is tuple:
params = tuple(adapter(val) for val in self.params)
elif params_type is dict:
params = {key: adapter(val) for key, val in self.params.items()}
elif params_type is None:
params = None
else:
raise RuntimeError("Unexpected params type: %s" % params_type)
self.cursor = connection.cursor()
self.cursor.execute(self.sql, params)
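# RawQuery is normally reached through Manager.raw() rather than constructed
# directly; a rough sketch of the direct form (hypothetical table):
#
#     query = RawQuery(
#         "SELECT id, name FROM app_person WHERE id = %s",
#         using="default",
#         params=(1,),
#     )
#     for row in query:  # iterating executes the query via a fresh cursor
#         ...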
ExplainInfo = namedtuple("ExplainInfo", ("format", "options"))
class Query(BaseExpression):
"""A single SQL query."""
alias_prefix = "T"
empty_result_set_value = None
subq_aliases = frozenset([alias_prefix])
compiler = "SQLCompiler"
base_table_class = BaseTable
join_class = Join
default_cols = True
default_ordering = True
standard_ordering = True
filter_is_sticky = False
subquery = False
# SQL-related attributes.
# Select and related select clauses are expressions to use in the SELECT
# clause of the query. The select is used for cases where we want to set up
# the select clause to contain fields other than the default ones (values(),
# subqueries...). Note that annotations go to the annotations dictionary.
select = ()
# The group_by attribute can have one of the following forms:
# - None: no group by at all in the query
# - A tuple of expressions: group by (at least) those expressions.
# String refs are also allowed for now.
# - True: group by all select fields of the model
# See compiler.get_group_by() for details.
group_by = None
order_by = ()
low_mark = 0 # Used for offset/limit.
high_mark = None # Used for offset/limit.
distinct = False
distinct_fields = ()
select_for_update = False
select_for_update_nowait = False
select_for_update_skip_locked = False
select_for_update_of = ()
select_for_no_key_update = False
select_related = False
has_select_fields = False
# Arbitrary limit for select_related to prevent infinite recursion.
max_depth = 5
# Holds the selects defined by a call to values() or values_list()
# excluding annotation_select and extra_select.
values_select = ()
# SQL annotation-related attributes.
annotation_select_mask = None
_annotation_select_cache = None
# Set combination attributes.
combinator = None
combinator_all = False
combined_queries = ()
# These are for extensions. The contents are more or less appended verbatim
# to the appropriate clause.
extra_select_mask = None
_extra_select_cache = None
extra_tables = ()
extra_order_by = ()
# A tuple of a set of model field names and a boolean: True if those are
# the fields to defer, or False if they are the only fields to load.
deferred_loading = (frozenset(), True)
explain_info = None
def __init__(self, model, alias_cols=True):
self.model = model
self.alias_refcount = {}
# alias_map is the most important data structure regarding joins.
# It's used for recording which joins exist in the query and what
# types they are. The key is the alias of the joined table (possibly
# the table name) and the value is a Join-like object (see
# sql.datastructures.Join for more information).
self.alias_map = {}
# Whether to provide alias to columns during reference resolving.
self.alias_cols = alias_cols
# Sometimes the query contains references to aliases in outer queries (as
# a result of split_exclude). Correct alias quoting needs to know these
# aliases too.
# Map external tables to whether they are aliased.
self.external_aliases = {}
self.table_map = {} # Maps table names to list of aliases.
self.used_aliases = set()
self.where = WhereNode()
# Maps alias -> Annotation Expression.
self.annotations = {}
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
self.extra = {} # Maps col_alias -> (col_sql, params).
self._filtered_relations = {}
@property
def output_field(self):
if len(self.select) == 1:
select = self.select[0]
return getattr(select, "target", None) or select.field
elif len(self.annotation_select) == 1:
return next(iter(self.annotation_select.values())).output_field
@cached_property
def base_table(self):
for alias in self.alias_map:
return alias
def __str__(self):
"""
Return the query as a string of SQL with the parameter values
substituted in (use sql_with_params() to see the unsubstituted string).
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.sql_with_params()
return sql % params
def sql_with_params(self):
"""
Return the query as an SQL string and the parameters that will be
substituted into the query.
"""
return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
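# Illustrative sketch, not part of the original module; "Author" is a
# hypothetical model. __str__() interpolates the parameters, which is only
# suitable for debugging:
#
#   query = Author.objects.filter(name="Ann").query
#   sql, params = query.sql_with_params()
#   # sql    -> 'SELECT ... WHERE "author"."name" = %s'
#   # params -> ('Ann',)
#   # str(query) renders the same SQL with 'Ann' substituted in.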
def __deepcopy__(self, memo):
"""Limit the amount of work when a Query is deepcopied."""
result = self.clone()
memo[id(self)] = result
return result
def get_compiler(self, using=None, connection=None, elide_empty=True):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
return connection.ops.compiler(self.compiler)(
self, connection, using, elide_empty
)
def get_meta(self):
"""
Return the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
if self.model:
return self.model._meta
def clone(self):
"""
Return a copy of the current Query. A lightweight alternative to
deepcopy().
"""
obj = Empty()
obj.__class__ = self.__class__
# Copy references to everything.
obj.__dict__ = self.__dict__.copy()
# Clone attributes that can't use shallow copy.
obj.alias_refcount = self.alias_refcount.copy()
obj.alias_map = self.alias_map.copy()
obj.external_aliases = self.external_aliases.copy()
obj.table_map = self.table_map.copy()
obj.where = self.where.clone()
obj.annotations = self.annotations.copy()
if self.annotation_select_mask is not None:
obj.annotation_select_mask = self.annotation_select_mask.copy()
if self.combined_queries:
obj.combined_queries = tuple(
[query.clone() for query in self.combined_queries]
)
# _annotation_select_cache cannot be copied, as doing so breaks the
# (necessary) state in which both annotations and
# _annotation_select_cache point to the same underlying objects.
# It will get re-populated in the cloned queryset the next time it's
# used.
obj._annotation_select_cache = None
obj.extra = self.extra.copy()
if self.extra_select_mask is not None:
obj.extra_select_mask = self.extra_select_mask.copy()
if self._extra_select_cache is not None:
obj._extra_select_cache = self._extra_select_cache.copy()
if self.select_related is not False:
# Use deepcopy because select_related stores fields in nested
# dicts.
obj.select_related = copy.deepcopy(obj.select_related)
if "subq_aliases" in self.__dict__:
obj.subq_aliases = self.subq_aliases.copy()
obj.used_aliases = self.used_aliases.copy()
obj._filtered_relations = self._filtered_relations.copy()
# Clear the cached_property, if it exists.
obj.__dict__.pop("base_table", None)
return obj
def chain(self, klass=None):
"""
Return a copy of the current Query that's ready for another operation.
The klass argument changes the type of the Query, e.g. UpdateQuery.
"""
obj = self.clone()
if klass and obj.__class__ != klass:
obj.__class__ = klass
if not obj.filter_is_sticky:
obj.used_aliases = set()
obj.filter_is_sticky = False
if hasattr(obj, "_setup_query"):
obj._setup_query()
return obj
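# Illustrative sketch (assumed usage): chain() is how a SELECT query gets
# repurposed for another operation while keeping its filters, e.g.:
#
#   from django.db.models.sql.subqueries import UpdateQuery
#   update_query = select_query.chain(UpdateQuery)  # same WHERE, new class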
def relabeled_clone(self, change_map):
clone = self.clone()
clone.change_aliases(change_map)
return clone
def _get_col(self, target, field, alias):
if not self.alias_cols:
alias = None
return target.get_col(alias, field)
def get_aggregation(self, using, added_aggregate_names):
"""
Return the dictionary with the values of the existing aggregations.
"""
if not self.annotation_select:
return {}
existing_annotations = {
alias: annotation
for alias, annotation in self.annotations.items()
if alias not in added_aggregate_names
}
# Existing usage of aggregation can be determined by the presence of
# selected aggregates but also by filters against aliased aggregates.
_, having, qualify = self.where.split_having_qualify()
has_existing_aggregation = (
any(
getattr(annotation, "contains_aggregate", True)
for annotation in existing_annotations.values()
)
or having
)
# Decide if we need to use a subquery.
#
# Existing aggregations would cause incorrect results as
# get_aggregation() must produce just one result and thus must not use
# GROUP BY.
#
# If the query has limit or distinct, or uses set operations, then
# those operations must be done in a subquery so that the query
# aggregates on the limit and/or distinct results instead of applying
# the distinct and limit after the aggregation.
if (
isinstance(self.group_by, tuple)
or self.is_sliced
or has_existing_aggregation
or qualify
or self.distinct
or self.combinator
):
from django.db.models.sql.subqueries import AggregateQuery
inner_query = self.clone()
inner_query.subquery = True
outer_query = AggregateQuery(self.model, inner_query)
inner_query.select_for_update = False
inner_query.select_related = False
inner_query.set_annotation_mask(self.annotation_select)
# Queries with distinct_fields need ordering and when a limit is
# applied we must take the slice from the ordered query. Otherwise
# no need for ordering.
inner_query.clear_ordering(force=False)
if not inner_query.distinct:
# If the inner query uses default select and it has some
# aggregate annotations, then we must make sure the inner
# query is grouped by the main model's primary key. However,
# clearing the select clause can alter results if distinct is
# used.
if inner_query.default_cols and has_existing_aggregation:
inner_query.group_by = (
self.model._meta.pk.get_col(inner_query.get_initial_alias()),
)
inner_query.default_cols = False
if not qualify:
# Mask existing annotations that are not referenced by
# aggregates to be pushed to the outer query unless
# filtering against window functions is involved as it
# requires complex realising.
annotation_mask = set()
for name in added_aggregate_names:
annotation_mask.add(name)
annotation_mask |= inner_query.annotations[name].get_refs()
inner_query.set_annotation_mask(annotation_mask)
# Remove any aggregates marked for reduction from the subquery and
# move them to the outer AggregateQuery. This requires making sure
# all columns referenced by the aggregates are selected in the
# subquery. It is achieved by retrieving all column references from
# the aggregates, explicitly selecting them if they are not
# already, and making sure the aggregates are repointed to reference
# them.
col_refs = {}
for alias, expression in list(inner_query.annotation_select.items()):
if not expression.is_summary:
continue
annotation_select_mask = inner_query.annotation_select_mask
replacements = {}
for col in self._gen_cols([expression], resolve_refs=False):
if not (col_ref := col_refs.get(col)):
index = len(col_refs) + 1
col_alias = f"__col{index}"
col_ref = Ref(col_alias, col)
col_refs[col] = col_ref
inner_query.annotations[col_alias] = col
inner_query.append_annotation_mask([col_alias])
replacements[col] = col_ref
outer_query.annotations[alias] = expression.replace_expressions(
replacements
)
del inner_query.annotations[alias]
annotation_select_mask.remove(alias)
# Make sure the annotation_select won't use cached results.
inner_query.set_annotation_mask(inner_query.annotation_select_mask)
if (
inner_query.select == ()
and not inner_query.default_cols
and not inner_query.annotation_select_mask
):
# In case of Model.objects[0:3].count(), there would be no
# field selected in the inner query, yet we must use a subquery.
# So, make sure at least one field is selected.
inner_query.select = (
self.model._meta.pk.get_col(inner_query.get_initial_alias()),
)
else:
outer_query = self
self.select = ()
self.default_cols = False
self.extra = {}
if existing_annotations:
# Inline reference to existing annotations and mask them as
# they are unnecessary given only the summarized aggregations
# are requested.
replacements = {
Ref(alias, annotation): annotation
for alias, annotation in existing_annotations.items()
}
for name in added_aggregate_names:
self.annotations[name] = self.annotations[name].replace_expressions(
replacements
)
self.set_annotation_mask(added_aggregate_names)
empty_set_result = [
expression.empty_result_set_value
for expression in outer_query.annotation_select.values()
]
elide_empty = not any(result is NotImplemented for result in empty_set_result)
outer_query.clear_ordering(force=True)
outer_query.clear_limits()
outer_query.select_for_update = False
outer_query.select_related = False
compiler = outer_query.get_compiler(using, elide_empty=elide_empty)
result = compiler.execute_sql(SINGLE)
if result is None:
result = empty_set_result
converters = compiler.get_converters(outer_query.annotation_select.values())
result = next(compiler.apply_converters((result,), converters))
return dict(zip(outer_query.annotation_select, result))
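# Illustrative sketch (model name assumed): when the query is sliced,
# distinct, combined, or already aggregated, the aggregation above runs over
# a wrapping subquery, e.g.:
#
#   Book.objects.all()[:10].count()
#   # -> SELECT COUNT(*) FROM (SELECT ... LIMIT 10) subquery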
def get_count(self, using):
"""
Perform a COUNT() query using the current filter constraints.
"""
obj = self.clone()
obj.add_annotation(Count("*"), alias="__count", is_summary=True)
return obj.get_aggregation(using, ["__count"])["__count"]
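# Illustrative sketch (model name assumed): QuerySet.count() funnels into
# get_count(), which clones the query and aggregates Count("*") as
# "__count":
#
#   Author.objects.filter(name__startswith="A").count()
#   # internally: qs.query.get_count(using="default")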
def has_filters(self):
return self.where
def exists(self, limit=True):
q = self.clone()
if not (q.distinct and q.is_sliced):
if q.group_by is True:
q.add_fields(
(f.attname for f in self.model._meta.concrete_fields), False
)
# Disable GROUP BY aliases to avoid orphaning references to the
# SELECT clause which is about to be cleared.
q.set_group_by(allow_aliases=False)
q.clear_select_clause()
if q.combined_queries and q.combinator == "union":
q.combined_queries = tuple(
combined_query.exists(limit=False)
for combined_query in q.combined_queries
)
q.clear_ordering(force=True)
if limit:
q.set_limits(high=1)
q.add_annotation(Value(1), "a")
return q
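# Illustrative: for a filtered queryset, exists() produces a query shaped
# roughly like (backend quoting elided):
#
#   SELECT 1 AS "a" FROM "author" WHERE <filters> LIMIT 1
#
# which is what QuerySet.exists() ultimately runs via has_results().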
def has_results(self, using):
q = self.exists()
compiler = q.get_compiler(using=using)
return compiler.has_results()
def explain(self, using, format=None, **options):
q = self.clone()
for option_name in options:
if (
not EXPLAIN_OPTIONS_PATTERN.fullmatch(option_name)
or "--" in option_name
):
raise ValueError(f"Invalid option name: {option_name!r}.")
q.explain_info = ExplainInfo(format, options)
compiler = q.get_compiler(using=using)
return "\n".join(compiler.explain_query())
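# Illustrative sketch: option names are validated against
# EXPLAIN_OPTIONS_PATTERN before being forwarded; format and option support
# vary by backend, e.g. on PostgreSQL:
#
#   plan = Author.objects.filter(pk=1).explain(format="json", analyze=True)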
def combine(self, rhs, connector):
"""
Merge the 'rhs' query into the current one (with any 'rhs' effects
being applied *after* (that is, "to the right of") anything in the
current query. 'rhs' is not modified during a call to this function.
The 'connector' parameter describes how to connect filters from the
'rhs' query.
"""
if self.model != rhs.model:
raise TypeError("Cannot combine queries on two different base models.")
if self.is_sliced:
raise TypeError("Cannot combine queries once a slice has been taken.")
if self.distinct != rhs.distinct:
raise TypeError("Cannot combine a unique query with a non-unique query.")
if self.distinct_fields != rhs.distinct_fields:
raise TypeError("Cannot combine queries with different distinct fields.")
# If lhs and rhs share the same alias prefix, it is possible to have
# conflicting alias changes like T4 -> T5, T5 -> T6, which might end up
# as T4 -> T6 while combining two querysets. To prevent this, change an
# alias prefix of the rhs and update current aliases accordingly,
# except if the alias is the base table since it must be present in the
# query on both sides.
initial_alias = self.get_initial_alias()
rhs.bump_prefix(self, exclude={initial_alias})
# Work out how to relabel the rhs aliases, if necessary.
change_map = {}
conjunction = connector == AND
# Determine which existing joins can be reused. When combining the
# query with AND we must recreate all joins for m2m filters. When
# combining with OR we can reuse joins. The reason is that in AND
# case a single row can't fulfill a condition like:
# revrel__col=1 & revrel__col=2
# But, there might be two different related rows matching this
# condition. In OR case a single True is enough, so single row is
# enough, too.
#
# Note that we will be creating duplicate joins for non-m2m joins in
# the AND case. The results will be correct but this creates too many
# joins. This is something that could be fixed later on.
reuse = set() if conjunction else set(self.alias_map)
joinpromoter = JoinPromoter(connector, 2, False)
joinpromoter.add_votes(
j for j in self.alias_map if self.alias_map[j].join_type == INNER
)
rhs_votes = set()
# Now, add the joins from rhs query into the new query (skipping base
# table).
rhs_tables = list(rhs.alias_map)[1:]
for alias in rhs_tables:
join = rhs.alias_map[alias]
# If the left side of the join was already relabeled, use the
# updated alias.
join = join.relabeled_clone(change_map)
new_alias = self.join(join, reuse=reuse)
if join.join_type == INNER:
rhs_votes.add(new_alias)
# We can't reuse the same join again in the query. If we have two
# distinct joins for the same connection in rhs query, then the
# combined query must have two joins, too.
reuse.discard(new_alias)
if alias != new_alias:
change_map[alias] = new_alias
if not rhs.alias_refcount[alias]:
# The alias was unused in the rhs query. Unref it so that it
# will be unused in the new query, too. We have to add and
# unref the alias so that join promotion has information of
# the join type for the unused alias.
self.unref_alias(new_alias)
joinpromoter.add_votes(rhs_votes)
joinpromoter.update_join_types(self)
# Combine subquery aliases to ensure alias relabelling properly
# handles subqueries when combining where and select clauses.
self.subq_aliases |= rhs.subq_aliases
# Now relabel a copy of the rhs where-clause and add it to the current
# one.
w = rhs.where.clone()
w.relabel_aliases(change_map)
self.where.add(w, connector)
# Selection columns and extra extensions are those provided by 'rhs'.
if rhs.select:
self.set_select([col.relabeled_clone(change_map) for col in rhs.select])
else:
self.select = ()
if connector == OR:
# It would be nice to be able to handle this, but the queries don't
# really make sense (or return consistent value sets). Not worth
# the extra complexity when you can write a real query instead.
if self.extra and rhs.extra:
raise ValueError(
"When merging querysets using 'or', you cannot have "
"extra(select=...) on both sides."
)
self.extra.update(rhs.extra)
extra_select_mask = set()
if self.extra_select_mask is not None:
extra_select_mask.update(self.extra_select_mask)
if rhs.extra_select_mask is not None:
extra_select_mask.update(rhs.extra_select_mask)
if extra_select_mask:
self.set_extra_mask(extra_select_mask)
self.extra_tables += rhs.extra_tables
# Ordering uses the 'rhs' ordering, unless it has none, in which case
# the current ordering is used.
self.order_by = rhs.order_by or self.order_by
self.extra_order_by = rhs.extra_order_by or self.extra_order_by
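# Illustrative sketch (assumed models): combine() backs the queryset & and |
# operators; OR may reuse rhs joins while AND recreates joins for m2m
# filters, as described above:
#
#   qs = Author.objects.filter(book__year=2020) | Author.objects.filter(
#       book__year=2021
#   )
#   # roughly: lhs.query.combine(rhs.query, OR)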
def _get_defer_select_mask(self, opts, mask, select_mask=None):
if select_mask is None:
select_mask = {}
select_mask[opts.pk] = {}
# All concrete fields that are not part of the defer mask must be
# loaded. If a relational field is encountered, it gets added to the
# mask so it can be considered by `select_related`, and the cycle
# continues by recursively calling this function.
for field in opts.concrete_fields:
field_mask = mask.pop(field.name, None)
if field_mask is None:
select_mask.setdefault(field, {})
elif field_mask:
if not field.is_relation:
raise FieldError(next(iter(field_mask)))
field_select_mask = select_mask.setdefault(field, {})
related_model = field.remote_field.model._meta.concrete_model
self._get_defer_select_mask(
related_model._meta, field_mask, field_select_mask
)
# Remaining defer entries must be references to reverse relationships.
# The following code is expected to raise FieldError if it encounters
# a malformed defer entry.
for field_name, field_mask in mask.items():
if filtered_relation := self._filtered_relations.get(field_name):
relation = opts.get_field(filtered_relation.relation_name)
field_select_mask = select_mask.setdefault((field_name, relation), {})
field = relation.field
else:
field = opts.get_field(field_name).field
field_select_mask = select_mask.setdefault(field, {})
related_model = field.model._meta.concrete_model
self._get_defer_select_mask(
related_model._meta, field_mask, field_select_mask
)
return select_mask
def _get_only_select_mask(self, opts, mask, select_mask=None):
if select_mask is None:
select_mask = {}
select_mask[opts.pk] = {}
# Only include fields mentioned in the mask.
for field_name, field_mask in mask.items():
field = opts.get_field(field_name)
field_select_mask = select_mask.setdefault(field, {})
if field_mask:
if not field.is_relation:
raise FieldError(next(iter(field_mask)))
related_model = field.remote_field.model._meta.concrete_model
self._get_only_select_mask(
related_model._meta, field_mask, field_select_mask
)
return select_mask
def get_select_mask(self):
"""
Convert the self.deferred_loading data structure to an alternate data
structure, describing the fields that *will* be loaded. This is used to
compute the columns to select from the database and also by the
QuerySet class to work out which fields are being initialized on each
model. Models that have all their fields included aren't mentioned in
the result, only those that have field restrictions in place.
"""
field_names, defer = self.deferred_loading
if not field_names:
return {}
mask = {}
for field_name in field_names:
part_mask = mask
for part in field_name.split(LOOKUP_SEP):
part_mask = part_mask.setdefault(part, {})
opts = self.get_meta()
if defer:
return self._get_defer_select_mask(opts, mask)
return self._get_only_select_mask(opts, mask)
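# Illustrative (field names assumed): defer() and only() store opposite
# deferred_loading shapes, which get_select_mask() normalizes:
#
#   Author.objects.defer("bio").query.deferred_loading
#   # -> (frozenset({'bio'}), True)
#   Author.objects.only("name").query.deferred_loading
#   # -> (frozenset({'name'}), False)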
def table_alias(self, table_name, create=False, filtered_relation=None):
"""
Return a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
alias_list = self.table_map.get(table_name)
if not create and alias_list:
alias = alias_list[0]
self.alias_refcount[alias] += 1
return alias, False
# Create a new alias for this table.
if alias_list:
alias = "%s%d" % (self.alias_prefix, len(self.alias_map) + 1)
alias_list.append(alias)
else:
# The first occurrence of a table uses the table name directly.
alias = (
filtered_relation.alias if filtered_relation is not None else table_name
)
self.table_map[table_name] = [alias]
self.alias_refcount[alias] = 1
return alias, True
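# Illustrative (table name assumed): the first occurrence of a table uses
# the table name itself; later aliases combine alias_prefix with the current
# alias_map size:
#
#   query.table_alias("author")               # -> ("author", True)
#   query.table_alias("author", create=True)  # -> e.g. ("T2", True)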
def ref_alias(self, alias):
"""Increases the reference count for this alias."""
self.alias_refcount[alias] += 1
def unref_alias(self, alias, amount=1):
"""Decreases the reference count for this alias."""
self.alias_refcount[alias] -= amount
def promote_joins(self, aliases):
"""
Recursively promote the join type of the given aliases and their
children to an outer join. A join is only promoted if it is nullable
or the parent join is an outer join.
The children promotion is done to avoid join chains that contain a LOUTER
b INNER c. So, if we have currently a INNER b INNER c and a->b is promoted,
then we must also promote b->c automatically, or otherwise the promotion
of a->b doesn't actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type is None:
# This is the base table (first FROM entry) - this table
# isn't really joined at all in the query, so we should not
# alter its join type.
continue
# Only the first alias (skipped above) should have None join_type
assert self.alias_map[alias].join_type is not None
parent_alias = self.alias_map[alias].parent_alias
parent_louter = (
parent_alias and self.alias_map[parent_alias].join_type == LOUTER
)
already_louter = self.alias_map[alias].join_type == LOUTER
if (self.alias_map[alias].nullable or parent_louter) and not already_louter:
self.alias_map[alias] = self.alias_map[alias].promote()
# Join type of 'alias' changed, so re-examine all aliases that
# refer to this one.
aliases.extend(
join
for join in self.alias_map
if self.alias_map[join].parent_alias == alias
and join not in aliases
)
def demote_joins(self, aliases):
"""
Change join type from LOUTER to INNER for all joins in aliases.
Similarly to promote_joins(), this method must ensure no join chains
containing first an outer, then an inner join are generated. If we
are demoting b->c join in chain a LOUTER b LOUTER c then we must
demote a->b automatically, or otherwise the demotion of b->c doesn't
actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type == LOUTER:
self.alias_map[alias] = self.alias_map[alias].demote()
parent_alias = self.alias_map[alias].parent_alias
if self.alias_map[parent_alias].join_type == INNER:
aliases.append(parent_alias)
def reset_refcounts(self, to_counts):
"""
Reset reference counts for aliases so that they match the value passed
in `to_counts`.
"""
for alias, cur_refcount in self.alias_refcount.copy().items():
unref_amount = cur_refcount - to_counts.get(alias, 0)
self.unref_alias(alias, unref_amount)
def change_aliases(self, change_map):
"""
Change the aliases in change_map (which maps old-alias -> new-alias),
relabelling any references to them in select columns and the where
clause.
"""
# If keys and values of change_map were to intersect, an alias might be
# updated twice (e.g. T4 -> T5, T5 -> T6, so also T4 -> T6) depending
# on their order in change_map.
assert set(change_map).isdisjoint(change_map.values())
# 1. Update references in "select" (normal columns plus aliases),
# "group by" and "where".
self.where.relabel_aliases(change_map)
if isinstance(self.group_by, tuple):
self.group_by = tuple(
[col.relabeled_clone(change_map) for col in self.group_by]
)
self.select = tuple([col.relabeled_clone(change_map) for col in self.select])
self.annotations = self.annotations and {
key: col.relabeled_clone(change_map)
for key, col in self.annotations.items()
}
# 2. Rename the alias in the internal table/alias datastructures.
for old_alias, new_alias in change_map.items():
if old_alias not in self.alias_map:
continue
alias_data = self.alias_map[old_alias].relabeled_clone(change_map)
self.alias_map[new_alias] = alias_data
self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
del self.alias_refcount[old_alias]
del self.alias_map[old_alias]
table_aliases = self.table_map[alias_data.table_name]
for pos, alias in enumerate(table_aliases):
if alias == old_alias:
table_aliases[pos] = new_alias
break
self.external_aliases = {
# Table is aliased or it's being changed and thus is aliased.
change_map.get(alias, alias): (aliased or alias in change_map)
for alias, aliased in self.external_aliases.items()
}
def bump_prefix(self, other_query, exclude=None):
"""
Change the alias prefix to the next letter in the alphabet in a way
that the other query's aliases and this query's aliases will not
conflict. Even tables that previously had no alias will get an alias
after this call. To prevent changing aliases use the exclude parameter.
"""
def prefix_gen():
"""
Generate a sequence of characters in alphabetical order:
-> 'A', 'B', 'C', ...
When the alphabet is finished, the sequence will continue with the
Cartesian product:
-> 'AA', 'AB', 'AC', ...
"""
alphabet = ascii_uppercase
prefix = chr(ord(self.alias_prefix) + 1)
yield prefix
for n in count(1):
seq = alphabet[alphabet.index(prefix) :] if prefix else alphabet
for s in product(seq, repeat=n):
yield "".join(s)
prefix = None
if self.alias_prefix != other_query.alias_prefix:
# No clashes between self and outer query should be possible.
return
# Explicitly avoid infinite loop. The constant divider is based on how
# much depth recursive subquery references add to the stack. This value
# might need to be adjusted when adding or removing function calls from
# the code path in charge of performing these operations.
local_recursion_limit = sys.getrecursionlimit() // 16
for pos, prefix in enumerate(prefix_gen()):
if prefix not in self.subq_aliases:
self.alias_prefix = prefix
break
if pos > local_recursion_limit:
raise RecursionError(
"Maximum recursion depth exceeded: too many subqueries."
)
self.subq_aliases = self.subq_aliases.union([self.alias_prefix])
other_query.subq_aliases = other_query.subq_aliases.union(self.subq_aliases)
if exclude is None:
exclude = {}
self.change_aliases(
{
alias: "%s%d" % (self.alias_prefix, pos)
for pos, alias in enumerate(self.alias_map)
if alias not in exclude
}
)
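# Illustrative: starting from the default prefix "T", prefix_gen() yields
# 'U', 'V', ..., 'Z', 'AA', 'AB', ... and bump_prefix() then relabels every
# non-excluded alias, e.g. {"author": "U0", "T2": "U1"}, so a subquery's
# aliases cannot clash with the outer query's.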
def get_initial_alias(self):
"""
Return the first alias for this query, after increasing its reference
count.
"""
if self.alias_map:
alias = self.base_table
self.ref_alias(alias)
elif self.model:
alias = self.join(self.base_table_class(self.get_meta().db_table, None))
else:
alias = None
return alias
def count_active_tables(self):
"""
Return the number of tables in this query with a non-zero reference
count. After execution, the reference counts are zeroed, so tables
added in compiler will not be seen by this method.
"""
return len([1 for count in self.alias_refcount.values() if count])
def join(self, join, reuse=None, reuse_with_filtered_relation=False):
"""
Return an alias for the 'join', either reusing an existing alias for
that join or creating a new one. 'join' is either a base_table_class or
join_class.
The 'reuse' parameter can be either None which means all joins are
reusable, or it can be a set containing the aliases that can be reused.
The 'reuse_with_filtered_relation' parameter is used when computing
FilteredRelation instances.
A join is always created as LOUTER if the lhs alias is LOUTER to make
sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new
joins are created as LOUTER if the join is nullable.
"""
if reuse_with_filtered_relation and reuse:
reuse_aliases = [
a for a, j in self.alias_map.items() if a in reuse and j.equals(join)
]
else:
reuse_aliases = [
a
for a, j in self.alias_map.items()
if (reuse is None or a in reuse) and j == join
]
if reuse_aliases:
if join.table_alias in reuse_aliases:
reuse_alias = join.table_alias
else:
# Reuse the most recent alias of the joined table
# (a many-to-many relation may be joined multiple times).
reuse_alias = reuse_aliases[-1]
self.ref_alias(reuse_alias)
return reuse_alias
# No reuse is possible, so we need a new alias.
alias, _ = self.table_alias(
join.table_name, create=True, filtered_relation=join.filtered_relation
)
if join.join_type:
if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:
join_type = LOUTER
else:
join_type = INNER
join.join_type = join_type
join.table_alias = alias
self.alias_map[alias] = join
return alias
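# Illustrative (assumed models): join reuse is why a single filter() call
# constrains one related row while chained filter() calls may add joins:
#
#   Author.objects.filter(book__year=2020, book__title="X")         # one join
#   Author.objects.filter(book__year=2020).filter(book__title="X")  # two joins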
def join_parent_model(self, opts, model, alias, seen):
"""
Make sure the given 'model' is joined in the query. If 'model' isn't
a parent of 'opts' or if it is None this method is a no-op.
The 'alias' is the root alias for starting the join, 'seen' is a dict
of model -> alias of existing joins. It must also contain a mapping
of None -> some alias. This will be returned in the no-op case.
"""
if model in seen:
return seen[model]
chain = opts.get_base_chain(model)
if not chain:
return alias
curr_opts = opts
for int_model in chain:
if int_model in seen:
curr_opts = int_model._meta
alias = seen[int_model]
continue
# Proxy models have elements in the base chain with no parents;
# assign the new options object and skip to the next base in that
# case.
if not curr_opts.parents[int_model]:
curr_opts = int_model._meta
continue
link_field = curr_opts.get_ancestor_link(int_model)
join_info = self.setup_joins([link_field.name], curr_opts, alias)
curr_opts = int_model._meta
alias = seen[int_model] = join_info.joins[-1]
return alias or seen[None]
def check_alias(self, alias):
if FORBIDDEN_ALIAS_PATTERN.search(alias):
raise ValueError(
"Column aliases cannot contain whitespace characters, quotation marks, "
"semicolons, or SQL comments."
)
def add_annotation(self, annotation, alias, is_summary=False, select=True):
"""Add a single annotation expression to the Query."""
self.check_alias(alias)
annotation = annotation.resolve_expression(
self, allow_joins=True, reuse=None, summarize=is_summary
)
if select:
self.append_annotation_mask([alias])
else:
self.set_annotation_mask(set(self.annotation_select).difference({alias}))
self.annotations[alias] = annotation
def resolve_expression(self, query, *args, **kwargs):
clone = self.clone()
# Subqueries need to use a different set of aliases than the outer query.
clone.bump_prefix(query)
clone.subquery = True
clone.where.resolve_expression(query, *args, **kwargs)
# Resolve combined queries.
if clone.combinator:
clone.combined_queries = tuple(
[
combined_query.resolve_expression(query, *args, **kwargs)
for combined_query in clone.combined_queries
]
)
for key, value in clone.annotations.items():
resolved = value.resolve_expression(query, *args, **kwargs)
if hasattr(resolved, "external_aliases"):
resolved.external_aliases.update(clone.external_aliases)
clone.annotations[key] = resolved
# Outer query's aliases are considered external.
for alias, table in query.alias_map.items():
clone.external_aliases[alias] = (
isinstance(table, Join)
and table.join_field.related_model._meta.db_table != alias
) or (
isinstance(table, BaseTable) and table.table_name != table.table_alias
)
return clone
def get_external_cols(self):
exprs = chain(self.annotations.values(), self.where.children)
return [
col
for col in self._gen_cols(exprs, include_external=True)
if col.alias in self.external_aliases
]
def get_group_by_cols(self, wrapper=None):
# If wrapper is referenced by an alias for an explicit GROUP BY through
# values(), a reference to this expression, and not self, must be
# returned to ensure external column references are not grouped against
# as well.
external_cols = self.get_external_cols()
if any(col.possibly_multivalued for col in external_cols):
return [wrapper or self]
return external_cols
def as_sql(self, compiler, connection):
# Some backends (e.g. Oracle) raise an error when a subquery contains
# unnecessary ORDER BY clause.
if (
self.subquery
and not connection.features.ignores_unnecessary_order_by_in_subqueries
):
self.clear_ordering(force=False)
for query in self.combined_queries:
query.clear_ordering(force=False)
sql, params = self.get_compiler(connection=connection).as_sql()
if self.subquery:
sql = "(%s)" % sql
return sql, params
def resolve_lookup_value(self, value, can_reuse, allow_joins):
if hasattr(value, "resolve_expression"):
value = value.resolve_expression(
self,
reuse=can_reuse,
allow_joins=allow_joins,
)
elif isinstance(value, (list, tuple)):
# The items of the iterable may be expressions and therefore need
# to be resolved independently.
values = (
self.resolve_lookup_value(sub_value, can_reuse, allow_joins)
for sub_value in value
)
type_ = type(value)
if hasattr(type_, "_make"): # namedtuple
return type_(*values)
return type_(values)
return value
def solve_lookup_type(self, lookup, summarize=False):
"""
Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains').
"""
lookup_splitted = lookup.split(LOOKUP_SEP)
if self.annotations:
annotation, expression_lookups = refs_expression(
lookup_splitted, self.annotations
)
if annotation:
expression = self.annotations[annotation]
if summarize:
expression = Ref(annotation, expression)
return expression_lookups, (), expression
_, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
field_parts = lookup_splitted[0 : len(lookup_splitted) - len(lookup_parts)]
if len(lookup_parts) > 1 and not field_parts:
raise FieldError(
'Invalid lookup "%s" for model "%s".'
% (lookup, self.get_meta().model.__name__)
)
return lookup_parts, field_parts, False
def check_query_object_type(self, value, opts, field):
"""
Check whether the object passed while querying is of the correct type.
If not, raise a ValueError specifying the wrong object.
"""
if hasattr(value, "_meta"):
if not check_rel_lookup_compatibility(value._meta.model, opts, field):
raise ValueError(
'Cannot query "%s": Must be "%s" instance.'
% (value, opts.object_name)
)
def check_related_objects(self, field, value, opts):
"""Check the type of object passed to query relations."""
if field.is_relation:
# Check that the field and the queryset use the same model in a
# query like .filter(author=Author.objects.all()). For example, the
# opts would be Author's (from the author field) and value.model
# would be Author.objects.all() queryset's .model (Author also).
# The field is the related field on the lhs side.
if (
isinstance(value, Query)
and not value.has_select_fields
and not check_rel_lookup_compatibility(value.model, opts, field)
):
raise ValueError(
'Cannot use QuerySet for "%s": Use a QuerySet for "%s".'
% (value.model._meta.object_name, opts.object_name)
)
elif hasattr(value, "_meta"):
self.check_query_object_type(value, opts, field)
elif hasattr(value, "__iter__"):
for v in value:
self.check_query_object_type(v, opts, field)
def check_filterable(self, expression):
"""Raise an error if expression cannot be used in a WHERE clause."""
if hasattr(expression, "resolve_expression") and not getattr(
expression, "filterable", True
):
raise NotSupportedError(
expression.__class__.__name__ + " is disallowed in the filter "
"clause."
)
if hasattr(expression, "get_source_expressions"):
for expr in expression.get_source_expressions():
self.check_filterable(expr)
def build_lookup(self, lookups, lhs, rhs):
"""
Try to extract transforms and lookup from given lhs.
The lhs value is something that works like SQLExpression.
The rhs value is what the lookup is going to compare against.
The lookups is a list of names to extract using get_lookup()
and get_transform().
"""
# __exact is the default lookup if one isn't given.
*transforms, lookup_name = lookups or ["exact"]
for name in transforms:
lhs = self.try_transform(lhs, name)
# First try get_lookup() so that the lookup takes precedence if the lhs
# supports both transform and lookup for the name.
lookup_class = lhs.get_lookup(lookup_name)
if not lookup_class:
# A lookup wasn't found. Try to interpret the name as a transform
# and do an Exact lookup against it.
lhs = self.try_transform(lhs, lookup_name)
lookup_name = "exact"
lookup_class = lhs.get_lookup(lookup_name)
if not lookup_class:
return
lookup = lookup_class(lhs, rhs)
# Interpret '__exact=None' as the sql 'is NULL'; otherwise, reject all
# uses of None as a query value unless the lookup supports it.
if lookup.rhs is None and not lookup.can_use_none_as_rhs:
if lookup_name not in ("exact", "iexact"):
raise ValueError("Cannot use None as a query value")
return lhs.get_lookup("isnull")(lhs, True)
# For Oracle '' is equivalent to null. The check must be done at this
# stage because join promotion can't be done in the compiler. Using
# DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here.
# A similar thing is done in is_nullable(), too.
if (
lookup_name == "exact"
and lookup.rhs == ""
and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls
):
return lhs.get_lookup("isnull")(lhs, True)
return lookup
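# Illustrative: a None right-hand side becomes an IS NULL lookup here, so
# .filter(author=None) behaves like .filter(author__isnull=True):
#
#   lookup = query.build_lookup(["exact"], col, None)
#   # -> isnull lookup (col IS NULL), not Exact(col, None)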
def try_transform(self, lhs, name):
"""
Helper method for build_lookup(). Try to fetch and initialize
a transform for name parameter from lhs.
"""
transform_class = lhs.get_transform(name)
if transform_class:
return transform_class(lhs)
else:
output_field = lhs.output_field.__class__
suggested_lookups = difflib.get_close_matches(
name, output_field.get_lookups()
)
if suggested_lookups:
suggestion = ", perhaps you meant %s?" % " or ".join(suggested_lookups)
else:
suggestion = "."
raise FieldError(
"Unsupported lookup '%s' for %s or join on the field not "
"permitted%s" % (name, output_field.__name__, suggestion)
)
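# Illustrative: a misspelled lookup surfaces here with a suggestion, e.g.
# .filter(name__iconatins="x") raises FieldError with
# "Unsupported lookup 'iconatins' ... perhaps you meant icontains?"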
def build_filter(
self,
filter_expr,
branch_negated=False,
current_negated=False,
can_reuse=None,
allow_joins=True,
split_subq=True,
reuse_with_filtered_relation=False,
check_filterable=True,
summarize=False,
):
"""
Build a WhereNode for a single filter clause but don't add it
to this Query. Query.add_q() will then add this filter to the where
Node.
The 'branch_negated' tells us if the current branch contains any
negations. This will be used to determine if subqueries are needed.
The 'current_negated' is used to determine if the current filter is
negated or not and this will be used to determine if IS NULL filtering
is needed.
The difference between current_negated and branch_negated is that
branch_negated is set on first negation, but current_negated is
flipped for each negation.
Note that add_filter will not do any negating itself, that is done
higher up in the code by add_q().
The 'can_reuse' is a set of reusable joins for multijoins.
If 'reuse_with_filtered_relation' is True, then only joins in can_reuse
will be reused.
The method will create a filter clause that can be added to the current
query. However, if the filter isn't added to the query then the caller
is responsible for unreffing the joins used.
"""
if isinstance(filter_expr, dict):
raise FieldError("Cannot parse keyword query as dict")
if isinstance(filter_expr, Q):
return self._add_q(
filter_expr,
branch_negated=branch_negated,
current_negated=current_negated,
used_aliases=can_reuse,
allow_joins=allow_joins,
split_subq=split_subq,
check_filterable=check_filterable,
summarize=summarize,
)
if hasattr(filter_expr, "resolve_expression"):
if not getattr(filter_expr, "conditional", False):
raise TypeError("Cannot filter against a non-conditional expression.")
condition = filter_expr.resolve_expression(
self, allow_joins=allow_joins, summarize=summarize
)
if not isinstance(condition, Lookup):
condition = self.build_lookup(["exact"], condition, True)
return WhereNode([condition], connector=AND), []
arg, value = filter_expr
if not arg:
raise FieldError("Cannot parse keyword query %r" % arg)
lookups, parts, reffed_expression = self.solve_lookup_type(arg, summarize)
if check_filterable:
self.check_filterable(reffed_expression)
if not allow_joins and len(parts) > 1:
raise FieldError("Joined field references are not permitted in this query")
pre_joins = self.alias_refcount.copy()
value = self.resolve_lookup_value(value, can_reuse, allow_joins)
used_joins = {
k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)
}
if check_filterable:
self.check_filterable(value)
if reffed_expression:
condition = self.build_lookup(lookups, reffed_expression, value)
return WhereNode([condition], connector=AND), []
opts = self.get_meta()
alias = self.get_initial_alias()
allow_many = not branch_negated or not split_subq
try:
join_info = self.setup_joins(
parts,
opts,
alias,
can_reuse=can_reuse,
allow_many=allow_many,
reuse_with_filtered_relation=reuse_with_filtered_relation,
)
# Prevent iterator from being consumed by check_related_objects()
if isinstance(value, Iterator):
value = list(value)
self.check_related_objects(join_info.final_field, value, join_info.opts)
# split_exclude() needs to know which joins were generated for the
# lookup parts
self._lookup_joins = join_info.joins
except MultiJoin as e:
return self.split_exclude(filter_expr, can_reuse, e.names_with_path)
# Update used_joins before trimming since they are reused to determine
# which joins could be later promoted to INNER.
used_joins.update(join_info.joins)
targets, alias, join_list = self.trim_joins(
join_info.targets, join_info.joins, join_info.path
)
if can_reuse is not None:
can_reuse.update(join_list)
if join_info.final_field.is_relation:
if len(targets) == 1:
col = self._get_col(targets[0], join_info.final_field, alias)
else:
col = MultiColSource(
alias, targets, join_info.targets, join_info.final_field
)
else:
col = self._get_col(targets[0], join_info.final_field, alias)
condition = self.build_lookup(lookups, col, value)
lookup_type = condition.lookup_name
clause = WhereNode([condition], connector=AND)
require_outer = (
lookup_type == "isnull" and condition.rhs is True and not current_negated
)
if (
current_negated
and (lookup_type != "isnull" or condition.rhs is False)
and condition.rhs is not None
):
require_outer = True
if lookup_type != "isnull":
# The condition added here will be SQL like this:
# NOT (col IS NOT NULL), where the first NOT is added in
# upper layers of code. The reason for addition is that if col
# is null, then col != someval will result in SQL "unknown"
# which isn't the same as in Python. The Python None handling
# is wanted, and it can be gotten by
# (col IS NULL OR col != someval)
# <=>
# NOT (col IS NOT NULL AND col = someval).
if (
self.is_nullable(targets[0])
or self.alias_map[join_list[-1]].join_type == LOUTER
):
lookup_class = targets[0].get_lookup("isnull")
col = self._get_col(targets[0], join_info.targets[0], alias)
clause.add(lookup_class(col, False), AND)
# If someval is a nullable column, someval IS NOT NULL is
# added.
if isinstance(value, Col) and self.is_nullable(value.target):
lookup_class = value.target.get_lookup("isnull")
clause.add(lookup_class(value, False), AND)
return clause, used_joins if not require_outer else ()
def add_filter(self, filter_lhs, filter_rhs):
self.add_q(Q((filter_lhs, filter_rhs)))
def add_q(self, q_object):
"""
A preprocessor for the internal _add_q(). Responsible for doing final
join promotion.
"""
# For join promotion this case is doing an AND for the added q_object
# and existing conditions. So, any existing inner join forces the join
# type to remain inner. Existing outer joins can however be demoted.
# (Consider case where rel_a is LOUTER and rel_a__col=1 is added - if
# rel_a doesn't produce any rows, then the whole condition must fail.
# So, demotion is OK.)
existing_inner = {
a for a in self.alias_map if self.alias_map[a].join_type == INNER
}
clause, _ = self._add_q(q_object, self.used_aliases)
if clause:
self.where.add(clause, AND)
self.demote_joins(existing_inner)
def build_where(self, filter_expr):
return self.build_filter(filter_expr, allow_joins=False)[0]
def clear_where(self):
self.where = WhereNode()
def _add_q(
self,
q_object,
used_aliases,
branch_negated=False,
current_negated=False,
allow_joins=True,
split_subq=True,
check_filterable=True,
summarize=False,
):
"""Add a Q-object to the current filter."""
connector = q_object.connector
current_negated ^= q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = WhereNode(connector=connector, negated=q_object.negated)
joinpromoter = JoinPromoter(
q_object.connector, len(q_object.children), current_negated
)
for child in q_object.children:
child_clause, needed_inner = self.build_filter(
child,
can_reuse=used_aliases,
branch_negated=branch_negated,
current_negated=current_negated,
allow_joins=allow_joins,
split_subq=split_subq,
check_filterable=check_filterable,
summarize=summarize,
)
joinpromoter.add_votes(needed_inner)
if child_clause:
target_clause.add(child_clause, connector)
needed_inner = joinpromoter.update_join_types(self)
return target_clause, needed_inner
def build_filtered_relation_q(
self, q_object, reuse, branch_negated=False, current_negated=False
):
"""Add a FilteredRelation object to the current filter."""
connector = q_object.connector
current_negated ^= q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = WhereNode(connector=connector, negated=q_object.negated)
for child in q_object.children:
if isinstance(child, Node):
child_clause = self.build_filtered_relation_q(
child,
reuse=reuse,
branch_negated=branch_negated,
current_negated=current_negated,
)
else:
child_clause, _ = self.build_filter(
child,
can_reuse=reuse,
branch_negated=branch_negated,
current_negated=current_negated,
allow_joins=True,
split_subq=False,
reuse_with_filtered_relation=True,
)
target_clause.add(child_clause, connector)
return target_clause
def add_filtered_relation(self, filtered_relation, alias):
filtered_relation.alias = alias
lookups = dict(get_children_from_q(filtered_relation.condition))
relation_lookup_parts, relation_field_parts, _ = self.solve_lookup_type(
filtered_relation.relation_name
)
if relation_lookup_parts:
raise ValueError(
"FilteredRelation's relation_name cannot contain lookups "
"(got %r)." % filtered_relation.relation_name
)
for lookup in chain(lookups):
lookup_parts, lookup_field_parts, _ = self.solve_lookup_type(lookup)
shift = 2 if not lookup_parts else 1
lookup_field_path = lookup_field_parts[:-shift]
for idx, lookup_field_part in enumerate(lookup_field_path):
if len(relation_field_parts) > idx:
if relation_field_parts[idx] != lookup_field_part:
raise ValueError(
"FilteredRelation's condition doesn't support "
"relations outside the %r (got %r)."
% (filtered_relation.relation_name, lookup)
)
else:
raise ValueError(
"FilteredRelation's condition doesn't support nested "
"relations deeper than the relation_name (got %r for "
"%r)." % (lookup, filtered_relation.relation_name)
)
self._filtered_relations[filtered_relation.alias] = filtered_relation
def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
"""
Walk the list of names and turn them into PathInfo tuples. A single
name in 'names' can generate multiple PathInfos (m2m, for example).
'names' is the path of names to travel, 'opts' is the model Options we
start the name resolving from, 'allow_many' is as for setup_joins().
If fail_on_missing is set to True, then a name that can't be resolved
will generate a FieldError.
Return a list of PathInfo tuples. In addition return the final field
(the last used join field) and target (which is a field guaranteed to
contain the same value as the final field). Finally, return those names
that weren't found (which are likely transforms and the final lookup).
"""
path, names_with_path = [], []
for pos, name in enumerate(names):
cur_names_with_path = (name, [])
if name == "pk":
name = opts.pk.name
field = None
filtered_relation = None
try:
if opts is None:
raise FieldDoesNotExist
field = opts.get_field(name)
except FieldDoesNotExist:
if name in self.annotation_select:
field = self.annotation_select[name].output_field
elif name in self._filtered_relations and pos == 0:
filtered_relation = self._filtered_relations[name]
if LOOKUP_SEP in filtered_relation.relation_name:
parts = filtered_relation.relation_name.split(LOOKUP_SEP)
filtered_relation_path, field, _, _ = self.names_to_path(
parts,
opts,
allow_many,
fail_on_missing,
)
path.extend(filtered_relation_path[:-1])
else:
field = opts.get_field(filtered_relation.relation_name)
if field is not None:
# Fields that contain one-to-many relations with a generic
# model (like a GenericForeignKey) cannot generate reverse
# relations and therefore cannot be used for reverse querying.
if field.is_relation and not field.related_model:
raise FieldError(
"Field %r does not generate an automatic reverse "
"relation and therefore cannot be used for reverse "
"querying. If it is a GenericForeignKey, consider "
"adding a GenericRelation." % name
)
try:
model = field.model._meta.concrete_model
except AttributeError:
# QuerySet.annotate() may introduce fields that aren't
# attached to a model.
model = None
else:
# We didn't find the current field, so move position back
# one step.
pos -= 1
if pos == -1 or fail_on_missing:
available = sorted(
[
*get_field_names_from_opts(opts),
*self.annotation_select,
*self._filtered_relations,
]
)
raise FieldError(
"Cannot resolve keyword '%s' into field. "
"Choices are: %s" % (name, ", ".join(available))
)
break
# Check if we need any joins for concrete inheritance cases (the
# field lives in parent, but we are currently in one of its
# children)
if opts is not None and model is not opts.model:
path_to_parent = opts.get_path_to_parent(model)
if path_to_parent:
path.extend(path_to_parent)
cur_names_with_path[1].extend(path_to_parent)
opts = path_to_parent[-1].to_opts
if hasattr(field, "path_infos"):
if filtered_relation:
pathinfos = field.get_path_info(filtered_relation)
else:
pathinfos = field.path_infos
if not allow_many:
for inner_pos, p in enumerate(pathinfos):
if p.m2m:
cur_names_with_path[1].extend(pathinfos[0 : inner_pos + 1])
names_with_path.append(cur_names_with_path)
raise MultiJoin(pos + 1, names_with_path)
last = pathinfos[-1]
path.extend(pathinfos)
final_field = last.join_field
opts = last.to_opts
targets = last.target_fields
cur_names_with_path[1].extend(pathinfos)
names_with_path.append(cur_names_with_path)
else:
# Local non-relational field.
final_field = field
targets = (field,)
if fail_on_missing and pos + 1 != len(names):
raise FieldError(
"Cannot resolve keyword %r into field. Join on '%s'"
" not permitted." % (names[pos + 1], name)
)
break
return path, final_field, targets, names[pos + 1 :]
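# Illustrative (assumed models): for names ['book', 'author', 'name',
# 'icontains'] resolved from Publisher's Options, names_to_path() returns
# the PathInfos for book -> author, the 'name' field as final field and
# target, and ['icontains'] as the unresolved trailing lookup.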
def setup_joins(
self,
names,
opts,
alias,
can_reuse=None,
allow_many=True,
reuse_with_filtered_relation=False,
):
"""
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are starting from), 'alias' is the alias for
the table to start the joining from.
The 'can_reuse' defines the reverse foreign key joins we can reuse. It
can be None in which case all joins are reusable or a set of aliases
that can be reused. Note that non-reverse foreign keys are always
reusable when using setup_joins().
The 'reuse_with_filtered_relation' flag restricts join reuse to the
aliases in 'can_reuse' when computing FilteredRelation instances.
If 'allow_many' is False, then any reverse foreign key seen will
generate a MultiJoin exception.
Return the final field involved in the joins, the target field (used
for any 'where' constraint), the final 'opts' value, the joins, the
field path traveled to generate the joins, and a transform function
that takes a field and alias and is equivalent to `field.get_col(alias)`
in the simple case but wraps field transforms if they were included in
names.
The target field is the field containing the concrete value. Final
field can be something different, for example foreign key pointing to
that value. Final field is needed for example in some value
conversions (convert 'obj' in fk__id=obj to pk val using the foreign
key field for example).
"""
joins = [alias]
# The transform can't be applied yet, as joins must be trimmed later.
# To avoid making every caller of this method look up transforms
# directly, compute transforms here and create a partial that converts
# fields to the appropriate wrapped version.
def final_transformer(field, alias):
if not self.alias_cols:
alias = None
return field.get_col(alias)
# Try resolving all the names as fields first. If there's an error,
# treat trailing names as lookups until a field can be resolved.
last_field_exception = None
for pivot in range(len(names), 0, -1):
try:
path, final_field, targets, rest = self.names_to_path(
names[:pivot],
opts,
allow_many,
fail_on_missing=True,
)
except FieldError as exc:
if pivot == 1:
# The first item cannot be a lookup, so it's safe
# to raise the field error here.
raise
else:
last_field_exception = exc
else:
# The transforms are the remaining items that couldn't be
# resolved into fields.
transforms = names[pivot:]
break
for name in transforms:
def transform(field, alias, *, name, previous):
try:
wrapped = previous(field, alias)
return self.try_transform(wrapped, name)
except FieldError:
# FieldError is raised if the transform doesn't exist.
if isinstance(final_field, Field) and last_field_exception:
raise last_field_exception
else:
raise
final_transformer = functools.partial(
transform, name=name, previous=final_transformer
)
final_transformer.has_transforms = True
# Then, add the path to the query's joins. Note that we can't trim
# joins at this stage - we will need the information about join type
# of the trimmed joins.
for join in path:
if join.filtered_relation:
filtered_relation = join.filtered_relation.clone()
table_alias = filtered_relation.alias
else:
filtered_relation = None
table_alias = None
opts = join.to_opts
if join.direct:
nullable = self.is_nullable(join.join_field)
else:
nullable = True
connection = self.join_class(
opts.db_table,
alias,
table_alias,
INNER,
join.join_field,
nullable,
filtered_relation=filtered_relation,
)
reuse = can_reuse if join.m2m or reuse_with_filtered_relation else None
alias = self.join(
connection,
reuse=reuse,
reuse_with_filtered_relation=reuse_with_filtered_relation,
)
joins.append(alias)
if filtered_relation:
filtered_relation.path = joins[:]
return JoinInfo(final_field, targets, opts, joins, path, final_transformer)
def trim_joins(self, targets, joins, path):
"""
The 'targets' parameter is the final field being joined to, 'joins'
is the full list of join aliases. The 'path' contains the PathInfos
used to create the joins.
Return the final target field and table alias and the new active
joins.
Always trim any direct join if the target column is already in the
previous table. Can't trim reverse joins as it's unknown if there's
anything on the other side of the join.
"""
joins = joins[:]
for pos, info in enumerate(reversed(path)):
if len(joins) == 1 or not info.direct:
break
if info.filtered_relation:
break
join_targets = {t.column for t in info.join_field.foreign_related_fields}
cur_targets = {t.column for t in targets}
if not cur_targets.issubset(join_targets):
break
targets_dict = {
r[1].column: r[0]
for r in info.join_field.related_fields
if r[1].column in cur_targets
}
targets = tuple(targets_dict[t.column] for t in targets)
self.unref_alias(joins.pop())
return targets, joins[-1], joins
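# Illustrative (assumed models): a filter on a foreign key's primary key
# needs no join, since the value already lives on the source table:
#
#   Book.objects.filter(author__id=3)
#   # -> WHERE "book"."author_id" = 3   (the JOIN on "author" is trimmed)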
@classmethod
def _gen_cols(cls, exprs, include_external=False, resolve_refs=True):
for expr in exprs:
if isinstance(expr, Col):
yield expr
elif include_external and callable(
getattr(expr, "get_external_cols", None)
):
yield from expr.get_external_cols()
elif hasattr(expr, "get_source_expressions"):
if not resolve_refs and isinstance(expr, Ref):
continue
yield from cls._gen_cols(
expr.get_source_expressions(),
include_external=include_external,
resolve_refs=resolve_refs,
)
@classmethod
def _gen_col_aliases(cls, exprs):
yield from (expr.alias for expr in cls._gen_cols(exprs))
def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False):
annotation = self.annotations.get(name)
if annotation is not None:
if not allow_joins:
for alias in self._gen_col_aliases([annotation]):
if isinstance(self.alias_map[alias], Join):
raise FieldError(
"Joined field references are not permitted in this query"
)
if summarize:
# Summarize currently means we are doing an aggregate() query
# which is executed as a wrapped subquery if any of the
# aggregate() elements reference an existing annotation. In
# that case we need to return a Ref to the subquery's annotation.
if name not in self.annotation_select:
raise FieldError(
"Cannot aggregate over the '%s' alias. Use annotate() "
"to promote it." % name
)
return Ref(name, self.annotation_select[name])
else:
return annotation
else:
field_list = name.split(LOOKUP_SEP)
annotation = self.annotations.get(field_list[0])
if annotation is not None:
for transform in field_list[1:]:
annotation = self.try_transform(annotation, transform)
return annotation
join_info = self.setup_joins(
field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse
)
targets, final_alias, join_list = self.trim_joins(
join_info.targets, join_info.joins, join_info.path
)
if not allow_joins and len(join_list) > 1:
raise FieldError(
"Joined field references are not permitted in this query"
)
if len(targets) > 1:
raise FieldError(
"Referencing multicolumn fields with F() objects isn't supported"
)
# Verify that the last lookup in name is a field or a transform:
# transform_function() raises FieldError if not.
transform = join_info.transform_function(targets[0], final_alias)
if reuse is not None:
reuse.update(join_list)
return transform
def split_exclude(self, filter_expr, can_reuse, names_with_path):
"""
When doing an exclude against any kind of N-to-many relation, we need
to use a subquery. This method constructs the nested query, given the
original exclude filter (filter_expr) and the portion up to the first
N-to-many relation field.
For example, if the origin filter is ~Q(child__name='foo'), filter_expr
is ('child__name', 'foo') and can_reuse is a set of joins usable for
filters in the original query.
We will turn this into equivalent of:
WHERE NOT EXISTS(
SELECT 1
FROM child
WHERE name = 'foo' AND child.parent_id = parent.id
LIMIT 1
)
"""
# Generate the inner query.
query = self.__class__(self.model)
query._filtered_relations = self._filtered_relations
filter_lhs, filter_rhs = filter_expr
if isinstance(filter_rhs, OuterRef):
filter_rhs = OuterRef(filter_rhs)
elif isinstance(filter_rhs, F):
filter_rhs = OuterRef(filter_rhs.name)
query.add_filter(filter_lhs, filter_rhs)
query.clear_ordering(force=True)
# Try to have as simple as possible subquery -> trim leading joins from
# the subquery.
trimmed_prefix, contains_louter = query.trim_start(names_with_path)
col = query.select[0]
select_field = col.target
alias = col.alias
if alias in can_reuse:
pk = select_field.model._meta.pk
            # Need to add a restriction so that the outer query's filters are
            # in effect for the subquery, too.
query.bump_prefix(self)
lookup_class = select_field.get_lookup("exact")
# Note that the query.select[0].alias is different from alias
# due to bump_prefix above.
lookup = lookup_class(pk.get_col(query.select[0].alias), pk.get_col(alias))
query.where.add(lookup, AND)
query.external_aliases[alias] = True
lookup_class = select_field.get_lookup("exact")
lookup = lookup_class(col, ResolvedOuterRef(trimmed_prefix))
query.where.add(lookup, AND)
condition, needed_inner = self.build_filter(Exists(query))
if contains_louter:
or_null_condition, _ = self.build_filter(
("%s__isnull" % trimmed_prefix, True),
current_negated=True,
branch_negated=True,
can_reuse=can_reuse,
)
condition.add(or_null_condition, OR)
# Note that the end result will be:
# (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.
# This might look crazy but due to how IN works, this seems to be
# correct. If the IS NOT NULL check is removed then outercol NOT
# IN will return UNKNOWN. If the IS NULL check is removed, then if
# outercol IS NULL we will not match the row.
return condition, needed_inner
def set_empty(self):
self.where.add(NothingNode(), AND)
for query in self.combined_queries:
query.set_empty()
def is_empty(self):
return any(isinstance(c, NothingNode) for c in self.where.children)
def set_limits(self, low=None, high=None):
"""
Adjust the limits on the rows retrieved. Use low/high to set these,
as it makes it more Pythonic to read and write. When the SQL query is
created, convert them to the appropriate offset and limit values.
Apply any limits passed in here to the existing constraints. Add low
to the current low value and clamp both to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
if self.low_mark == self.high_mark:
self.set_empty()
def clear_limits(self):
"""Clear any existing limits."""
self.low_mark, self.high_mark = 0, None
@property
def is_sliced(self):
return self.low_mark != 0 or self.high_mark is not None
def has_limit_one(self):
return self.high_mark is not None and (self.high_mark - self.low_mark) == 1
def can_filter(self):
"""
Return True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.is_sliced
def clear_select_clause(self):
"""Remove all fields from SELECT clause."""
self.select = ()
self.default_cols = False
self.select_related = False
self.set_extra_mask(())
self.set_annotation_mask(())
def clear_select_fields(self):
"""
Clear the list of fields to select (but not extra_select columns).
Some queryset types completely replace any existing list of select
columns.
"""
self.select = ()
self.values_select = ()
def add_select_col(self, col, name):
self.select += (col,)
self.values_select += (name,)
def set_select(self, cols):
self.default_cols = False
self.select = tuple(cols)
def add_distinct_fields(self, *field_names):
"""
Add and resolve the given fields to the query's "distinct on" clause.
"""
self.distinct_fields = field_names
self.distinct = True
def add_fields(self, field_names, allow_m2m=True):
"""
Add the given (model) fields to the select set. Add the field names in
the order specified.
"""
alias = self.get_initial_alias()
opts = self.get_meta()
try:
cols = []
for name in field_names:
# Join promotion note - we must not remove any rows here, so
                # if there are no existing joins, use an outer join.
join_info = self.setup_joins(
name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m
)
targets, final_alias, joins = self.trim_joins(
join_info.targets,
join_info.joins,
join_info.path,
)
for target in targets:
cols.append(join_info.transform_function(target, final_alias))
if cols:
self.set_select(cols)
except MultiJoin:
raise FieldError("Invalid field name: '%s'" % name)
except FieldError:
if LOOKUP_SEP in name:
# For lookups spanning over relationships, show the error
# from the model on which the lookup failed.
raise
elif name in self.annotations:
raise FieldError(
"Cannot select the '%s' alias. Use annotate() to promote "
"it." % name
)
else:
names = sorted(
[
*get_field_names_from_opts(opts),
*self.extra,
*self.annotation_select,
*self._filtered_relations,
]
)
raise FieldError(
"Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names))
)
def add_ordering(self, *ordering):
"""
Add items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-' or '?') -- or OrderBy
expressions.
If 'ordering' is empty, clear all ordering from the query.
"""
errors = []
for item in ordering:
if isinstance(item, str):
if item == "?":
continue
if item.startswith("-"):
item = item[1:]
if item in self.annotations:
continue
if self.extra and item in self.extra:
continue
# names_to_path() validates the lookup. A descriptive
                # FieldError will be raised if it's not.
self.names_to_path(item.split(LOOKUP_SEP), self.model._meta)
elif not hasattr(item, "resolve_expression"):
errors.append(item)
if getattr(item, "contains_aggregate", False):
raise FieldError(
"Using an aggregate in order_by() without also including "
"it in annotate() is not allowed: %s" % item
)
if errors:
raise FieldError("Invalid order_by arguments: %s" % errors)
if ordering:
self.order_by += ordering
else:
self.default_ordering = False
def clear_ordering(self, force=False, clear_default=True):
"""
Remove any ordering settings if the current query allows it without
        side effects; set 'force' to True to clear the ordering regardless.
If 'clear_default' is True, there will be no ordering in the resulting
query (not even the model's default).
"""
if not force and (
self.is_sliced or self.distinct_fields or self.select_for_update
):
return
self.order_by = ()
self.extra_order_by = ()
if clear_default:
self.default_ordering = False
def set_group_by(self, allow_aliases=True):
"""
Expand the GROUP BY clause required by the query.
This will usually be the set of all non-aggregate fields in the
return data. If the database backend supports grouping by the
primary key, and the query would be equivalent, the optimization
will be made automatically.
"""
if allow_aliases:
# Column names from JOINs to check collisions with aliases.
column_names = set()
seen_models = set()
for join in list(self.alias_map.values())[1:]: # Skip base table.
model = join.join_field.related_model
if model not in seen_models:
column_names.update(
{field.column for field in model._meta.local_concrete_fields}
)
seen_models.add(model)
if self.values_select:
                # If grouping by aliases is allowed, give the selected values
                # aliases by moving them to annotations.
group_by_annotations = {}
values_select = {}
for alias, expr in zip(self.values_select, self.select):
if isinstance(expr, Col):
values_select[alias] = expr
else:
group_by_annotations[alias] = expr
self.annotations = {**group_by_annotations, **self.annotations}
self.append_annotation_mask(group_by_annotations)
self.select = tuple(values_select.values())
self.values_select = tuple(values_select)
group_by = list(self.select)
for alias, annotation in self.annotation_select.items():
if not (group_by_cols := annotation.get_group_by_cols()):
continue
if (
allow_aliases
and alias not in column_names
and not annotation.contains_aggregate
):
group_by.append(Ref(alias, annotation))
else:
group_by.extend(group_by_cols)
self.group_by = tuple(group_by)
def add_select_related(self, fields):
"""
Set up the select_related data structure so that we only select
certain related models (as opposed to all models, when
self.select_related=True).
"""
if isinstance(self.select_related, bool):
field_dict = {}
else:
field_dict = self.select_related
for field in fields:
d = field_dict
for part in field.split(LOOKUP_SEP):
d = d.setdefault(part, {})
self.select_related = field_dict
def add_extra(self, select, select_params, where, params, tables, order_by):
"""
Add data to the various extra_* attributes for user-created additions
to the query.
"""
if select:
# We need to pair any placeholder markers in the 'select'
# dictionary with their parameters in 'select_params' so that
# subsequent updates to the select dictionary also adjust the
# parameters appropriately.
select_pairs = {}
if select_params:
param_iter = iter(select_params)
else:
param_iter = iter([])
for name, entry in select.items():
self.check_alias(name)
entry = str(entry)
entry_params = []
pos = entry.find("%s")
while pos != -1:
if pos == 0 or entry[pos - 1] != "%":
entry_params.append(next(param_iter))
pos = entry.find("%s", pos + 2)
select_pairs[name] = (entry, entry_params)
self.extra.update(select_pairs)
if where or params:
self.where.add(ExtraWhere(where, params), AND)
if tables:
self.extra_tables += tuple(tables)
if order_by:
self.extra_order_by = order_by
def clear_deferred_loading(self):
"""Remove any fields from the deferred loading set."""
self.deferred_loading = (frozenset(), True)
def add_deferred_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
exclude from loading from the database when automatic column selection
is done. Add the new field names to any existing field names that
are deferred (or removed from any existing field names that are marked
as the only ones for immediate loading).
"""
# Fields on related models are stored in the literal double-underscore
        # format, so that we can use a set data structure. We do the foo__bar
# splitting and handling when computing the SQL column names (as part of
# get_columns()).
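        # Sketch of the bookkeeping (assuming fields "name" and "age"):
        # qs.defer("name").defer("age") accumulates ({"name", "age"}, True),
        # while qs.only("name", "age").defer("age") ends up as
        # ({"name"}, False).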
existing, defer = self.deferred_loading
if defer:
# Add to existing deferred names.
self.deferred_loading = existing.union(field_names), True
else:
# Remove names from the set of any existing "immediate load" names.
if new_existing := existing.difference(field_names):
self.deferred_loading = new_existing, False
else:
self.clear_deferred_loading()
if new_only := set(field_names).difference(existing):
self.deferred_loading = new_only, True
def add_immediate_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
retrieve when the SQL is executed ("immediate loading" fields). The
field names replace any existing immediate loading field names. If
there are field names already specified for deferred loading, remove
those names from the new field_names before storing the new names
for immediate loading. (That is, immediate loading overrides any
existing immediate values, but respects existing deferrals.)
"""
existing, defer = self.deferred_loading
field_names = set(field_names)
if "pk" in field_names:
field_names.remove("pk")
field_names.add(self.get_meta().pk.name)
if defer:
# Remove any existing deferred names from the current set before
# setting the new names.
self.deferred_loading = field_names.difference(existing), False
else:
# Replace any existing "immediate load" field names.
self.deferred_loading = frozenset(field_names), False
def set_annotation_mask(self, names):
"""Set the mask of annotations that will be returned by the SELECT."""
if names is None:
self.annotation_select_mask = None
else:
self.annotation_select_mask = set(names)
self._annotation_select_cache = None
def append_annotation_mask(self, names):
if self.annotation_select_mask is not None:
self.set_annotation_mask(self.annotation_select_mask.union(names))
def set_extra_mask(self, names):
"""
Set the mask of extra select items that will be returned by SELECT.
Don't remove them from the Query since they might be used later.
"""
if names is None:
self.extra_select_mask = None
else:
self.extra_select_mask = set(names)
self._extra_select_cache = None
def set_values(self, fields):
self.select_related = False
self.clear_deferred_loading()
self.clear_select_fields()
self.has_select_fields = True
if fields:
field_names = []
extra_names = []
annotation_names = []
if not self.extra and not self.annotations:
# Shortcut - if there are no extra or annotations, then
# the values() clause must be just field names.
field_names = list(fields)
else:
self.default_cols = False
for f in fields:
if f in self.extra_select:
extra_names.append(f)
elif f in self.annotation_select:
annotation_names.append(f)
else:
field_names.append(f)
self.set_extra_mask(extra_names)
self.set_annotation_mask(annotation_names)
selected = frozenset(field_names + extra_names + annotation_names)
else:
field_names = [f.attname for f in self.model._meta.concrete_fields]
selected = frozenset(field_names)
# Selected annotations must be known before setting the GROUP BY
# clause.
if self.group_by is True:
self.add_fields(
(f.attname for f in self.model._meta.concrete_fields), False
)
# Disable GROUP BY aliases to avoid orphaning references to the
# SELECT clause which is about to be cleared.
self.set_group_by(allow_aliases=False)
self.clear_select_fields()
elif self.group_by:
# Resolve GROUP BY annotation references if they are not part of
# the selected fields anymore.
group_by = []
for expr in self.group_by:
if isinstance(expr, Ref) and expr.refs not in selected:
expr = self.annotations[expr.refs]
group_by.append(expr)
self.group_by = tuple(group_by)
self.values_select = tuple(field_names)
self.add_fields(field_names, True)
@property
def annotation_select(self):
"""
Return the dictionary of aggregate columns that are not masked and
should be used in the SELECT clause. Cache this result for performance.
"""
if self._annotation_select_cache is not None:
return self._annotation_select_cache
elif not self.annotations:
return {}
elif self.annotation_select_mask is not None:
self._annotation_select_cache = {
k: v
for k, v in self.annotations.items()
if k in self.annotation_select_mask
}
return self._annotation_select_cache
else:
return self.annotations
@property
def extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
if not self.extra:
return {}
elif self.extra_select_mask is not None:
self._extra_select_cache = {
k: v for k, v in self.extra.items() if k in self.extra_select_mask
}
return self._extra_select_cache
else:
return self.extra
def trim_start(self, names_with_path):
"""
Trim joins from the start of the join path. The candidates for trim
are the PathInfos in names_with_path structure that are m2m joins.
Also set the select column so the start matches the join.
This method is meant to be used for generating the subquery joins &
cols in split_exclude().
Return a lookup usable for doing outerq.filter(lookup=self) and a
boolean indicating if the joins in the prefix contain a LEFT OUTER join.
_"""
all_paths = []
for _, paths in names_with_path:
all_paths.extend(paths)
contains_louter = False
# Trim and operate only on tables that were generated for
# the lookup part of the query. That is, avoid trimming
# joins generated for F() expressions.
lookup_tables = [
t for t in self.alias_map if t in self._lookup_joins or t == self.base_table
]
for trimmed_paths, path in enumerate(all_paths):
if path.m2m:
break
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER:
contains_louter = True
alias = lookup_tables[trimmed_paths]
self.unref_alias(alias)
        # The path.join_field is a Rel; let's get the other side's field.
join_field = path.join_field.field
# Build the filter prefix.
paths_in_prefix = trimmed_paths
trimmed_prefix = []
for name, path in names_with_path:
if paths_in_prefix - len(path) < 0:
break
trimmed_prefix.append(name)
paths_in_prefix -= len(path)
trimmed_prefix.append(join_field.foreign_related_fields[0].name)
trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
        # Let's still see if we can trim the first join from the inner query
# (that is, self). We can't do this for:
# - LEFT JOINs because we would miss those rows that have nothing on
# the outer side,
# - INNER JOINs from filtered relations because we would miss their
# filters.
first_join = self.alias_map[lookup_tables[trimmed_paths + 1]]
if first_join.join_type != LOUTER and not first_join.filtered_relation:
select_fields = [r[0] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths + 1]
self.unref_alias(lookup_tables[trimmed_paths])
extra_restriction = join_field.get_extra_restriction(
None, lookup_tables[trimmed_paths + 1]
)
if extra_restriction:
self.where.add(extra_restriction, AND)
else:
# TODO: It might be possible to trim more joins from the start of the
# inner query if it happens to have a longer join chain containing the
            # values in select_fields. Let's punt this one for now.
select_fields = [r[1] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths]
# The found starting point is likely a join_class instead of a
# base_table_class reference. But the first entry in the query's FROM
# clause must not be a JOIN.
for table in self.alias_map:
if self.alias_refcount[table] > 0:
self.alias_map[table] = self.base_table_class(
self.alias_map[table].table_name,
table,
)
break
self.set_select([f.get_col(select_alias) for f in select_fields])
return trimmed_prefix, contains_louter
def is_nullable(self, field):
"""
Check if the given field should be treated as nullable.
Some backends treat '' as null and Django treats such fields as
nullable for those backends. In such situations field.null can be
False even if we should treat the field as nullable.
"""
# We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
# (nor should it have) knowledge of which connection is going to be
# used. The proper fix would be to defer all decisions where
# is_nullable() is needed to the compiler stage, but that is not easy
# to do currently.
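        # For example, Oracle reports interprets_empty_strings_as_nulls=True,
        # so a CharField with null=False is still treated as nullable there.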
return field.null or (
field.empty_strings_allowed
and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls
)
def get_order_dir(field, default="ASC"):
"""
Return the field name and direction for an order specification. For
example, '-foo' is returned as ('foo', 'DESC').
The 'default' param is used to indicate which way no prefix (or a '+'
prefix) should sort. The '-' prefix always sorts the opposite way.
"""
dirn = ORDER_DIR[default]
if field[0] == "-":
return field[1:], dirn[1]
return field, dirn[0]
class JoinPromoter:
"""
A class to abstract away join promotion problems for complex filter
conditions.
"""
def __init__(self, connector, num_children, negated):
self.connector = connector
self.negated = negated
if self.negated:
if connector == AND:
self.effective_connector = OR
else:
self.effective_connector = AND
else:
self.effective_connector = self.connector
self.num_children = num_children
# Maps of table alias to how many times it is seen as required for
# inner and/or outer joins.
self.votes = Counter()
def __repr__(self):
return (
f"{self.__class__.__qualname__}(connector={self.connector!r}, "
f"num_children={self.num_children!r}, negated={self.negated!r})"
)
def add_votes(self, votes):
"""
Add single vote per item to self.votes. Parameter can be any
iterable.
"""
self.votes.update(votes)
def update_join_types(self, query):
"""
Change join types so that the generated query is as efficient as
possible, but still correct. So, change as many joins as possible
to INNER, but don't make OUTER joins INNER if that could remove
results from the query.
"""
to_promote = set()
to_demote = set()
# The effective_connector is used so that NOT (a AND b) is treated
# similarly to (a OR b) for join promotion.
for table, votes in self.votes.items():
# We must use outer joins in OR case when the join isn't contained
# in all of the joins. Otherwise the INNER JOIN itself could remove
# valid results. Consider the case where a model with rel_a and
# rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now,
            # if the rel_a join doesn't produce any results (that is, it is
            # null - for example a reverse foreign key with no rows, or a
            # null value in a direct foreign key), and
# there is a matching row in rel_b with col=2, then an INNER join
# to rel_a would remove a valid match from the query. So, we need
# to promote any existing INNER to LOUTER (it is possible this
# promotion in turn will be demoted later on).
if self.effective_connector == OR and votes < self.num_children:
to_promote.add(table)
# If connector is AND and there is a filter that can match only
# when there is a joinable row, then use INNER. For example, in
# rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL
# as join output, then the col=1 or col=2 can't match (as
# NULL=anything is always false).
# For the OR case, if all children voted for a join to be inner,
# then we can use INNER for the join. For example:
# (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell)
# then if rel_a doesn't produce any rows, the whole condition
# can't match. Hence we can safely use INNER join.
if self.effective_connector == AND or (
self.effective_connector == OR and votes == self.num_children
):
to_demote.add(table)
# Finally, what happens in cases where we have:
# (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0
# Now, we first generate the OR clause, and promote joins for it
# in the first if branch above. Both rel_a and rel_b are promoted
# to LOUTER joins. After that we do the AND case. The OR case
# voted no inner joins but the rel_a__col__gte=0 votes inner join
# for rel_a. We demote it back to INNER join (in AND case a single
# vote is enough). The demotion is OK, if rel_a doesn't produce
# rows, then the rel_a__col__gte=0 clause can't be true, and thus
# the whole clause must be false. So, it is safe to use INNER
# join.
# Note that in this example we could just as well have the __gte
# clause and the OR clause swapped. Or we could replace the __gte
# clause with an OR clause containing rel_a__col=1|rel_a__col=2,
# and again we could safely demote to INNER.
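        # Worked example of the voting (aliases assumed): with
        # effective_connector == OR and num_children == 2, votes of
        # {"T1": 1, "T2": 2} promote T1 (missing from one child) and demote
        # T2 (required by every child).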
query.promote_joins(to_promote)
query.demote_joins(to_demote)
return to_demote
|
0d684795642da74b512b8a837d8a4404c4f91a99702c89ba9150fea85fd35875 | import collections
import json
import re
from functools import partial
from itertools import chain
from django.core.exceptions import EmptyResultSet, FieldError, FullResultSet
from django.db import DatabaseError, NotSupportedError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import F, OrderBy, RawSQL, Ref, Value
from django.db.models.functions import Cast, Random
from django.db.models.lookups import Lookup
from django.db.models.query_utils import select_related_descend
from django.db.models.sql.constants import (
CURSOR,
GET_ITERATOR_CHUNK_SIZE,
MULTI,
NO_RESULTS,
ORDER_DIR,
SINGLE,
)
from django.db.models.sql.query import Query, get_order_dir
from django.db.models.sql.where import AND
from django.db.transaction import TransactionManagementError
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
from django.utils.regex_helper import _lazy_re_compile
class SQLCompiler:
# Multiline ordering SQL clause may appear from RawSQL.
ordering_parts = _lazy_re_compile(
r"^(.*)\s(?:ASC|DESC).*",
re.MULTILINE | re.DOTALL,
)
def __init__(self, query, connection, using, elide_empty=True):
self.query = query
self.connection = connection
self.using = using
# Some queries, e.g. coalesced aggregation, need to be executed even if
# they would return an empty result set.
self.elide_empty = elide_empty
self.quote_cache = {"*": "*"}
        # The select, klass_info, and annotations are needed by
        # QuerySet.iterator(); these are set as a side-effect of executing the
        # query. Note that we calculate separately a list of extra select
        # columns needed for grammatical correctness of the query, but these
        # columns are not included in self.select.
self.select = None
self.annotation_col_map = None
self.klass_info = None
self._meta_ordering = None
def __repr__(self):
return (
f"<{self.__class__.__qualname__} "
f"model={self.query.model.__qualname__} "
f"connection={self.connection!r} using={self.using!r}>"
)
def setup_query(self, with_col_aliases=False):
if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map):
self.query.get_initial_alias()
self.select, self.klass_info, self.annotation_col_map = self.get_select(
with_col_aliases=with_col_aliases,
)
self.col_count = len(self.select)
def pre_sql_setup(self, with_col_aliases=False):
"""
Do any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
self.setup_query(with_col_aliases=with_col_aliases)
order_by = self.get_order_by()
self.where, self.having, self.qualify = self.query.where.split_having_qualify(
must_group_by=self.query.group_by is not None
)
extra_select = self.get_extra_select(order_by, self.select)
self.has_extra_select = bool(extra_select)
group_by = self.get_group_by(self.select + extra_select, order_by)
return extra_select, order_by, group_by
def get_group_by(self, select, order_by):
"""
Return a list of 2-tuples of form (sql, params).
The logic of what exactly the GROUP BY clause contains is hard
to describe in other words than "if it passes the test suite,
then it is correct".
"""
# Some examples:
# SomeModel.objects.annotate(Count('somecol'))
# GROUP BY: all fields of the model
#
# SomeModel.objects.values('name').annotate(Count('somecol'))
# GROUP BY: name
#
# SomeModel.objects.annotate(Count('somecol')).values('name')
# GROUP BY: all cols of the model
#
# SomeModel.objects.values('name', 'pk')
# .annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# SomeModel.objects.values('name').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
        # In fact, the self.query.group_by is the minimal set to GROUP BY. It
        # can never be restricted to a smaller set, but additional columns in
        # HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately
        # the end result is that it is impossible to force the query to have
        # a chosen GROUP BY clause - you can almost do this by using the form:
        #     .values(*wanted_cols).annotate(AnAggregate())
        # but any later annotations, extra selects, values calls that
        # refer to some column outside of the wanted_cols, order_by, or even
        # filter calls can alter the GROUP BY clause.
# The query.group_by is either None (no GROUP BY at all), True
# (group by select fields), or a list of expressions to be added
# to the group by.
if self.query.group_by is None:
return []
expressions = []
allows_group_by_refs = self.connection.features.allows_group_by_refs
if self.query.group_by is not True:
# If the group by is set to a list (by .values() call most likely),
# then we need to add everything in it to the GROUP BY clause.
# Backwards compatibility hack for setting query.group_by. Remove
# when we have public API way of forcing the GROUP BY clause.
# Converts string references to expressions.
for expr in self.query.group_by:
if not hasattr(expr, "as_sql"):
expr = self.query.resolve_ref(expr)
if not allows_group_by_refs and isinstance(expr, Ref):
expr = expr.source
expressions.append(expr)
# Note that even if the group_by is set, it is only the minimal
# set to group by. So, we need to add cols in select, order_by, and
        # having into the GROUP BY clause in any case.
ref_sources = {expr.source for expr in expressions if isinstance(expr, Ref)}
aliased_exprs = {}
for expr, _, alias in select:
# Skip members of the select clause that are already included
# by reference.
if expr in ref_sources:
continue
if alias:
aliased_exprs[expr] = alias
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
if not self._meta_ordering:
for expr, (sql, params, is_ref) in order_by:
# Skip references to the SELECT clause, as all expressions in
# the SELECT clause are already part of the GROUP BY.
if not is_ref:
expressions.extend(expr.get_group_by_cols())
having_group_by = self.having.get_group_by_cols() if self.having else ()
for expr in having_group_by:
expressions.append(expr)
result = []
seen = set()
expressions = self.collapse_group_by(expressions, having_group_by)
for expr in expressions:
if allows_group_by_refs and (alias := aliased_exprs.get(expr)):
expr = Ref(alias, expr)
try:
sql, params = self.compile(expr)
except (EmptyResultSet, FullResultSet):
continue
sql, params = expr.select_format(self, sql, params)
params_hash = make_hashable(params)
if (sql, params_hash) not in seen:
result.append((sql, params))
seen.add((sql, params_hash))
return result
def collapse_group_by(self, expressions, having):
# If the database supports group by functional dependence reduction,
# then the expressions can be reduced to the set of selected table
# primary keys as all other columns are functionally dependent on them.
if self.connection.features.allows_group_by_selected_pks:
# Filter out all expressions associated with a table's primary key
# present in the grouped columns. This is done by identifying all
# tables that have their primary key included in the grouped
# columns and removing non-primary key columns referring to them.
# Unmanaged models are excluded because they could be representing
# database views on which the optimization might not be allowed.
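            # For example, GROUP BY "author"."id", "author"."name" collapses
            # to GROUP BY "author"."id" on backends (such as PostgreSQL) that
            # recognize this functional dependence.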
pks = {
expr
for expr in expressions
if (
hasattr(expr, "target")
and expr.target.primary_key
and self.connection.features.allows_group_by_selected_pks_on_model(
expr.target.model
)
)
}
aliases = {expr.alias for expr in pks}
expressions = [
expr
for expr in expressions
if expr in pks
or expr in having
or getattr(expr, "alias", None) not in aliases
]
return expressions
def get_select(self, with_col_aliases=False):
"""
Return three values:
- a list of 3-tuples of (expression, (sql, params), alias)
- a klass_info structure,
- a dictionary of annotations
The (sql, params) is what the expression will produce, and alias is the
"AS alias" for the column (possibly None).
The klass_info structure contains the following information:
- The base model of the query.
- Which columns for that model are present in the query (by
position of the select clause).
        - related_klass_infos: [f, klass_info] to descend into.
The annotations is a dictionary of {'attname': column position} values.
"""
select = []
klass_info = None
annotations = {}
select_idx = 0
for alias, (sql, params) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += 1
assert not (self.query.select and self.query.default_cols)
select_mask = self.query.get_select_mask()
if self.query.default_cols:
cols = self.get_default_columns(select_mask)
else:
# self.query.select is a special case. These columns never go to
# any model.
cols = self.query.select
if cols:
select_list = []
for col in cols:
select_list.append(select_idx)
select.append((col, None))
select_idx += 1
klass_info = {
"model": self.query.model,
"select_fields": select_list,
}
for alias, annotation in self.query.annotation_select.items():
annotations[alias] = select_idx
select.append((annotation, alias))
select_idx += 1
if self.query.select_related:
related_klass_infos = self.get_related_selections(select, select_mask)
klass_info["related_klass_infos"] = related_klass_infos
def get_select_from_parent(klass_info):
for ki in klass_info["related_klass_infos"]:
if ki["from_parent"]:
ki["select_fields"] = (
klass_info["select_fields"] + ki["select_fields"]
)
get_select_from_parent(ki)
get_select_from_parent(klass_info)
ret = []
col_idx = 1
for col, alias in select:
try:
sql, params = self.compile(col)
except EmptyResultSet:
empty_result_set_value = getattr(
col, "empty_result_set_value", NotImplemented
)
if empty_result_set_value is NotImplemented:
# Select a predicate that's always False.
sql, params = "0", ()
else:
sql, params = self.compile(Value(empty_result_set_value))
except FullResultSet:
sql, params = self.compile(Value(True))
else:
sql, params = col.select_format(self, sql, params)
if alias is None and with_col_aliases:
alias = f"col{col_idx}"
col_idx += 1
ret.append((col, (sql, params), alias))
return ret, klass_info, annotations
def _order_by_pairs(self):
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
elif self.query.order_by:
ordering = self.query.order_by
elif (meta := self.query.get_meta()) and meta.ordering:
ordering = meta.ordering
self._meta_ordering = ordering
else:
ordering = []
if self.query.standard_ordering:
default_order, _ = ORDER_DIR["ASC"]
else:
default_order, _ = ORDER_DIR["DESC"]
for field in ordering:
if hasattr(field, "resolve_expression"):
if isinstance(field, Value):
# output_field must be resolved for constants.
field = Cast(field, field.output_field)
if not isinstance(field, OrderBy):
field = field.asc()
if not self.query.standard_ordering:
field = field.copy()
field.reverse_ordering()
if isinstance(field.expression, F) and (
annotation := self.query.annotation_select.get(
field.expression.name
)
):
field.expression = Ref(field.expression.name, annotation)
yield field, isinstance(field.expression, Ref)
continue
if field == "?": # random
yield OrderBy(Random()), False
continue
col, order = get_order_dir(field, default_order)
descending = order == "DESC"
if col in self.query.annotation_select:
# Reference to expression in SELECT clause
yield (
OrderBy(
Ref(col, self.query.annotation_select[col]),
descending=descending,
),
True,
)
continue
if col in self.query.annotations:
# References to an expression which is masked out of the SELECT
# clause.
if self.query.combinator and self.select:
# Don't use the resolved annotation because other
                    # combined queries might define it differently.
expr = F(col)
else:
expr = self.query.annotations[col]
if isinstance(expr, Value):
# output_field must be resolved for constants.
expr = Cast(expr, expr.output_field)
yield OrderBy(expr, descending=descending), False
continue
if "." in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split(".", 1)
yield (
OrderBy(
RawSQL(
"%s.%s" % (self.quote_name_unless_alias(table), col), []
),
descending=descending,
),
False,
)
continue
if self.query.extra and col in self.query.extra:
if col in self.query.extra_select:
yield (
OrderBy(
Ref(col, RawSQL(*self.query.extra[col])),
descending=descending,
),
True,
)
else:
yield (
OrderBy(RawSQL(*self.query.extra[col]), descending=descending),
False,
)
else:
if self.query.combinator and self.select:
# Don't use the first model's field because other
                    # combined queries might define it differently.
yield OrderBy(F(col), descending=descending), False
else:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
yield from self.find_ordering_name(
field,
self.query.get_meta(),
default_order=default_order,
)
def get_order_by(self):
"""
Return a list of 2-tuples of the form (expr, (sql, params, is_ref)) for
the ORDER BY clause.
The order_by clause can alter the select clause (for example it can add
aliases to clauses that do not yet have one, or it can add totally new
select clauses).
"""
result = []
seen = set()
for expr, is_ref in self._order_by_pairs():
resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None)
if not is_ref and self.query.combinator and self.select:
src = resolved.expression
expr_src = expr.expression
for sel_expr, _, col_alias in self.select:
if src == sel_expr:
# When values() is used the exact alias must be used to
# reference annotations.
if (
self.query.has_select_fields
and col_alias in self.query.annotation_select
and not (
isinstance(expr_src, F) and col_alias == expr_src.name
)
):
continue
resolved.set_source_expressions(
[Ref(col_alias if col_alias else src.target.column, src)]
)
break
else:
# Add column used in ORDER BY clause to the selected
# columns and to each combined query.
order_by_idx = len(self.query.select) + 1
col_alias = f"__orderbycol{order_by_idx}"
for q in self.query.combined_queries:
# If fields were explicitly selected through values()
# combined queries cannot be augmented.
if q.has_select_fields:
raise DatabaseError(
"ORDER BY term does not match any column in "
"the result set."
)
q.add_annotation(expr_src, col_alias)
self.query.add_select_col(resolved, col_alias)
resolved.set_source_expressions([Ref(col_alias, src)])
sql, params = self.compile(resolved)
# Don't add the same column twice, but the order direction is
# not taken into account so we strip it. When this entire method
# is refactored into expressions, then we can check each part as we
# generate it.
without_ordering = self.ordering_parts.search(sql)[1]
params_hash = make_hashable(params)
if (without_ordering, params_hash) in seen:
continue
seen.add((without_ordering, params_hash))
result.append((resolved, (sql, params, is_ref)))
return result
def get_extra_select(self, order_by, select):
extra_select = []
if self.query.distinct and not self.query.distinct_fields:
select_sql = [t[1] for t in select]
for expr, (sql, params, is_ref) in order_by:
without_ordering = self.ordering_parts.search(sql)[1]
if not is_ref and (without_ordering, params) not in select_sql:
extra_select.append((expr, (without_ordering, params), None))
return extra_select
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if (
(name in self.query.alias_map and name not in self.query.table_map)
or name in self.query.extra_select
or (
self.query.external_aliases.get(name)
and name not in self.query.table_map
)
):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def compile(self, node):
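        # Prefer a vendor-specific implementation, e.g. node.as_postgresql(
        # compiler, connection) when connection.vendor == "postgresql";
        # otherwise fall back to the generic node.as_sql().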
vendor_impl = getattr(node, "as_" + self.connection.vendor, None)
if vendor_impl:
sql, params = vendor_impl(self, self.connection)
else:
sql, params = node.as_sql(self, self.connection)
return sql, params
def get_combinator_sql(self, combinator, all):
features = self.connection.features
compilers = [
query.get_compiler(self.using, self.connection, self.elide_empty)
for query in self.query.combined_queries
]
if not features.supports_slicing_ordering_in_compound:
for compiler in compilers:
if compiler.query.is_sliced:
raise DatabaseError(
"LIMIT/OFFSET not allowed in subqueries of compound statements."
)
if compiler.get_order_by():
raise DatabaseError(
"ORDER BY not allowed in subqueries of compound statements."
)
elif self.query.is_sliced and combinator == "union":
limit = (self.query.low_mark, self.query.high_mark)
for compiler in compilers:
# A sliced union cannot have its parts elided as some of them
# might be sliced as well and in the event where only a single
# part produces a non-empty resultset it might be impossible to
# generate valid SQL.
compiler.elide_empty = False
if not compiler.query.is_sliced:
compiler.query.set_limits(*limit)
parts = ()
for compiler in compilers:
try:
# If the columns list is limited, then all combined queries
# must have the same columns list. Set the selects defined on
# the query on all combined queries, if not already set.
if not compiler.query.values_select and self.query.values_select:
compiler.query = compiler.query.clone()
compiler.query.set_values(
(
*self.query.extra_select,
*self.query.values_select,
*self.query.annotation_select,
)
)
part_sql, part_args = compiler.as_sql(with_col_aliases=True)
if compiler.query.combinator:
# Wrap in a subquery if wrapping in parentheses isn't
# supported.
if not features.supports_parentheses_in_compound:
part_sql = "SELECT * FROM ({})".format(part_sql)
# Add parentheses when combining with compound query if not
# already added for all compound queries.
elif (
self.query.subquery
or not features.supports_slicing_ordering_in_compound
):
part_sql = "({})".format(part_sql)
elif (
self.query.subquery
and features.supports_slicing_ordering_in_compound
):
part_sql = "({})".format(part_sql)
parts += ((part_sql, part_args),)
except EmptyResultSet:
# Omit the empty queryset with UNION and with DIFFERENCE if the
# first queryset is nonempty.
if combinator == "union" or (combinator == "difference" and parts):
continue
raise
if not parts:
raise EmptyResultSet
combinator_sql = self.connection.ops.set_operators[combinator]
if all and combinator == "union":
combinator_sql += " ALL"
braces = "{}"
if not self.query.subquery and features.supports_slicing_ordering_in_compound:
braces = "({})"
sql_parts, args_parts = zip(
*((braces.format(sql), args) for sql, args in parts)
)
result = [" {} ".format(combinator_sql).join(sql_parts)]
params = []
for part in args_parts:
params.extend(part)
return result, params
def get_qualify_sql(self):
where_parts = []
if self.where:
where_parts.append(self.where)
if self.having:
where_parts.append(self.having)
inner_query = self.query.clone()
inner_query.subquery = True
inner_query.where = inner_query.where.__class__(where_parts)
# Augment the inner query with any window function references that
# might have been masked via values() and alias(). If any masked
# aliases are added they'll be masked again to avoid fetching
# the data in the `if qual_aliases` branch below.
select = {
expr: alias for expr, _, alias in self.get_select(with_col_aliases=True)[0]
}
select_aliases = set(select.values())
qual_aliases = set()
replacements = {}
def collect_replacements(expressions):
while expressions:
expr = expressions.pop()
if expr in replacements:
continue
elif select_alias := select.get(expr):
replacements[expr] = select_alias
elif isinstance(expr, Lookup):
expressions.extend(expr.get_source_expressions())
elif isinstance(expr, Ref):
if expr.refs not in select_aliases:
expressions.extend(expr.get_source_expressions())
else:
num_qual_alias = len(qual_aliases)
select_alias = f"qual{num_qual_alias}"
qual_aliases.add(select_alias)
inner_query.add_annotation(expr, select_alias)
replacements[expr] = select_alias
collect_replacements(list(self.qualify.leaves()))
self.qualify = self.qualify.replace_expressions(
{expr: Ref(alias, expr) for expr, alias in replacements.items()}
)
order_by = []
for order_by_expr, *_ in self.get_order_by():
collect_replacements(order_by_expr.get_source_expressions())
order_by.append(
order_by_expr.replace_expressions(
{expr: Ref(alias, expr) for expr, alias in replacements.items()}
)
)
inner_query_compiler = inner_query.get_compiler(
self.using, elide_empty=self.elide_empty
)
inner_sql, inner_params = inner_query_compiler.as_sql(
# The limits must be applied to the outer query to avoid pruning
# results too eagerly.
with_limits=False,
# Force unique aliasing of selected columns to avoid collisions
# and make rhs predicates referencing easier.
with_col_aliases=True,
)
qualify_sql, qualify_params = self.compile(self.qualify)
result = [
"SELECT * FROM (",
inner_sql,
")",
self.connection.ops.quote_name("qualify"),
"WHERE",
qualify_sql,
]
if qual_aliases:
# If some select aliases were unmasked for filtering purposes they
# must be masked back.
cols = [self.connection.ops.quote_name(alias) for alias in select.values()]
result = [
"SELECT",
", ".join(cols),
"FROM (",
*result,
")",
self.connection.ops.quote_name("qualify_mask"),
]
params = list(inner_params) + qualify_params
        # As the SQL spec is unclear on whether or not derived-table ordering
        # must propagate, it has to be explicitly repeated on the outermost
        # query to ensure it's preserved.
if order_by:
ordering_sqls = []
for ordering in order_by:
ordering_sql, ordering_params = self.compile(ordering)
ordering_sqls.append(ordering_sql)
params.extend(ordering_params)
result.extend(["ORDER BY", ", ".join(ordering_sqls)])
return result, params
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
refcounts_before = self.query.alias_refcount.copy()
try:
combinator = self.query.combinator
extra_select, order_by, group_by = self.pre_sql_setup(
with_col_aliases=with_col_aliases or bool(combinator),
)
for_update_part = None
# Is a LIMIT/OFFSET clause needed?
with_limit_offset = with_limits and self.query.is_sliced
combinator = self.query.combinator
features = self.connection.features
if combinator:
if not getattr(features, "supports_select_{}".format(combinator)):
raise NotSupportedError(
"{} is not supported on this database backend.".format(
combinator
)
)
result, params = self.get_combinator_sql(
combinator, self.query.combinator_all
)
elif self.qualify:
result, params = self.get_qualify_sql()
order_by = None
else:
distinct_fields, distinct_params = self.get_distinct()
# This must come after 'select', 'ordering', and 'distinct'
# (see docstring of get_from_clause() for details).
from_, f_params = self.get_from_clause()
try:
where, w_params = (
self.compile(self.where) if self.where is not None else ("", [])
)
except EmptyResultSet:
if self.elide_empty:
raise
# Use a predicate that's always False.
where, w_params = "0 = 1", []
except FullResultSet:
where, w_params = "", []
try:
having, h_params = (
self.compile(self.having)
if self.having is not None
else ("", [])
)
except FullResultSet:
having, h_params = "", []
result = ["SELECT"]
params = []
if self.query.distinct:
distinct_result, distinct_params = self.connection.ops.distinct_sql(
distinct_fields,
distinct_params,
)
result += distinct_result
params += distinct_params
out_cols = []
for _, (s_sql, s_params), alias in self.select + extra_select:
if alias:
s_sql = "%s AS %s" % (
s_sql,
self.connection.ops.quote_name(alias),
)
params.extend(s_params)
out_cols.append(s_sql)
result += [", ".join(out_cols)]
if from_:
result += ["FROM", *from_]
elif self.connection.features.bare_select_suffix:
result += [self.connection.features.bare_select_suffix]
params.extend(f_params)
if self.query.select_for_update and features.has_select_for_update:
if (
self.connection.get_autocommit()
# Don't raise an exception when database doesn't
# support transactions, as it's a noop.
and features.supports_transactions
):
raise TransactionManagementError(
"select_for_update cannot be used outside of a transaction."
)
if (
with_limit_offset
and not features.supports_select_for_update_with_limit
):
raise NotSupportedError(
"LIMIT/OFFSET is not supported with "
"select_for_update on this database backend."
)
nowait = self.query.select_for_update_nowait
skip_locked = self.query.select_for_update_skip_locked
of = self.query.select_for_update_of
no_key = self.query.select_for_no_key_update
# If it's a NOWAIT/SKIP LOCKED/OF/NO KEY query but the
# backend doesn't support it, raise NotSupportedError to
# prevent a possible deadlock.
if nowait and not features.has_select_for_update_nowait:
raise NotSupportedError(
"NOWAIT is not supported on this database backend."
)
elif skip_locked and not features.has_select_for_update_skip_locked:
raise NotSupportedError(
"SKIP LOCKED is not supported on this database backend."
)
elif of and not features.has_select_for_update_of:
raise NotSupportedError(
"FOR UPDATE OF is not supported on this database backend."
)
elif no_key and not features.has_select_for_no_key_update:
raise NotSupportedError(
"FOR NO KEY UPDATE is not supported on this "
"database backend."
)
for_update_part = self.connection.ops.for_update_sql(
nowait=nowait,
skip_locked=skip_locked,
of=self.get_select_for_update_of_arguments(),
no_key=no_key,
)
if for_update_part and features.for_update_after_from:
result.append(for_update_part)
if where:
result.append("WHERE %s" % where)
params.extend(w_params)
grouping = []
for g_sql, g_params in group_by:
grouping.append(g_sql)
params.extend(g_params)
if grouping:
if distinct_fields:
raise NotImplementedError(
"annotate() + distinct(fields) is not implemented."
)
order_by = order_by or self.connection.ops.force_no_ordering()
result.append("GROUP BY %s" % ", ".join(grouping))
if self._meta_ordering:
order_by = None
if having:
result.append("HAVING %s" % having)
params.extend(h_params)
if self.query.explain_info:
result.insert(
0,
self.connection.ops.explain_query_prefix(
self.query.explain_info.format,
**self.query.explain_info.options,
),
)
if order_by:
ordering = []
for _, (o_sql, o_params, _) in order_by:
ordering.append(o_sql)
params.extend(o_params)
order_by_sql = "ORDER BY %s" % ", ".join(ordering)
if combinator and features.requires_compound_order_by_subquery:
result = ["SELECT * FROM (", *result, ")", order_by_sql]
else:
result.append(order_by_sql)
if with_limit_offset:
result.append(
self.connection.ops.limit_offset_sql(
self.query.low_mark, self.query.high_mark
)
)
if for_update_part and not features.for_update_after_from:
result.append(for_update_part)
if self.query.subquery and extra_select:
# If the query is used as a subquery, the extra selects would
# result in more columns than the left-hand side expression is
# expecting. This can happen when a subquery uses a combination
# of order_by() and distinct(), forcing the ordering expressions
# to be selected as well. Wrap the query in another subquery
# to exclude extraneous selects.
sub_selects = []
sub_params = []
for index, (select, _, alias) in enumerate(self.select, start=1):
if alias:
sub_selects.append(
"%s.%s"
% (
self.connection.ops.quote_name("subquery"),
self.connection.ops.quote_name(alias),
)
)
else:
select_clone = select.relabeled_clone(
{select.alias: "subquery"}
)
subselect, subparams = select_clone.as_sql(
self, self.connection
)
sub_selects.append(subselect)
sub_params.extend(subparams)
return "SELECT %s FROM (%s) subquery" % (
", ".join(sub_selects),
" ".join(result),
), tuple(sub_params + params)
return " ".join(result), tuple(params)
finally:
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(refcounts_before)
def get_default_columns(
self, select_mask, start_alias=None, opts=None, from_parent=None
):
"""
Compute the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
        Return a list of column expressions suitable for inclusion in the
        SELECT clause, one per concrete field that should be loaded.
"""
result = []
if opts is None:
if (opts := self.query.get_meta()) is None:
return result
start_alias = start_alias or self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field in opts.concrete_fields:
model = field.model._meta.concrete_model
# A proxy model will have a different model and concrete_model. We
# will assign None if the field belongs to this model.
if model == opts.model:
model = None
if (
from_parent
and model is not None
and issubclass(
from_parent._meta.concrete_model, model._meta.concrete_model
)
):
# Avoid loading data for already loaded parents.
# We end up here in the case select_related() resolution
# proceeds from parent model to child model. In that case the
# parent model data is already present in the SELECT clause,
# and we want to avoid reloading the same data again.
continue
if select_mask and field not in select_mask:
continue
alias = self.query.join_parent_model(opts, model, start_alias, seen_models)
column = field.get_col(alias)
result.append(column)
return result
def get_distinct(self):
"""
Return a quoted list of fields to use in DISTINCT ON part of the query.
This method can alter the tables in the query, and thus it must be
called before get_from_clause().
"""
result = []
params = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _, transform_function = self._setup_joins(
parts, opts, None
)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
if name in self.query.annotation_select:
result.append(self.connection.ops.quote_name(name))
else:
r, p = self.compile(transform_function(target, alias))
result.append(r)
params.append(p)
return result, params
def find_ordering_name(
self, name, opts, alias=None, default_order="ASC", already_seen=None
):
"""
Return the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
descending = order == "DESC"
pieces = name.split(LOOKUP_SEP)
(
field,
targets,
alias,
joins,
path,
opts,
transform_function,
) = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless it is the pk
# shortcut or the attribute name of the field that is specified or
# there are transforms to process.
if (
field.is_relation
and opts.ordering
and getattr(field, "attname", None) != pieces[-1]
and name != "pk"
and not getattr(transform_function, "has_transforms", False)
):
# Firstly, avoid infinite loops.
already_seen = already_seen or set()
join_tuple = tuple(
getattr(self.query.alias_map[j], "join_cols", None) for j in joins
)
if join_tuple in already_seen:
raise FieldError("Infinite loop caused by ordering.")
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
if hasattr(item, "resolve_expression") and not isinstance(
item, OrderBy
):
item = item.desc() if descending else item.asc()
if isinstance(item, OrderBy):
results.append(
(item.prefix_references(f"{name}{LOOKUP_SEP}"), False)
)
continue
results.extend(
(expr.prefix_references(f"{name}{LOOKUP_SEP}"), is_ref)
for expr, is_ref in self.find_ordering_name(
item, opts, alias, order, already_seen
)
)
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [
(OrderBy(transform_function(t, alias), descending=descending), False)
for t in targets
]
def _setup_joins(self, pieces, opts, alias):
"""
Helper method for get_order_by() and get_distinct().
        get_order_by() and get_distinct() must produce the same target
        columns on the same input, as their prefixes must match. Executing
        SQL where this is not true is an error.
"""
alias = alias or self.query.get_initial_alias()
field, targets, opts, joins, path, transform_function = self.query.setup_joins(
pieces, opts, alias
)
alias = joins[-1]
return field, targets, alias, joins, path, opts, transform_function
def get_from_clause(self):
"""
Return a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Subclasses, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables that are needed. This means the select columns,
ordering, and distinct must be done first.
"""
result = []
params = []
for alias in tuple(self.query.alias_map):
if not self.query.alias_refcount[alias]:
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
clause_sql, clause_params = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
alias, _ = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if (
alias not in self.query.alias_map
or self.query.alias_refcount[alias] == 1
):
result.append(", %s" % self.quote_name_unless_alias(alias))
return result, params
def get_related_selections(
self,
select,
select_mask,
opts=None,
root_alias=None,
cur_depth=1,
requested=None,
restricted=None,
):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (
f.field.related_query_name()
for f in opts.related_objects
if f.field.unique
)
return chain(
direct_choices, reverse_choices, self.query._filtered_relations
)
related_klass_infos = []
if not restricted and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return related_klass_infos
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
# Setup for the case when only particular related fields should be
# included in the related selection.
fields_found = set()
if requested is None:
restricted = isinstance(self.query.select_related, dict)
if restricted:
requested = self.query.select_related
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info["related_klass_infos"] = related_klass_infos
for f in opts.fields:
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if not f.is_relation:
# If a non-related field is used like a relation,
# or if a single non-relational field is given.
if next or f.name in requested:
raise FieldError(
"Non-relational field given in select_related: '%s'. "
"Choices are: %s"
% (
f.name,
", ".join(_get_field_choices()) or "(none)",
)
)
else:
next = False
if not select_related_descend(f, restricted, requested, select_mask):
continue
related_select_mask = select_mask.get(f) or {}
klass_info = {
"model": f.remote_field.model,
"field": f,
"reverse": False,
"local_setter": f.set_cached_value,
"remote_setter": f.remote_field.set_cached_value
if f.unique
else lambda x, y: None,
"from_parent": False,
}
related_klass_infos.append(klass_info)
select_fields = []
_, _, _, joins, _, _ = self.query.setup_joins([f.name], opts, root_alias)
alias = joins[-1]
columns = self.get_default_columns(
related_select_mask, start_alias=alias, opts=f.remote_field.model._meta
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next_klass_infos = self.get_related_selections(
select,
related_select_mask,
f.remote_field.model._meta,
alias,
cur_depth + 1,
next,
restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [
(o.field, o.related_model)
for o in opts.related_objects
if o.field.unique and not o.many_to_many
]
for f, model in related_fields:
related_select_mask = select_mask.get(f) or {}
if not select_related_descend(
f, restricted, requested, related_select_mask, reverse=True
):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
join_info = self.query.setup_joins(
[related_field_name], opts, root_alias
)
alias = join_info.joins[-1]
from_parent = issubclass(model, opts.model) and model is not opts.model
klass_info = {
"model": model,
"field": f,
"reverse": True,
"local_setter": f.remote_field.set_cached_value,
"remote_setter": f.set_cached_value,
"from_parent": from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
related_select_mask,
start_alias=alias,
opts=model._meta,
from_parent=opts.model,
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(
select,
related_select_mask,
model._meta,
alias,
cur_depth + 1,
next,
restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
def local_setter(obj, from_obj):
# Set a reverse fk object when relation is non-empty.
if from_obj:
f.remote_field.set_cached_value(from_obj, obj)
def remote_setter(name, obj, from_obj):
setattr(from_obj, name, obj)
for name in list(requested):
# Filtered relations work only on the topmost level.
if cur_depth > 1:
break
if name in self.query._filtered_relations:
fields_found.add(name)
f, _, join_opts, joins, _, _ = self.query.setup_joins(
[name], opts, root_alias
)
model = join_opts.model
alias = joins[-1]
from_parent = (
issubclass(model, opts.model) and model is not opts.model
)
klass_info = {
"model": model,
"field": f,
"reverse": True,
"local_setter": local_setter,
"remote_setter": partial(remote_setter, name),
"from_parent": from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
field_select_mask = select_mask.get((name, f)) or {}
columns = self.get_default_columns(
field_select_mask,
start_alias=alias,
opts=model._meta,
from_parent=opts.model,
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next_requested = requested.get(name, {})
next_klass_infos = self.get_related_selections(
select,
field_select_mask,
opts=model._meta,
root_alias=alias,
cur_depth=cur_depth + 1,
requested=next_requested,
restricted=restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested).difference(fields_found)
if fields_not_found:
invalid_fields = ("'%s'" % s for s in fields_not_found)
raise FieldError(
"Invalid field name(s) given in select_related: %s. "
"Choices are: %s"
% (
", ".join(invalid_fields),
", ".join(_get_field_choices()) or "(none)",
)
)
return related_klass_infos
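    # Illustration (hedged): for queryset.select_related("author__publisher"),
    # self.query.select_related is the nested dict {"author": {"publisher": {}}}
    # and each nesting level becomes one recursive get_related_selections()
    # call, with `requested` bound to the inner dict and cur_depth incremented.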
def get_select_for_update_of_arguments(self):
"""
Return a quoted list of arguments for the SELECT FOR UPDATE OF part of
the query.
"""
def _get_parent_klass_info(klass_info):
concrete_model = klass_info["model"]._meta.concrete_model
for parent_model, parent_link in concrete_model._meta.parents.items():
parent_list = parent_model._meta.get_parent_list()
yield {
"model": parent_model,
"field": parent_link,
"reverse": False,
"select_fields": [
select_index
for select_index in klass_info["select_fields"]
# Selected columns from a model or its parents.
if (
self.select[select_index][0].target.model == parent_model
or self.select[select_index][0].target.model in parent_list
)
],
}
def _get_first_selected_col_from_model(klass_info):
"""
            Find the first selected column from a model. If none exists, the
            model isn't locked.
select_fields is filled recursively, so it also contains fields
from the parent models.
"""
concrete_model = klass_info["model"]._meta.concrete_model
for select_index in klass_info["select_fields"]:
if self.select[select_index][0].target.model == concrete_model:
return self.select[select_index][0]
def _get_field_choices():
"""Yield all allowed field paths in breadth-first search order."""
queue = collections.deque([(None, self.klass_info)])
while queue:
parent_path, klass_info = queue.popleft()
if parent_path is None:
path = []
yield "self"
else:
field = klass_info["field"]
if klass_info["reverse"]:
field = field.remote_field
path = parent_path + [field.name]
yield LOOKUP_SEP.join(path)
queue.extend(
(path, klass_info)
for klass_info in _get_parent_klass_info(klass_info)
)
queue.extend(
(path, klass_info)
for klass_info in klass_info.get("related_klass_infos", [])
)
if not self.klass_info:
return []
result = []
invalid_names = []
for name in self.query.select_for_update_of:
klass_info = self.klass_info
if name == "self":
col = _get_first_selected_col_from_model(klass_info)
else:
for part in name.split(LOOKUP_SEP):
klass_infos = (
*klass_info.get("related_klass_infos", []),
*_get_parent_klass_info(klass_info),
)
for related_klass_info in klass_infos:
field = related_klass_info["field"]
if related_klass_info["reverse"]:
field = field.remote_field
if field.name == part:
klass_info = related_klass_info
break
else:
klass_info = None
break
if klass_info is None:
invalid_names.append(name)
continue
col = _get_first_selected_col_from_model(klass_info)
if col is not None:
if self.connection.features.select_for_update_of_column:
result.append(self.compile(col)[0])
else:
result.append(self.quote_name_unless_alias(col.alias))
if invalid_names:
raise FieldError(
"Invalid field name(s) given in select_for_update(of=(...)): %s. "
"Only relational fields followed in the query are allowed. "
"Choices are: %s."
% (
", ".join(invalid_names),
", ".join(_get_field_choices()),
)
)
return result
def get_converters(self, expressions):
converters = {}
for i, expression in enumerate(expressions):
if expression:
backend_converters = self.connection.ops.get_db_converters(expression)
field_converters = expression.get_db_converters(self.connection)
if backend_converters or field_converters:
converters[i] = (backend_converters + field_converters, expression)
return converters
def apply_converters(self, rows, converters):
connection = self.connection
converters = list(converters.items())
for row in map(list, rows):
for pos, (convs, expression) in converters:
value = row[pos]
for converter in convs:
value = converter(value, expression, connection)
row[pos] = value
yield row
def results_iter(
self,
results=None,
tuple_expected=False,
chunked_fetch=False,
chunk_size=GET_ITERATOR_CHUNK_SIZE,
):
"""Return an iterator over the results from executing this query."""
if results is None:
results = self.execute_sql(
MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size
)
fields = [s[0] for s in self.select[0 : self.col_count]]
converters = self.get_converters(fields)
rows = chain.from_iterable(results)
if converters:
rows = self.apply_converters(rows, converters)
if tuple_expected:
rows = map(tuple, rows)
return rows
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
return bool(self.execute_sql(SINGLE))
def execute_sql(
self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE
):
"""
Run the query against the database and return the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
result_type = result_type or NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
if chunked_fetch:
cursor = self.connection.chunked_cursor()
else:
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
# Might fail for server-side cursors (e.g. connection closed)
cursor.close()
raise
if result_type == CURSOR:
# Give the caller the cursor to process and close.
return cursor
if result_type == SINGLE:
try:
val = cursor.fetchone()
if val:
return val[0 : self.col_count]
return val
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
result = cursor_iter(
cursor,
self.connection.features.empty_fetchmany_value,
self.col_count if self.has_extra_select else None,
chunk_size,
)
if not chunked_fetch or not self.connection.features.can_use_chunked_reads:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further. Use chunked_fetch if requested,
# unless the database doesn't support it.
return list(result)
return result
def as_subquery_condition(self, alias, columns, compiler):
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
for index, select_col in enumerate(self.query.select):
lhs_sql, lhs_params = self.compile(select_col)
rhs = "%s.%s" % (qn(alias), qn2(columns[index]))
self.query.where.add(RawSQL("%s = %s" % (lhs_sql, rhs), lhs_params), AND)
sql, params = self.as_sql()
return "EXISTS (%s)" % sql, params
def explain_query(self):
result = list(self.execute_sql())
# Some backends return 1 item tuples with strings, and others return
# tuples with integers and strings. Flatten them out into strings.
format_ = self.query.explain_info.format
output_formatter = json.dumps if format_ and format_.lower() == "json" else str
for row in result[0]:
if not isinstance(row, str):
yield " ".join(output_formatter(c) for c in row)
else:
yield row
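    # Illustration (hedged): MySQL's tabular EXPLAIN yields tuples mixing ints
    # and strings (id, select_type, table, ...), which the formatter above
    # joins into one line per row; backends that already return plain strings
    # pass through the else branch unchanged.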
class SQLInsertCompiler(SQLCompiler):
returning_fields = None
returning_params = ()
def field_as_sql(self, field, val):
"""
Take a field and a value intended to be saved on that field, and
return placeholder SQL and accompanying params. Check for raw values,
expressions, and fields with get_placeholder() defined in that order.
When field is None, consider the value raw and use it as the
placeholder, with no corresponding parameters returned.
"""
if field is None:
# A field value of None means the value is raw.
sql, params = val, []
elif hasattr(val, "as_sql"):
# This is an expression, let's compile it.
sql, params = self.compile(val)
elif hasattr(field, "get_placeholder"):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
sql, params = field.get_placeholder(val, self, self.connection), [val]
else:
# Return the common case for the placeholder
sql, params = "%s", [val]
# The following hook is only used by Oracle Spatial, which sometimes
# needs to yield 'NULL' and [] as its placeholder and params instead
# of '%s' and [None]. The 'NULL' placeholder is produced earlier by
# OracleOperations.get_geom_placeholder(). The following line removes
# the corresponding None parameter. See ticket #10888.
params = self.connection.ops.modify_insert_params(sql, params)
return sql, params
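    # Illustration (hedged): field_as_sql(None, "nextval('seq')") returns the
    # raw pair ("nextval('seq')", []); a plain value 5 returns ("%s", [5]); and
    # a field defining get_placeholder() may return something like
    # ("ST_GeomFromText(%s)", [val]) -- the exact SQL is backend-specific.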
def prepare_value(self, field, value):
"""
Prepare a value to be used in a query by resolving it if it is an
expression and otherwise calling the field's get_db_prep_save().
"""
if hasattr(value, "resolve_expression"):
value = value.resolve_expression(
self.query, allow_joins=False, for_save=True
)
# Don't allow values containing Col expressions. They refer to
# existing columns on a row, but in the case of insert the row
# doesn't exist yet.
if value.contains_column_references:
raise ValueError(
'Failed to insert expression "%s" on %s. F() expressions '
"can only be used to update, not to insert." % (value, field)
)
if value.contains_aggregate:
raise FieldError(
"Aggregate functions are not allowed in this query "
"(%s=%r)." % (field.name, value)
)
if value.contains_over_clause:
raise FieldError(
"Window expressions are not allowed in this query (%s=%r)."
% (field.name, value)
)
else:
value = field.get_db_prep_save(value, connection=self.connection)
return value
def pre_save_val(self, field, obj):
"""
Get the given field's value off the given obj. pre_save() is used for
things like auto_now on DateTimeField. Skip it if this is a raw query.
"""
if self.query.raw:
return getattr(obj, field.attname)
return field.pre_save(obj, add=True)
def assemble_as_sql(self, fields, value_rows):
"""
Take a sequence of N fields and a sequence of M rows of values, and
generate placeholder SQL and parameters for each field and value.
Return a pair containing:
* a sequence of M rows of N SQL placeholder strings, and
* a sequence of M rows of corresponding parameter values.
Each placeholder string may contain any number of '%s' interpolation
strings, and each parameter row will contain exactly as many params
as the total number of '%s's in the corresponding placeholder row.
"""
if not value_rows:
return [], []
# list of (sql, [params]) tuples for each object to be saved
# Shape: [n_objs][n_fields][2]
rows_of_fields_as_sql = (
(self.field_as_sql(field, v) for field, v in zip(fields, row))
for row in value_rows
)
# tuple like ([sqls], [[params]s]) for each object to be saved
# Shape: [n_objs][2][n_fields]
sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql)
# Extract separate lists for placeholders and params.
# Each of these has shape [n_objs][n_fields]
placeholder_rows, param_rows = zip(*sql_and_param_pair_rows)
# Params for each field are still lists, and need to be flattened.
param_rows = [[p for ps in row for p in ps] for row in param_rows]
return placeholder_rows, param_rows
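    # Illustration (hedged) of the shapes above, for two objects and two
    # fields whose values all use plain placeholders:
    #   rows_of_fields_as_sql -> [[("%s", [1]), ("%s", ["a"])],
    #                             [("%s", [2]), ("%s", ["b"])]]
    #   placeholder_rows      -> (("%s", "%s"), ("%s", "%s"))
    #   param_rows            -> [[1, "a"], [2, "b"]]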
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
insert_statement = self.connection.ops.insert_statement(
on_conflict=self.query.on_conflict,
)
result = ["%s %s" % (insert_statement, qn(opts.db_table))]
fields = self.query.fields or [opts.pk]
result.append("(%s)" % ", ".join(qn(f.column) for f in fields))
if self.query.fields:
value_rows = [
[
self.prepare_value(field, self.pre_save_val(field, obj))
for field in fields
]
for obj in self.query.objs
]
else:
# An empty object.
value_rows = [
[self.connection.ops.pk_default_value()] for _ in self.query.objs
]
fields = [None]
# Currently the backends just accept values when generating bulk
# queries and generate their own placeholders. Doing that isn't
# necessary and it should be possible to use placeholders and
# expressions in bulk inserts too.
can_bulk = (
not self.returning_fields and self.connection.features.has_bulk_insert
)
placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows)
on_conflict_suffix_sql = self.connection.ops.on_conflict_suffix_sql(
fields,
self.query.on_conflict,
self.query.update_fields,
self.query.unique_fields,
)
if (
self.returning_fields
and self.connection.features.can_return_columns_from_insert
):
if self.connection.features.can_return_rows_from_bulk_insert:
result.append(
self.connection.ops.bulk_insert_sql(fields, placeholder_rows)
)
params = param_rows
else:
result.append("VALUES (%s)" % ", ".join(placeholder_rows[0]))
params = [param_rows[0]]
if on_conflict_suffix_sql:
result.append(on_conflict_suffix_sql)
# Skip empty r_sql to allow subclasses to customize behavior for
# 3rd party backends. Refs #19096.
r_sql, self.returning_params = self.connection.ops.return_insert_columns(
self.returning_fields
)
if r_sql:
result.append(r_sql)
params += [self.returning_params]
return [(" ".join(result), tuple(chain.from_iterable(params)))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
if on_conflict_suffix_sql:
result.append(on_conflict_suffix_sql)
return [(" ".join(result), tuple(p for ps in param_rows for p in ps))]
else:
if on_conflict_suffix_sql:
result.append(on_conflict_suffix_sql)
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholder_rows, param_rows)
]
def execute_sql(self, returning_fields=None):
assert not (
returning_fields
and len(self.query.objs) != 1
and not self.connection.features.can_return_rows_from_bulk_insert
)
opts = self.query.get_meta()
self.returning_fields = returning_fields
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not self.returning_fields:
return []
if (
self.connection.features.can_return_rows_from_bulk_insert
and len(self.query.objs) > 1
):
rows = self.connection.ops.fetch_returned_insert_rows(cursor)
elif self.connection.features.can_return_columns_from_insert:
assert len(self.query.objs) == 1
rows = [
self.connection.ops.fetch_returned_insert_columns(
cursor,
self.returning_params,
)
]
else:
rows = [
(
self.connection.ops.last_insert_id(
cursor,
opts.db_table,
opts.pk.column,
),
)
]
cols = [field.get_col(opts.db_table) for field in self.returning_fields]
converters = self.get_converters(cols)
if converters:
rows = list(self.apply_converters(rows, converters))
return rows
class SQLDeleteCompiler(SQLCompiler):
@cached_property
def single_alias(self):
# Ensure base table is in aliases.
self.query.get_initial_alias()
return sum(self.query.alias_refcount[t] > 0 for t in self.query.alias_map) == 1
@classmethod
def _expr_refs_base_model(cls, expr, base_model):
if isinstance(expr, Query):
return expr.model == base_model
if not hasattr(expr, "get_source_expressions"):
return False
return any(
cls._expr_refs_base_model(source_expr, base_model)
for source_expr in expr.get_source_expressions()
)
@cached_property
def contains_self_reference_subquery(self):
return any(
self._expr_refs_base_model(expr, self.query.model)
for expr in chain(
self.query.annotations.values(), self.query.where.children
)
)
def _as_sql(self, query):
delete = "DELETE FROM %s" % self.quote_name_unless_alias(query.base_table)
try:
where, params = self.compile(query.where)
except FullResultSet:
return delete, ()
return f"{delete} WHERE {where}", tuple(params)
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
if self.single_alias and not self.contains_self_reference_subquery:
return self._as_sql(self.query)
innerq = self.query.clone()
innerq.__class__ = Query
innerq.clear_select_clause()
pk = self.query.model._meta.pk
innerq.select = [pk.get_col(self.query.get_initial_alias())]
outerq = Query(self.query.model)
if not self.connection.features.update_can_self_select:
# Force the materialization of the inner query to allow reference
# to the target table on MySQL.
sql, params = innerq.get_compiler(connection=self.connection).as_sql()
innerq = RawSQL("SELECT * FROM (%s) subquery" % sql, params)
outerq.add_filter("pk__in", innerq)
return self._as_sql(outerq)
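    # Illustration (hedged): with joins or a self-referential subquery, the
    # fallback above roughly emits
    #   DELETE FROM base WHERE pk IN (SELECT base.pk FROM base JOIN ... WHERE ...)
    # and, when update_can_self_select is False (e.g. MySQL), the inner query
    # is additionally wrapped as SELECT * FROM (...) subquery so the table
    # being deleted from isn't referenced directly.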
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return "", ()
qn = self.quote_name_unless_alias
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, "resolve_expression"):
val = val.resolve_expression(
self.query, allow_joins=False, for_save=True
)
if val.contains_aggregate:
raise FieldError(
"Aggregate functions are not allowed in this query "
"(%s=%r)." % (field.name, val)
)
if val.contains_over_clause:
raise FieldError(
"Window expressions are not allowed in this query "
"(%s=%r)." % (field.name, val)
)
elif hasattr(val, "prepare_database_save"):
if field.remote_field:
val = field.get_db_prep_save(
val.prepare_database_save(field),
connection=self.connection,
)
else:
raise TypeError(
"Tried to update field %s with a model instance, %r. "
"Use a value compatible with %s."
% (field, val, field.__class__.__name__)
)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, "get_placeholder"):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = "%s"
name = field.column
if hasattr(val, "as_sql"):
sql, params = self.compile(val)
values.append("%s = %s" % (qn(name), placeholder % sql))
update_params.extend(params)
elif val is not None:
values.append("%s = %s" % (qn(name), placeholder))
update_params.append(val)
else:
values.append("%s = NULL" % qn(name))
table = self.query.base_table
result = [
"UPDATE %s SET" % qn(table),
", ".join(values),
]
try:
where, params = self.compile(self.query.where)
except FullResultSet:
params = []
else:
result.append("WHERE %s" % where)
return " ".join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Return the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super().execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, munge the "where"
conditions to match the format required for (portable) SQL updates.
If multiple updates are required, pull out the id values to update at
this point so that they don't change as a result of the progressive
updates.
"""
refcounts_before = self.query.alias_refcount.copy()
# Ensure base table is in the query
self.query.get_initial_alias()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
query = self.query.chain(klass=Query)
query.select_related = False
query.clear_ordering(force=True)
query.extra = {}
query.select = []
meta = query.get_meta()
fields = [meta.pk.name]
related_ids_index = []
for related in self.query.related_updates:
if all(
path.join_field.primary_key for path in meta.get_path_to_parent(related)
):
# If a primary key chain exists to the targeted related update,
# then the meta.pk value can be used for it.
related_ids_index.append((related, 0))
else:
# This branch will only be reached when updating a field of an
# ancestor that is not part of the primary key chain of a MTI
# tree.
related_ids_index.append((related, len(fields)))
fields.append(related._meta.pk.name)
query.add_fields(fields)
super().pre_sql_setup()
must_pre_select = (
count > 1 and not self.connection.features.update_can_self_select
)
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.clear_where()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
related_ids = collections.defaultdict(list)
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
for parent, index in related_ids_index:
related_ids[parent].extend(r[index] for r in rows)
self.query.add_filter("pk__in", idents)
self.query.related_ids = related_ids
else:
# The fast path. Filters and updates in one query.
self.query.add_filter("pk__in", query)
self.query.reset_refcounts(refcounts_before)
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
sql, params = [], []
for annotation in self.query.annotation_select.values():
ann_sql, ann_params = self.compile(annotation)
ann_sql, ann_params = annotation.select_format(self, ann_sql, ann_params)
sql.append(ann_sql)
params.extend(ann_params)
self.col_count = len(self.query.annotation_select)
sql = ", ".join(sql)
params = tuple(params)
inner_query_sql, inner_query_params = self.query.inner_query.get_compiler(
self.using,
elide_empty=self.elide_empty,
).as_sql(with_col_aliases=True)
sql = "SELECT %s FROM (%s) subquery" % (sql, inner_query_sql)
params += inner_query_params
return sql, params
def cursor_iter(cursor, sentinel, col_count, itersize):
"""
Yield blocks of rows from a cursor and ensure the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel):
yield rows if col_count is None else [r[:col_count] for r in rows]
finally:
cursor.close()
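# A minimal, self-contained sketch (not part of Django) of the cursor_iter()
# contract, using a hypothetical in-memory cursor. It shows batching by
# itersize, trimming rows to col_count, stopping at the sentinel (the
# backend's empty_fetchmany_value, [] here), and closing the cursor when done.
def _demo_cursor_iter():
    class FakeCursor:
        def __init__(self, rows):
            self._rows = list(rows)
            self.closed = False
        def fetchmany(self, size):
            batch, self._rows = self._rows[:size], self._rows[size:]
            return batch
        def close(self):
            self.closed = True
    cursor = FakeCursor([(1, "a"), (2, "b"), (3, "c")])
    batches = list(cursor_iter(cursor, [], 1, 2))
    assert batches == [[(1,), (2,)], [(3,)]]
    assert cursor.closed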
"""
Code to manage the creation and SQL rendering of 'where' constraints.
"""
import operator
from functools import reduce
from django.core.exceptions import EmptyResultSet, FullResultSet
from django.db.models.expressions import Case, When
from django.db.models.lookups import Exact
from django.utils import tree
from django.utils.functional import cached_property
# Connection types
AND = "AND"
OR = "OR"
XOR = "XOR"
class WhereNode(tree.Node):
"""
An SQL WHERE clause.
The class is tied to the Query class that created it (in order to create
the correct SQL).
A child is usually an expression producing boolean values. Most likely the
expression is a Lookup instance.
    However, a child could also be any class with an as_sql() method, either a
    relabeled_clone() method or relabel_aliases() and clone() methods, and a
    contains_aggregate attribute.
"""
default = AND
resolved = False
conditional = True
def split_having_qualify(self, negated=False, must_group_by=False):
"""
Return three possibly None nodes: one for those parts of self that
should be included in the WHERE clause, one for those parts of self
that must be included in the HAVING clause, and one for those parts
that refer to window functions.
"""
if not self.contains_aggregate and not self.contains_over_clause:
return self, None, None
in_negated = negated ^ self.negated
# Whether or not children must be connected in the same filtering
        # clause (WHERE > HAVING > QUALIFY) to maintain logical semantics.
must_remain_connected = (
(in_negated and self.connector == AND)
or (not in_negated and self.connector == OR)
or self.connector == XOR
)
if (
must_remain_connected
and self.contains_aggregate
and not self.contains_over_clause
):
            # It's much cheaper to short-circuit and stash everything in the
            # HAVING clause than to split the children if possible.
return None, self, None
where_parts = []
having_parts = []
qualify_parts = []
for c in self.children:
if hasattr(c, "split_having_qualify"):
where_part, having_part, qualify_part = c.split_having_qualify(
in_negated, must_group_by
)
if where_part is not None:
where_parts.append(where_part)
if having_part is not None:
having_parts.append(having_part)
if qualify_part is not None:
qualify_parts.append(qualify_part)
elif c.contains_over_clause:
qualify_parts.append(c)
elif c.contains_aggregate:
having_parts.append(c)
else:
where_parts.append(c)
if must_remain_connected and qualify_parts:
# Disjunctive heterogeneous predicates can be pushed down to
# qualify as long as no conditional aggregation is involved.
            if not where_parts or not must_group_by:
return None, None, self
elif where_parts:
# In theory this should only be enforced when dealing with
# where_parts containing predicates against multi-valued
# relationships that could affect aggregation results but this
# is complex to infer properly.
raise NotImplementedError(
"Heterogeneous disjunctive predicates against window functions are "
"not implemented when performing conditional aggregation."
)
where_node = (
self.create(where_parts, self.connector, self.negated)
if where_parts
else None
)
having_node = (
self.create(having_parts, self.connector, self.negated)
if having_parts
else None
)
qualify_node = (
self.create(qualify_parts, self.connector, self.negated)
if qualify_parts
else None
)
return where_node, having_node, qualify_node
def as_sql(self, compiler, connection):
"""
Return the SQL version of the where clause and the value to be
substituted in. Return '', [] if this node matches everything,
None, [] if this node is empty, and raise EmptyResultSet if this
node can't match anything.
"""
result = []
result_params = []
if self.connector == AND:
full_needed, empty_needed = len(self.children), 1
else:
full_needed, empty_needed = 1, len(self.children)
if self.connector == XOR and not connection.features.supports_logical_xor:
# Convert if the database doesn't support XOR:
# a XOR b XOR c XOR ...
# to:
# (a OR b OR c OR ...) AND (a + b + c + ...) == 1
lhs = self.__class__(self.children, OR)
rhs_sum = reduce(
operator.add,
(Case(When(c, then=1), default=0) for c in self.children),
)
rhs = Exact(1, rhs_sum)
return self.__class__([lhs, rhs], AND, self.negated).as_sql(
compiler, connection
)
for child in self.children:
try:
sql, params = compiler.compile(child)
except EmptyResultSet:
empty_needed -= 1
except FullResultSet:
full_needed -= 1
else:
if sql:
result.append(sql)
result_params.extend(params)
else:
full_needed -= 1
            # Check if this node matches nothing or everything: empty_needed
            # and full_needed count how many more children must be empty/full
            # before this node as a whole is known to be empty/full.
if empty_needed == 0:
if self.negated:
raise FullResultSet
else:
raise EmptyResultSet
if full_needed == 0:
if self.negated:
raise EmptyResultSet
else:
raise FullResultSet
conn = " %s " % self.connector
sql_string = conn.join(result)
if not sql_string:
raise FullResultSet
if self.negated:
# Some backends (Oracle at least) need parentheses around the inner
# SQL in the negated case, even if the inner SQL contains just a
# single expression.
sql_string = "NOT (%s)" % sql_string
elif len(result) > 1 or self.resolved:
sql_string = "(%s)" % sql_string
return sql_string, result_params
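    # Illustration (hedged) of the XOR fallback above: for children a, b, c
    # the clause compiles roughly to
    #   (a OR b OR c) AND (CASE WHEN a THEN 1 ELSE 0 END
    #                      + CASE WHEN b THEN 1 ELSE 0 END
    #                      + CASE WHEN c THEN 1 ELSE 0 END) = 1
    # i.e. exactly one predicate holds; for two children this matches logical
    # XOR, while for three or more it differs from chained odd-parity XOR.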
def get_group_by_cols(self):
cols = []
for child in self.children:
cols.extend(child.get_group_by_cols())
return cols
def get_source_expressions(self):
return self.children[:]
def set_source_expressions(self, children):
assert len(children) == len(self.children)
self.children = children
def relabel_aliases(self, change_map):
"""
Relabel the alias values of any children. 'change_map' is a dictionary
mapping old (current) alias values to the new values.
"""
for pos, child in enumerate(self.children):
if hasattr(child, "relabel_aliases"):
# For example another WhereNode
child.relabel_aliases(change_map)
elif hasattr(child, "relabeled_clone"):
self.children[pos] = child.relabeled_clone(change_map)
def clone(self):
clone = self.create(connector=self.connector, negated=self.negated)
for child in self.children:
if hasattr(child, "clone"):
child = child.clone()
clone.children.append(child)
return clone
def relabeled_clone(self, change_map):
clone = self.clone()
clone.relabel_aliases(change_map)
return clone
def replace_expressions(self, replacements):
if replacement := replacements.get(self):
return replacement
clone = self.create(connector=self.connector, negated=self.negated)
for child in self.children:
clone.children.append(child.replace_expressions(replacements))
return clone
def get_refs(self):
refs = set()
for child in self.children:
refs |= child.get_refs()
return refs
@classmethod
def _contains_aggregate(cls, obj):
if isinstance(obj, tree.Node):
return any(cls._contains_aggregate(c) for c in obj.children)
return obj.contains_aggregate
@cached_property
def contains_aggregate(self):
return self._contains_aggregate(self)
@classmethod
def _contains_over_clause(cls, obj):
if isinstance(obj, tree.Node):
return any(cls._contains_over_clause(c) for c in obj.children)
return obj.contains_over_clause
@cached_property
def contains_over_clause(self):
return self._contains_over_clause(self)
@property
def is_summary(self):
return any(child.is_summary for child in self.children)
@staticmethod
def _resolve_leaf(expr, query, *args, **kwargs):
if hasattr(expr, "resolve_expression"):
expr = expr.resolve_expression(query, *args, **kwargs)
return expr
@classmethod
def _resolve_node(cls, node, query, *args, **kwargs):
if hasattr(node, "children"):
for child in node.children:
cls._resolve_node(child, query, *args, **kwargs)
if hasattr(node, "lhs"):
node.lhs = cls._resolve_leaf(node.lhs, query, *args, **kwargs)
if hasattr(node, "rhs"):
node.rhs = cls._resolve_leaf(node.rhs, query, *args, **kwargs)
def resolve_expression(self, *args, **kwargs):
clone = self.clone()
clone._resolve_node(clone, *args, **kwargs)
clone.resolved = True
return clone
@cached_property
def output_field(self):
from django.db.models import BooleanField
return BooleanField()
@property
def _output_field_or_none(self):
return self.output_field
def select_format(self, compiler, sql, params):
# Wrap filters with a CASE WHEN expression if a database backend
        # (e.g. Oracle) doesn't support boolean expressions in the SELECT or
        # GROUP BY list.
if not compiler.connection.features.supports_boolean_expr_in_select_clause:
sql = f"CASE WHEN {sql} THEN 1 ELSE 0 END"
return sql, params
def get_db_converters(self, connection):
return self.output_field.get_db_converters(connection)
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def leaves(self):
for child in self.children:
if isinstance(child, WhereNode):
yield from child.leaves()
else:
yield child
class NothingNode:
"""A node that matches nothing."""
contains_aggregate = False
contains_over_clause = False
def as_sql(self, compiler=None, connection=None):
raise EmptyResultSet
class ExtraWhere:
# The contents are a black box - assume no aggregates or windows are used.
contains_aggregate = False
contains_over_clause = False
def __init__(self, sqls, params):
self.sqls = sqls
self.params = params
def as_sql(self, compiler=None, connection=None):
sqls = ["(%s)" % sql for sql in self.sqls]
return " AND ".join(sqls), list(self.params or ())
class SubqueryConstraint:
# Even if aggregates or windows would be used in a subquery,
# the outer query isn't interested about those.
contains_aggregate = False
contains_over_clause = False
def __init__(self, alias, columns, targets, query_object):
self.alias = alias
self.columns = columns
self.targets = targets
query_object.clear_ordering(clear_default=True)
self.query_object = query_object
def as_sql(self, compiler, connection):
query = self.query_object
query.set_values(self.targets)
query_compiler = query.get_compiler(connection=connection)
return query_compiler.as_subquery_condition(self.alias, self.columns, compiler)
"""
Oracle database backend for Django.
Requires cx_Oracle: https://oracle.github.io/python-cx_Oracle/
"""
import datetime
import decimal
import os
import platform
from contextlib import contextmanager
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import IntegrityError
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.utils import debug_transaction
from django.utils.asyncio import async_unsafe
from django.utils.encoding import force_bytes, force_str
from django.utils.functional import cached_property
def _setup_environment(environ):
# Cygwin requires some special voodoo to set the environment variables
# properly so that Oracle will see them.
if platform.system().upper().startswith("CYGWIN"):
try:
import ctypes
except ImportError as e:
raise ImproperlyConfigured(
"Error loading ctypes: %s; "
"the Oracle backend requires ctypes to "
"operate correctly under Cygwin." % e
)
kernel32 = ctypes.CDLL("kernel32")
for name, value in environ:
kernel32.SetEnvironmentVariableA(name, value)
else:
os.environ.update(environ)
_setup_environment(
[
# Oracle takes client-side character set encoding from the environment.
("NLS_LANG", ".AL32UTF8"),
# This prevents Unicode from getting mangled by getting encoded into the
# potentially non-Unicode database character set.
("ORA_NCHAR_LITERAL_REPLACE", "TRUE"),
]
)
try:
import cx_Oracle as Database
except ImportError as e:
raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e)
# Some of these import cx_Oracle, so import them after checking if it's installed.
from .client import DatabaseClient # NOQA
from .creation import DatabaseCreation # NOQA
from .features import DatabaseFeatures # NOQA
from .introspection import DatabaseIntrospection # NOQA
from .operations import DatabaseOperations # NOQA
from .schema import DatabaseSchemaEditor # NOQA
from .utils import Oracle_datetime, dsn # NOQA
from .validation import DatabaseValidation # NOQA
@contextmanager
def wrap_oracle_errors():
try:
yield
except Database.DatabaseError as e:
# cx_Oracle raises a cx_Oracle.DatabaseError exception with the
# following attributes and values:
# code = 2091
# message = 'ORA-02091: transaction rolled back
# 'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS
# _C00102056) violated - parent key not found'
# or:
# 'ORA-00001: unique constraint (DJANGOTEST.DEFERRABLE_
# PINK_CONSTRAINT) violated
# Convert that case to Django's IntegrityError exception.
x = e.args[0]
if (
hasattr(x, "code")
and hasattr(x, "message")
and x.code == 2091
and ("ORA-02291" in x.message or "ORA-00001" in x.message)
):
raise IntegrityError(*tuple(e.args))
raise
class _UninitializedOperatorsDescriptor:
def __get__(self, instance, cls=None):
# If connection.operators is looked up before a connection has been
# created, transparently initialize connection.operators to avert an
# AttributeError.
if instance is None:
raise AttributeError("operators not available as class attribute")
# Creating a cursor will initialize the operators.
instance.cursor().close()
return instance.__dict__["operators"]
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = "oracle"
display_name = "Oracle"
# This dictionary maps Field objects to their associated Oracle column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
#
# Any format strings starting with "qn_" are quoted before being used in the
    # output (the "qn_" prefix is stripped before the lookup is performed).
data_types = {
"AutoField": "NUMBER(11) GENERATED BY DEFAULT ON NULL AS IDENTITY",
"BigAutoField": "NUMBER(19) GENERATED BY DEFAULT ON NULL AS IDENTITY",
"BinaryField": "BLOB",
"BooleanField": "NUMBER(1)",
"CharField": "NVARCHAR2(%(max_length)s)",
"DateField": "DATE",
"DateTimeField": "TIMESTAMP",
"DecimalField": "NUMBER(%(max_digits)s, %(decimal_places)s)",
"DurationField": "INTERVAL DAY(9) TO SECOND(6)",
"FileField": "NVARCHAR2(%(max_length)s)",
"FilePathField": "NVARCHAR2(%(max_length)s)",
"FloatField": "DOUBLE PRECISION",
"IntegerField": "NUMBER(11)",
"JSONField": "NCLOB",
"BigIntegerField": "NUMBER(19)",
"IPAddressField": "VARCHAR2(15)",
"GenericIPAddressField": "VARCHAR2(39)",
"OneToOneField": "NUMBER(11)",
"PositiveBigIntegerField": "NUMBER(19)",
"PositiveIntegerField": "NUMBER(11)",
"PositiveSmallIntegerField": "NUMBER(11)",
"SlugField": "NVARCHAR2(%(max_length)s)",
"SmallAutoField": "NUMBER(5) GENERATED BY DEFAULT ON NULL AS IDENTITY",
"SmallIntegerField": "NUMBER(11)",
"TextField": "NCLOB",
"TimeField": "TIMESTAMP",
"URLField": "VARCHAR2(%(max_length)s)",
"UUIDField": "VARCHAR2(32)",
}
data_type_check_constraints = {
"BooleanField": "%(qn_column)s IN (0,1)",
"JSONField": "%(qn_column)s IS JSON",
"PositiveBigIntegerField": "%(qn_column)s >= 0",
"PositiveIntegerField": "%(qn_column)s >= 0",
"PositiveSmallIntegerField": "%(qn_column)s >= 0",
}
# Oracle doesn't support a database index on these columns.
_limited_data_types = ("clob", "nclob", "blob")
operators = _UninitializedOperatorsDescriptor()
_standard_operators = {
"exact": "= %s",
"iexact": "= UPPER(%s)",
"contains": (
"LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)"
),
"icontains": (
"LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) "
"ESCAPE TRANSLATE('\\' USING NCHAR_CS)"
),
"gt": "> %s",
"gte": ">= %s",
"lt": "< %s",
"lte": "<= %s",
"startswith": (
"LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)"
),
"endswith": (
"LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)"
),
"istartswith": (
"LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) "
"ESCAPE TRANSLATE('\\' USING NCHAR_CS)"
),
"iendswith": (
"LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) "
"ESCAPE TRANSLATE('\\' USING NCHAR_CS)"
),
}
_likec_operators = {
**_standard_operators,
"contains": "LIKEC %s ESCAPE '\\'",
"icontains": "LIKEC UPPER(%s) ESCAPE '\\'",
"startswith": "LIKEC %s ESCAPE '\\'",
"endswith": "LIKEC %s ESCAPE '\\'",
"istartswith": "LIKEC UPPER(%s) ESCAPE '\\'",
"iendswith": "LIKEC UPPER(%s) ESCAPE '\\'",
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, %, _)
# should be escaped on the database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
_pattern_ops = {
"contains": "'%%' || {} || '%%'",
"icontains": "'%%' || UPPER({}) || '%%'",
"startswith": "{} || '%%'",
"istartswith": "UPPER({}) || '%%'",
"endswith": "'%%' || {}",
"iendswith": "'%%' || UPPER({})",
}
_standard_pattern_ops = {
k: "LIKE TRANSLATE( " + v + " USING NCHAR_CS)"
" ESCAPE TRANSLATE('\\' USING NCHAR_CS)"
for k, v in _pattern_ops.items()
}
_likec_pattern_ops = {
k: "LIKEC " + v + " ESCAPE '\\'" for k, v in _pattern_ops.items()
}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
# Classes instantiated in __init__().
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
validation_class = DatabaseValidation
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
use_returning_into = self.settings_dict["OPTIONS"].get(
"use_returning_into", True
)
self.features.can_return_columns_from_insert = use_returning_into
def get_database_version(self):
return self.oracle_version
def get_connection_params(self):
conn_params = self.settings_dict["OPTIONS"].copy()
if "use_returning_into" in conn_params:
del conn_params["use_returning_into"]
return conn_params
@async_unsafe
def get_new_connection(self, conn_params):
return Database.connect(
user=self.settings_dict["USER"],
password=self.settings_dict["PASSWORD"],
dsn=dsn(self.settings_dict),
**conn_params,
)
def init_connection_state(self):
super().init_connection_state()
cursor = self.create_cursor()
# Set the territory first. The territory overrides NLS_DATE_FORMAT
# and NLS_TIMESTAMP_FORMAT to the territory default. When all of
        # these are set in a single statement, it isn't clear what is supposed
        # to happen.
cursor.execute("ALTER SESSION SET NLS_TERRITORY = 'AMERICA'")
# Set Oracle date to ANSI date format. This only needs to execute
# once when we create a new connection. We also set the Territory
# to 'AMERICA' which forces Sunday to evaluate to a '1' in
# TO_CHAR().
cursor.execute(
"ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'"
" NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'"
+ (" TIME_ZONE = 'UTC'" if settings.USE_TZ else "")
)
cursor.close()
if "operators" not in self.__dict__:
# Ticket #14149: Check whether our LIKE implementation will
            # work for this connection or whether we need to fall back on
            # LIKEC.
# This check is performed only once per DatabaseWrapper
# instance per thread, since subsequent connections will use
# the same settings.
cursor = self.create_cursor()
try:
cursor.execute(
"SELECT 1 FROM DUAL WHERE DUMMY %s"
% self._standard_operators["contains"],
["X"],
)
except Database.DatabaseError:
self.operators = self._likec_operators
self.pattern_ops = self._likec_pattern_ops
else:
self.operators = self._standard_operators
self.pattern_ops = self._standard_pattern_ops
cursor.close()
self.connection.stmtcachesize = 20
# Ensure all changes are preserved even when AUTOCOMMIT is False.
if not self.get_autocommit():
self.commit()
@async_unsafe
def create_cursor(self, name=None):
return FormatStylePlaceholderCursor(self.connection)
def _commit(self):
if self.connection is not None:
with debug_transaction(self, "COMMIT"), wrap_oracle_errors():
return self.connection.commit()
# Oracle doesn't support releasing savepoints. But we fake them when query
# logging is enabled to keep query counts consistent with other backends.
def _savepoint_commit(self, sid):
if self.queries_logged:
self.queries_log.append(
{
"sql": "-- RELEASE SAVEPOINT %s (faked)" % self.ops.quote_name(sid),
"time": "0.000",
}
)
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None):
"""
Check constraints by setting them to immediate. Return them to deferred
afterward.
"""
with self.cursor() as cursor:
cursor.execute("SET CONSTRAINTS ALL IMMEDIATE")
cursor.execute("SET CONSTRAINTS ALL DEFERRED")
def is_usable(self):
try:
self.connection.ping()
except Database.Error:
return False
else:
return True
@cached_property
def cx_oracle_version(self):
return tuple(int(x) for x in Database.version.split("."))
@cached_property
def oracle_version(self):
with self.temporary_connection():
return tuple(int(x) for x in self.connection.version.split("."))
class OracleParam:
"""
Wrapper object for formatting parameters for Oracle. If the string
representation of the value is large enough (greater than 4000 characters)
the input size needs to be set as CLOB. Alternatively, if the parameter
has an `input_size` attribute, then the value of the `input_size` attribute
will be used instead. Otherwise, no input size will be set for the
parameter when executing the query.
"""
def __init__(self, param, cursor, strings_only=False):
# With raw SQL queries, datetimes can reach this function
# without being converted by DateTimeField.get_db_prep_value.
if settings.USE_TZ and (
isinstance(param, datetime.datetime)
and not isinstance(param, Oracle_datetime)
):
param = Oracle_datetime.from_datetime(param)
string_size = 0
# Oracle doesn't recognize True and False correctly.
if param is True:
param = 1
elif param is False:
param = 0
if hasattr(param, "bind_parameter"):
self.force_bytes = param.bind_parameter(cursor)
elif isinstance(param, (Database.Binary, datetime.timedelta)):
self.force_bytes = param
else:
            # To transmit to the database, we need Unicode if supported.
            # To get the size right, we must consider bytes.
self.force_bytes = force_str(param, cursor.charset, strings_only)
if isinstance(self.force_bytes, str):
# We could optimize by only converting up to 4000 bytes here
string_size = len(force_bytes(param, cursor.charset, strings_only))
if hasattr(param, "input_size"):
# If parameter has `input_size` attribute, use that.
self.input_size = param.input_size
elif string_size > 4000:
# Mark any string param greater than 4000 characters as a CLOB.
self.input_size = Database.CLOB
elif isinstance(param, datetime.datetime):
self.input_size = Database.TIMESTAMP
else:
self.input_size = None
class VariableWrapper:
"""
An adapter class for cursor variables that prevents the wrapped object
from being converted into a string when used to instantiate an OracleParam.
This can be used generally for any other object that should be passed into
Cursor.execute as-is.
"""
def __init__(self, var):
self.var = var
def bind_parameter(self, cursor):
return self.var
def __getattr__(self, key):
return getattr(self.var, key)
def __setattr__(self, key, value):
if key == "var":
self.__dict__[key] = value
else:
setattr(self.var, key, value)
class FormatStylePlaceholderCursor:
"""
Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var"
style. This fixes it -- but note that if you want to use a literal "%s" in
a query, you'll need to use "%%s".
"""
charset = "utf-8"
def __init__(self, connection):
self.cursor = connection.cursor()
self.cursor.outputtypehandler = self._output_type_handler
@staticmethod
def _output_number_converter(value):
return decimal.Decimal(value) if "." in value else int(value)
@staticmethod
def _get_decimal_converter(precision, scale):
if scale == 0:
return int
context = decimal.Context(prec=precision)
quantize_value = decimal.Decimal(1).scaleb(-scale)
return lambda v: decimal.Decimal(v).quantize(quantize_value, context=context)
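    # Illustration (hedged): _get_decimal_converter(5, 2)("12.3456") yields
    # Decimal("12.35"), while a scale of 0 short-circuits to int, so "42" -> 42.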
@staticmethod
def _output_type_handler(cursor, name, defaultType, length, precision, scale):
"""
Called for each db column fetched from cursors. Return numbers as the
appropriate Python type.
"""
if defaultType == Database.NUMBER:
if scale == -127:
if precision == 0:
# NUMBER column: decimal-precision floating point.
# This will normally be an integer from a sequence,
# but it could be a decimal value.
outconverter = FormatStylePlaceholderCursor._output_number_converter
else:
# FLOAT column: binary-precision floating point.
# This comes from FloatField columns.
outconverter = float
elif precision > 0:
# NUMBER(p,s) column: decimal-precision fixed point.
# This comes from IntegerField and DecimalField columns.
outconverter = FormatStylePlaceholderCursor._get_decimal_converter(
precision, scale
)
else:
# No type information. This normally comes from a
# mathematical expression in the SELECT list. Guess int
# or Decimal based on whether it has a decimal point.
outconverter = FormatStylePlaceholderCursor._output_number_converter
return cursor.var(
Database.STRING,
size=255,
arraysize=cursor.arraysize,
outconverter=outconverter,
)
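    # Illustration (hedged): a NUMBER(11) column (precision 11, scale 0) gets
    # the int-returning decimal converter; a DOUBLE PRECISION column reports
    # scale == -127 with nonzero precision and converts via float(); columns
    # with no type information fall through to _output_number_converter, so
    # "3.5" becomes Decimal("3.5") and "7" becomes 7.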
def _format_params(self, params):
try:
return {k: OracleParam(v, self, True) for k, v in params.items()}
except AttributeError:
return tuple(OracleParam(p, self, True) for p in params)
def _guess_input_sizes(self, params_list):
# Try dict handling; if that fails, treat as sequence
if hasattr(params_list[0], "keys"):
sizes = {}
for params in params_list:
for k, value in params.items():
if value.input_size:
sizes[k] = value.input_size
if sizes:
self.setinputsizes(**sizes)
else:
# It's not a list of dicts; it's a list of sequences
sizes = [None] * len(params_list[0])
for params in params_list:
for i, value in enumerate(params):
if value.input_size:
sizes[i] = value.input_size
if sizes:
self.setinputsizes(*sizes)
def _param_generator(self, params):
# Try dict handling; if that fails, treat as sequence
if hasattr(params, "items"):
return {k: v.force_bytes for k, v in params.items()}
else:
return [p.force_bytes for p in params]
def _fix_for_params(self, query, params, unify_by_values=False):
        # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it
        # does want a trailing ';' but not a trailing '/'. However, these
# characters must be included in the original query in case the query
# is being passed to SQL*Plus.
if query.endswith(";") or query.endswith("/"):
query = query[:-1]
if params is None:
params = []
elif hasattr(params, "keys"):
# Handle params as dict
args = {k: ":%s" % k for k in params}
query %= args
elif unify_by_values and params:
# Handle params as a dict with unified query parameters by their
# values. It can be used only in single query execute() because
# executemany() shares the formatted query with each of the params
# list. e.g. for input params = [0.75, 2, 0.75, 'sth', 0.75]
# params_dict = {0.75: ':arg0', 2: ':arg1', 'sth': ':arg2'}
# args = [':arg0', ':arg1', ':arg0', ':arg2', ':arg0']
# params = {':arg0': 0.75, ':arg1': 2, ':arg2': 'sth'}
params_dict = {
param: ":arg%d" % i for i, param in enumerate(dict.fromkeys(params))
}
args = [params_dict[param] for param in params]
params = {value: key for key, value in params_dict.items()}
query %= tuple(args)
else:
# Handle params as sequence
args = [(":arg%d" % i) for i in range(len(params))]
query %= tuple(args)
return query, self._format_params(params)
def execute(self, query, params=None):
query, params = self._fix_for_params(query, params, unify_by_values=True)
self._guess_input_sizes([params])
with wrap_oracle_errors():
return self.cursor.execute(query, self._param_generator(params))
def executemany(self, query, params=None):
if not params:
# No params given, nothing to do
return None
# uniform treatment for sequences and iterables
params_iter = iter(params)
query, firstparams = self._fix_for_params(query, next(params_iter))
# we build a list of formatted params; as we're going to traverse it
# more than once, we can't make it lazy by using a generator
formatted = [firstparams] + [self._format_params(p) for p in params_iter]
self._guess_input_sizes(formatted)
with wrap_oracle_errors():
return self.cursor.executemany(
query, [self._param_generator(p) for p in formatted]
)
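    # Illustration (hedged): execute("SELECT %s, %s FROM DUAL", [1, 1]) is
    # rewritten by _fix_for_params(..., unify_by_values=True) into
    # "SELECT :arg0, :arg0 FROM DUAL" with params {":arg0": 1}, while
    # executemany() numbers each position separately because every params row
    # must fit the same formatted query.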
def close(self):
try:
self.cursor.close()
except Database.InterfaceError:
# already closed
pass
def var(self, *args):
return VariableWrapper(self.cursor.var(*args))
def arrayvar(self, *args):
return VariableWrapper(self.cursor.arrayvar(*args))
def __getattr__(self, attr):
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
import _thread
import copy
import datetime
import logging
import threading
import time
import warnings
from collections import deque
from contextlib import contextmanager
from django.db.backends.utils import debug_transaction
try:
import zoneinfo
except ImportError:
from backports import zoneinfo
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS, DatabaseError, NotSupportedError
from django.db.backends import utils
from django.db.backends.base.validation import BaseDatabaseValidation
from django.db.backends.signals import connection_created
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseErrorWrapper
from django.utils.asyncio import async_unsafe
from django.utils.functional import cached_property
NO_DB_ALIAS = "__no_db__"
RAN_DB_VERSION_CHECK = set()
logger = logging.getLogger("django.db.backends.base")
# RemovedInDjango50Warning
def timezone_constructor(tzname):
if settings.USE_DEPRECATED_PYTZ:
import pytz
return pytz.timezone(tzname)
return zoneinfo.ZoneInfo(tzname)
class BaseDatabaseWrapper:
"""Represent a database connection."""
# Mapping of Field objects to their column types.
data_types = {}
# Mapping of Field objects to their SQL suffix such as AUTOINCREMENT.
data_types_suffix = {}
# Mapping of Field objects to their SQL for CHECK constraints.
data_type_check_constraints = {}
ops = None
vendor = "unknown"
display_name = "unknown"
SchemaEditorClass = None
# Classes instantiated in __init__().
client_class = None
creation_class = None
features_class = None
introspection_class = None
ops_class = None
validation_class = BaseDatabaseValidation
queries_limit = 9000
def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS):
# Connection related attributes.
# The underlying database connection.
self.connection = None
# `settings_dict` should be a dictionary containing keys such as
# NAME, USER, etc. It's called `settings_dict` instead of `settings`
# to disambiguate it from Django settings modules.
self.settings_dict = settings_dict
self.alias = alias
# Query logging in debug mode or when explicitly enabled.
self.queries_log = deque(maxlen=self.queries_limit)
self.force_debug_cursor = False
# Transaction related attributes.
# Tracks if the connection is in autocommit mode. Per PEP 249, by
# default, it isn't.
self.autocommit = False
# Tracks if the connection is in a transaction managed by 'atomic'.
self.in_atomic_block = False
# Increment to generate unique savepoint ids.
self.savepoint_state = 0
# List of savepoints created by 'atomic'.
self.savepoint_ids = []
# Stack of active 'atomic' blocks.
self.atomic_blocks = []
# Tracks if the outermost 'atomic' block should commit on exit,
# ie. if autocommit was active on entry.
self.commit_on_exit = True
# Tracks if the transaction should be rolled back to the next
# available savepoint because of an exception in an inner block.
self.needs_rollback = False
self.rollback_exc = None
# Connection termination related attributes.
self.close_at = None
self.closed_in_transaction = False
self.errors_occurred = False
self.health_check_enabled = False
self.health_check_done = False
# Thread-safety related attributes.
self._thread_sharing_lock = threading.Lock()
self._thread_sharing_count = 0
self._thread_ident = _thread.get_ident()
# A list of no-argument functions to run when the transaction commits.
# Each entry is an (sids, func, robust) tuple, where sids is a set of
# the active savepoint IDs when this function was registered and robust
# specifies whether it's allowed for the function to fail.
self.run_on_commit = []
# Should we run the on-commit hooks the next time set_autocommit(True)
# is called?
self.run_commit_hooks_on_set_autocommit_on = False
# A stack of wrappers to be invoked around execute()/executemany()
# calls. Each entry is a function taking five arguments: execute, sql,
# params, many, and context. It's the function's responsibility to
# call execute(sql, params, many, context).
self.execute_wrappers = []
self.client = self.client_class(self)
self.creation = self.creation_class(self)
self.features = self.features_class(self)
self.introspection = self.introspection_class(self)
self.ops = self.ops_class(self)
self.validation = self.validation_class(self)
def __repr__(self):
return (
f"<{self.__class__.__qualname__} "
f"vendor={self.vendor!r} alias={self.alias!r}>"
)
def ensure_timezone(self):
"""
Ensure the connection's timezone is set to `self.timezone_name` and
return whether it changed or not.
"""
return False
@cached_property
def timezone(self):
"""
Return a tzinfo of the database connection time zone.
This is only used when time zone support is enabled. When a datetime is
read from the database, it is always returned in this time zone.
When the database backend supports time zones, it doesn't matter which
time zone Django uses, as long as aware datetimes are used everywhere.
Other users connecting to the database can choose their own time zone.
When the database backend doesn't support time zones, the time zone
Django uses may be constrained by the requirements of other users of
the database.
"""
if not settings.USE_TZ:
return None
elif self.settings_dict["TIME_ZONE"] is None:
return datetime.timezone.utc
else:
return timezone_constructor(self.settings_dict["TIME_ZONE"])
@cached_property
def timezone_name(self):
"""
Name of the time zone of the database connection.
"""
if not settings.USE_TZ:
return settings.TIME_ZONE
elif self.settings_dict["TIME_ZONE"] is None:
return "UTC"
else:
return self.settings_dict["TIME_ZONE"]
@property
def queries_logged(self):
return self.force_debug_cursor or settings.DEBUG
@property
def queries(self):
if len(self.queries_log) == self.queries_log.maxlen:
warnings.warn(
"Limit for query logging exceeded, only the last {} queries "
"will be returned.".format(self.queries_log.maxlen)
)
return list(self.queries_log)
def get_database_version(self):
"""Return a tuple of the database's version."""
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require a get_database_version() "
"method."
)
def check_database_version_supported(self):
"""
Raise an error if the database version isn't supported by this
version of Django.
"""
if (
self.features.minimum_database_version is not None
and self.get_database_version() < self.features.minimum_database_version
):
db_version = ".".join(map(str, self.get_database_version()))
min_db_version = ".".join(map(str, self.features.minimum_database_version))
raise NotSupportedError(
f"{self.display_name} {min_db_version} or later is required "
f"(found {db_version})."
)
# ##### Backend-specific methods for creating connections and cursors #####
def get_connection_params(self):
"""Return a dict of parameters suitable for get_new_connection."""
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require a get_connection_params() "
"method"
)
def get_new_connection(self, conn_params):
"""Open a connection to the database."""
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require a get_new_connection() "
"method"
)
def init_connection_state(self):
"""Initialize the database connection settings."""
global RAN_DB_VERSION_CHECK
if self.alias not in RAN_DB_VERSION_CHECK:
self.check_database_version_supported()
RAN_DB_VERSION_CHECK.add(self.alias)
def create_cursor(self, name=None):
"""Create a cursor. Assume that a connection is established."""
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require a create_cursor() method"
)
# ##### Backend-specific methods for creating connections #####
@async_unsafe
def connect(self):
"""Connect to the database. Assume that the connection is closed."""
# Check for invalid configurations.
self.check_settings()
# In case the previous connection was closed while in an atomic block
self.in_atomic_block = False
self.savepoint_ids = []
self.atomic_blocks = []
self.needs_rollback = False
# Reset parameters defining when to close/health-check the connection.
self.health_check_enabled = self.settings_dict["CONN_HEALTH_CHECKS"]
max_age = self.settings_dict["CONN_MAX_AGE"]
self.close_at = None if max_age is None else time.monotonic() + max_age
self.closed_in_transaction = False
self.errors_occurred = False
# New connections are healthy.
self.health_check_done = True
# Establish the connection
conn_params = self.get_connection_params()
self.connection = self.get_new_connection(conn_params)
self.set_autocommit(self.settings_dict["AUTOCOMMIT"])
self.init_connection_state()
connection_created.send(sender=self.__class__, connection=self)
self.run_on_commit = []
def check_settings(self):
if self.settings_dict["TIME_ZONE"] is not None and not settings.USE_TZ:
raise ImproperlyConfigured(
"Connection '%s' cannot set TIME_ZONE because USE_TZ is False."
% self.alias
)
@async_unsafe
def ensure_connection(self):
"""Guarantee that a connection to the database is established."""
if self.connection is None:
with self.wrap_database_errors:
self.connect()
# ##### Backend-specific wrappers for PEP-249 connection methods #####
def _prepare_cursor(self, cursor):
"""
Validate the connection is usable and perform database cursor wrapping.
"""
self.validate_thread_sharing()
if self.queries_logged:
wrapped_cursor = self.make_debug_cursor(cursor)
else:
wrapped_cursor = self.make_cursor(cursor)
return wrapped_cursor
def _cursor(self, name=None):
self.close_if_health_check_failed()
self.ensure_connection()
with self.wrap_database_errors:
return self._prepare_cursor(self.create_cursor(name))
def _commit(self):
if self.connection is not None:
with debug_transaction(self, "COMMIT"), self.wrap_database_errors:
return self.connection.commit()
def _rollback(self):
if self.connection is not None:
with debug_transaction(self, "ROLLBACK"), self.wrap_database_errors:
return self.connection.rollback()
def _close(self):
if self.connection is not None:
with self.wrap_database_errors:
return self.connection.close()
# ##### Generic wrappers for PEP-249 connection methods #####
@async_unsafe
def cursor(self):
"""Create a cursor, opening a connection if necessary."""
return self._cursor()
@async_unsafe
def commit(self):
"""Commit a transaction and reset the dirty flag."""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._commit()
# A successful commit means that the database connection works.
self.errors_occurred = False
self.run_commit_hooks_on_set_autocommit_on = True
@async_unsafe
def rollback(self):
"""Roll back a transaction and reset the dirty flag."""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._rollback()
# A successful rollback means that the database connection works.
self.errors_occurred = False
self.needs_rollback = False
self.run_on_commit = []
@async_unsafe
def close(self):
"""Close the connection to the database."""
self.validate_thread_sharing()
self.run_on_commit = []
# Don't call validate_no_atomic_block() to avoid making it difficult
# to get rid of a connection in an invalid state. The next connect()
# will reset the transaction state anyway.
if self.closed_in_transaction or self.connection is None:
return
try:
self._close()
finally:
if self.in_atomic_block:
self.closed_in_transaction = True
self.needs_rollback = True
else:
self.connection = None
# ##### Backend-specific savepoint management methods #####
def _savepoint(self, sid):
with self.cursor() as cursor:
cursor.execute(self.ops.savepoint_create_sql(sid))
def _savepoint_rollback(self, sid):
with self.cursor() as cursor:
cursor.execute(self.ops.savepoint_rollback_sql(sid))
def _savepoint_commit(self, sid):
with self.cursor() as cursor:
cursor.execute(self.ops.savepoint_commit_sql(sid))
def _savepoint_allowed(self):
# Savepoints cannot be created outside a transaction
return self.features.uses_savepoints and not self.get_autocommit()
# ##### Generic savepoint management methods #####
@async_unsafe
def savepoint(self):
"""
Create a savepoint inside the current transaction. Return an
identifier for the savepoint that will be used for the subsequent
rollback or commit. Do nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
thread_ident = _thread.get_ident()
tid = str(thread_ident).replace("-", "")
self.savepoint_state += 1
sid = "s%s_x%d" % (tid, self.savepoint_state)
self.validate_thread_sharing()
self._savepoint(sid)
return sid
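    # Editor's sketch of manual savepoint use (mirroring what an 'atomic'
    # block does internally; assumes a connection already in a transaction):
    #     sid = connection.savepoint()
    #     try:
    #         ...  # risky queries
    #     except DatabaseError:
    #         connection.savepoint_rollback(sid)
    #     else:
    #         connection.savepoint_commit(sid)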
@async_unsafe
def savepoint_rollback(self, sid):
"""
Roll back to a savepoint. Do nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
self.validate_thread_sharing()
self._savepoint_rollback(sid)
# Remove any callbacks registered while this savepoint was active.
self.run_on_commit = [
(sids, func, robust)
for (sids, func, robust) in self.run_on_commit
if sid not in sids
]
@async_unsafe
def savepoint_commit(self, sid):
"""
Release a savepoint. Do nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
self.validate_thread_sharing()
self._savepoint_commit(sid)
@async_unsafe
def clean_savepoints(self):
"""
Reset the counter used to generate unique savepoint ids in this thread.
"""
self.savepoint_state = 0
# ##### Backend-specific transaction management methods #####
def _set_autocommit(self, autocommit):
"""
Backend-specific implementation to enable or disable autocommit.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require a _set_autocommit() method"
)
# ##### Generic transaction management methods #####
def get_autocommit(self):
"""Get the autocommit state."""
self.ensure_connection()
return self.autocommit
def set_autocommit(
self, autocommit, force_begin_transaction_with_broken_autocommit=False
):
"""
Enable or disable autocommit.
The usual way to start a transaction is to turn autocommit off.
SQLite does not properly start a transaction when disabling
autocommit. To avoid this buggy behavior and to actually enter a new
transaction, an explicit BEGIN is required. Using
force_begin_transaction_with_broken_autocommit=True will issue an
explicit BEGIN with SQLite. This option will be ignored for other
backends.
"""
self.validate_no_atomic_block()
self.close_if_health_check_failed()
self.ensure_connection()
start_transaction_under_autocommit = (
force_begin_transaction_with_broken_autocommit
and not autocommit
and hasattr(self, "_start_transaction_under_autocommit")
)
if start_transaction_under_autocommit:
self._start_transaction_under_autocommit()
elif autocommit:
self._set_autocommit(autocommit)
else:
with debug_transaction(self, "BEGIN"):
self._set_autocommit(autocommit)
self.autocommit = autocommit
if autocommit and self.run_commit_hooks_on_set_autocommit_on:
self.run_and_clear_commit_hooks()
self.run_commit_hooks_on_set_autocommit_on = False
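    # Editor's sketch of manual transaction management built on this method
    # (what transaction.atomic() does, stripped to its core):
    #     connection.set_autocommit(False)
    #     try:
    #         ...  # these queries share one transaction
    #         connection.commit()
    #     finally:
    #         connection.set_autocommit(True)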
def get_rollback(self):
"""Get the "needs rollback" flag -- for *advanced use* only."""
if not self.in_atomic_block:
raise TransactionManagementError(
"The rollback flag doesn't work outside of an 'atomic' block."
)
return self.needs_rollback
def set_rollback(self, rollback):
"""
Set or unset the "needs rollback" flag -- for *advanced use* only.
"""
if not self.in_atomic_block:
raise TransactionManagementError(
"The rollback flag doesn't work outside of an 'atomic' block."
)
self.needs_rollback = rollback
def validate_no_atomic_block(self):
"""Raise an error if an atomic block is active."""
if self.in_atomic_block:
raise TransactionManagementError(
"This is forbidden when an 'atomic' block is active."
)
def validate_no_broken_transaction(self):
if self.needs_rollback:
raise TransactionManagementError(
"An error occurred in the current transaction. You can't "
"execute queries until the end of the 'atomic' block."
) from self.rollback_exc
# ##### Foreign key constraints checks handling #####
@contextmanager
def constraint_checks_disabled(self):
"""
Disable foreign key constraint checking.
"""
disabled = self.disable_constraint_checking()
try:
yield
finally:
if disabled:
self.enable_constraint_checking()
def disable_constraint_checking(self):
"""
Backends can implement as needed to temporarily disable foreign key
constraint checking. Should return True if the constraints were
disabled and will need to be reenabled.
"""
return False
def enable_constraint_checking(self):
"""
Backends can implement as needed to re-enable foreign key constraint
checking.
"""
pass
def check_constraints(self, table_names=None):
"""
Backends can override this method if they can apply constraint
checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE"). Should raise an
IntegrityError if any invalid foreign key references are encountered.
"""
pass
# ##### Connection termination handling #####
def is_usable(self):
"""
Test if the database connection is usable.
This method may assume that self.connection is not None.
Actual implementations should take care not to raise exceptions
as that may prevent Django from recycling unusable connections.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseWrapper may require an is_usable() method"
)
def close_if_health_check_failed(self):
"""Close existing connection if it fails a health check."""
if (
self.connection is None
or not self.health_check_enabled
or self.health_check_done
):
return
if not self.is_usable():
self.close()
self.health_check_done = True
def close_if_unusable_or_obsolete(self):
"""
Close the current connection if unrecoverable errors have occurred
or if it outlived its maximum age.
"""
if self.connection is not None:
self.health_check_done = False
# If the application didn't restore the original autocommit setting,
# don't take chances, drop the connection.
if self.get_autocommit() != self.settings_dict["AUTOCOMMIT"]:
self.close()
return
# If an exception other than DataError or IntegrityError occurred
# since the last commit / rollback, check if the connection works.
if self.errors_occurred:
if self.is_usable():
self.errors_occurred = False
self.health_check_done = True
else:
self.close()
return
if self.close_at is not None and time.monotonic() >= self.close_at:
self.close()
return
# ##### Thread safety handling #####
@property
def allow_thread_sharing(self):
with self._thread_sharing_lock:
return self._thread_sharing_count > 0
def inc_thread_sharing(self):
with self._thread_sharing_lock:
self._thread_sharing_count += 1
def dec_thread_sharing(self):
with self._thread_sharing_lock:
if self._thread_sharing_count <= 0:
raise RuntimeError(
"Cannot decrement the thread sharing count below zero."
)
self._thread_sharing_count -= 1
def validate_thread_sharing(self):
"""
        Validate that the connection isn't accessed by a thread other than the
        one that originally created it, unless the connection was explicitly
authorized to be shared between threads (via the `inc_thread_sharing()`
method). Raise an exception if the validation fails.
"""
if not (self.allow_thread_sharing or self._thread_ident == _thread.get_ident()):
raise DatabaseError(
"DatabaseWrapper objects created in a "
"thread can only be used in that same thread. The object "
"with alias '%s' was created in thread id %s and this is "
"thread id %s." % (self.alias, self._thread_ident, _thread.get_ident())
)
# ##### Miscellaneous #####
def prepare_database(self):
"""
Hook to do any database check or preparation, generally called before
migrating a project or an app.
"""
pass
@cached_property
def wrap_database_errors(self):
"""
Context manager and decorator that re-throws backend-specific database
exceptions using Django's common wrappers.
"""
return DatabaseErrorWrapper(self)
def chunked_cursor(self):
"""
Return a cursor that tries to avoid caching in the database (if
supported by the database), otherwise return a regular cursor.
"""
return self.cursor()
def make_debug_cursor(self, cursor):
"""Create a cursor that logs all queries in self.queries_log."""
return utils.CursorDebugWrapper(cursor, self)
def make_cursor(self, cursor):
"""Create a cursor without debug logging."""
return utils.CursorWrapper(cursor, self)
@contextmanager
def temporary_connection(self):
"""
Context manager that ensures that a connection is established, and
if it opened one, closes it to avoid leaving a dangling connection.
This is useful for operations outside of the request-response cycle.
Provide a cursor: with self.temporary_connection() as cursor: ...
"""
must_close = self.connection is None
try:
with self.cursor() as cursor:
yield cursor
finally:
if must_close:
self.close()
@contextmanager
def _nodb_cursor(self):
"""
Return a cursor from an alternative connection to be used when there is
no need to access the main database, specifically for test db
creation/deletion. This also prevents the production database from
being exposed to potential child threads while (or after) the test
database is destroyed. Refs #10868, #17786, #16969.
"""
conn = self.__class__({**self.settings_dict, "NAME": None}, alias=NO_DB_ALIAS)
try:
with conn.cursor() as cursor:
yield cursor
finally:
conn.close()
def schema_editor(self, *args, **kwargs):
"""
Return a new instance of this backend's SchemaEditor.
"""
if self.SchemaEditorClass is None:
raise NotImplementedError(
"The SchemaEditorClass attribute of this database wrapper is still None"
)
return self.SchemaEditorClass(self, *args, **kwargs)
def on_commit(self, func, robust=False):
if not callable(func):
raise TypeError("on_commit()'s callback must be a callable.")
if self.in_atomic_block:
# Transaction in progress; save for execution on commit.
self.run_on_commit.append((set(self.savepoint_ids), func, robust))
elif not self.get_autocommit():
raise TransactionManagementError(
"on_commit() cannot be used in manual transaction management"
)
else:
# No transaction in progress and in autocommit mode; execute
# immediately.
if robust:
try:
func()
except Exception as e:
logger.error(
f"Error calling {func.__qualname__} in on_commit() (%s).",
e,
exc_info=True,
)
else:
func()
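    # Editor's illustration: the public entry point is
    # django.db.transaction.on_commit(), which delegates here, e.g.
    #     transaction.on_commit(lambda: notify(user), robust=True)
    # (notify/user are placeholders) runs the callback after commit and, with
    # robust=True, logs its exceptions instead of propagating them.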
def run_and_clear_commit_hooks(self):
self.validate_no_atomic_block()
current_run_on_commit = self.run_on_commit
self.run_on_commit = []
while current_run_on_commit:
_, func, robust = current_run_on_commit.pop(0)
if robust:
try:
func()
except Exception as e:
logger.error(
f"Error calling {func.__qualname__} in on_commit() during "
f"transaction (%s).",
e,
exc_info=True,
)
else:
func()
@contextmanager
def execute_wrapper(self, wrapper):
"""
Return a context manager under which the wrapper is applied to suitable
database query executions.
"""
self.execute_wrappers.append(wrapper)
try:
yield
finally:
self.execute_wrappers.pop()
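    # Editor's illustration: a wrapper takes (execute, sql, params, many,
    # context) and must call through (or refuse to), e.g. a query blocker:
    #     def blocker(execute, sql, params, many, context):
    #         raise RuntimeError("no database access allowed here")
    #     with connection.execute_wrapper(blocker):
    #         ...  # any query in this block raises RuntimeError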
def copy(self, alias=None):
"""
Return a copy of this connection.
For tests that require two connections to the same database.
"""
settings_dict = copy.deepcopy(self.settings_dict)
if alias is None:
alias = self.alias
return type(self)(settings_dict, alias)
|
6d025a227a1659a74fe444a3114f69bfa47287441ab53c7f30843899a0c9678b | import datetime
import decimal
import json
from importlib import import_module
import sqlparse
from django.conf import settings
from django.db import NotSupportedError, transaction
from django.db.backends import utils
from django.utils import timezone
from django.utils.encoding import force_str
class BaseDatabaseOperations:
"""
Encapsulate backend-specific differences, such as the way a backend
performs ordering or calculates the ID of a recently-inserted row.
"""
compiler_module = "django.db.models.sql.compiler"
# Integer field safe ranges by `internal_type` as documented
# in docs/ref/models/fields.txt.
integer_field_ranges = {
"SmallIntegerField": (-32768, 32767),
"IntegerField": (-2147483648, 2147483647),
"BigIntegerField": (-9223372036854775808, 9223372036854775807),
"PositiveBigIntegerField": (0, 9223372036854775807),
"PositiveSmallIntegerField": (0, 32767),
"PositiveIntegerField": (0, 2147483647),
"SmallAutoField": (-32768, 32767),
"AutoField": (-2147483648, 2147483647),
"BigAutoField": (-9223372036854775808, 9223372036854775807),
}
set_operators = {
"union": "UNION",
"intersection": "INTERSECT",
"difference": "EXCEPT",
}
# Mapping of Field.get_internal_type() (typically the model field's class
# name) to the data type to use for the Cast() function, if different from
# DatabaseWrapper.data_types.
cast_data_types = {}
# CharField data type if the max_length argument isn't provided.
cast_char_field_without_max_length = None
# Start and end points for window expressions.
PRECEDING = "PRECEDING"
FOLLOWING = "FOLLOWING"
UNBOUNDED_PRECEDING = "UNBOUNDED " + PRECEDING
UNBOUNDED_FOLLOWING = "UNBOUNDED " + FOLLOWING
CURRENT_ROW = "CURRENT ROW"
    # Prefix for EXPLAIN queries, or None if EXPLAIN isn't supported.
explain_prefix = None
def __init__(self, connection):
self.connection = connection
self._cache = None
def autoinc_sql(self, table, column):
"""
Return any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def bulk_batch_size(self, fields, objs):
"""
Return the maximum allowed batch size for the backend. The fields
are the fields going to be inserted in the batch, the objs contains
all the objects to be inserted.
"""
return len(objs)
def format_for_duration_arithmetic(self, sql):
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a "
"format_for_duration_arithmetic() method."
)
def cache_key_culling_sql(self):
"""
Return an SQL query that retrieves the first cache key greater than the
        n smallest cache keys.
This is used by the 'db' cache backend to determine where to start
culling.
"""
cache_key = self.quote_name("cache_key")
return f"SELECT {cache_key} FROM %s ORDER BY {cache_key} LIMIT 1 OFFSET %%s"
def unification_cast_sql(self, output_field):
"""
Given a field instance, return the SQL that casts the result of a union
to that type. The resulting string should contain a '%s' placeholder
for the expression being cast.
"""
return "%s"
def date_extract_sql(self, lookup_type, sql, params):
"""
Given a lookup_type of 'year', 'month', or 'day', return the SQL that
        extracts a value from the given date expression.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a date_extract_sql() "
"method"
)
def date_trunc_sql(self, lookup_type, sql, params, tzname=None):
"""
Given a lookup_type of 'year', 'month', or 'day', return the SQL that
        truncates the given date or datetime expression to a date object
with only the given specificity.
If `tzname` is provided, the given value is truncated in a specific
timezone.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a date_trunc_sql() "
"method."
)
def datetime_cast_date_sql(self, sql, params, tzname):
"""
Return the SQL to cast a datetime value to date value.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a "
"datetime_cast_date_sql() method."
)
def datetime_cast_time_sql(self, sql, params, tzname):
"""
Return the SQL to cast a datetime value to time value.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a "
"datetime_cast_time_sql() method"
)
def datetime_extract_sql(self, lookup_type, sql, params, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute', or
'second', return the SQL that extracts a value from the given
        datetime expression.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a datetime_extract_sql() "
"method"
)
def datetime_trunc_sql(self, lookup_type, sql, params, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute', or
        'second', return the SQL that truncates the given datetime
        expression to a datetime object with only the given specificity.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a datetime_trunc_sql() "
"method"
)
def time_trunc_sql(self, lookup_type, sql, params, tzname=None):
"""
Given a lookup_type of 'hour', 'minute' or 'second', return the SQL
        that truncates the given time or datetime expression to a time
object with only the given specificity.
If `tzname` is provided, the given value is truncated in a specific
timezone.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a time_trunc_sql() method"
)
def time_extract_sql(self, lookup_type, sql, params):
"""
Given a lookup_type of 'hour', 'minute', or 'second', return the SQL
        that extracts a value from the given time expression.
"""
return self.date_extract_sql(lookup_type, sql, params)
def deferrable_sql(self):
"""
Return the SQL to make a constraint "initially deferred" during a
CREATE TABLE statement.
"""
return ""
def distinct_sql(self, fields, params):
"""
Return an SQL DISTINCT clause which removes duplicate rows from the
result set. If any fields are given, only check the given fields for
duplicates.
"""
if fields:
raise NotSupportedError(
"DISTINCT ON fields is not supported by this database backend"
)
else:
return ["DISTINCT"], []
def fetch_returned_insert_columns(self, cursor, returning_params):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table, return the newly created data.
"""
return cursor.fetchone()
def field_cast_sql(self, db_type, internal_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR') and an internal type
(e.g. 'GenericIPAddressField'), return the SQL to cast it before using
it in a WHERE statement. The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def force_no_ordering(self):
"""
Return a list used in the "ORDER BY" clause to force no ordering at
all. Return an empty list to include nothing in the ordering.
"""
return []
def for_update_sql(self, nowait=False, skip_locked=False, of=(), no_key=False):
"""
Return the FOR UPDATE SQL clause to lock rows for an update operation.
"""
return "FOR%s UPDATE%s%s%s" % (
" NO KEY" if no_key else "",
" OF %s" % ", ".join(of) if of else "",
" NOWAIT" if nowait else "",
" SKIP LOCKED" if skip_locked else "",
)
def _get_limit_offset_params(self, low_mark, high_mark):
offset = low_mark or 0
if high_mark is not None:
return (high_mark - offset), offset
elif offset:
return self.connection.ops.no_limit_value(), offset
return None, offset
def limit_offset_sql(self, low_mark, high_mark):
"""Return LIMIT/OFFSET SQL clause."""
limit, offset = self._get_limit_offset_params(low_mark, high_mark)
return " ".join(
sql
for sql in (
("LIMIT %d" % limit) if limit else None,
("OFFSET %d" % offset) if offset else None,
)
if sql
)
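    # Editor's illustration: limit_offset_sql(5, 15) -> "LIMIT 10 OFFSET 5",
    # while limit_offset_sql(0, 10) -> "LIMIT 10" (a zero offset is omitted).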
def last_executed_query(self, cursor, sql, params):
"""
Return a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
# Convert params to contain string values.
def to_string(s):
return force_str(s, strings_only=True, errors="replace")
if isinstance(params, (list, tuple)):
u_params = tuple(to_string(val) for val in params)
elif params is None:
u_params = ()
else:
u_params = {to_string(k): to_string(v) for k, v in params.items()}
return "QUERY = %r - PARAMS = %r" % (sql, u_params)
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, return the newly created ID.
`pk_name` is the name of the primary-key column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type, internal_type=None):
"""
Return the string to use in a query when performing lookups
("contains", "like", etc.). It should contain a '%s' placeholder for
the column being searched against.
"""
return "%s"
def max_in_list_size(self):
"""
Return the maximum number of items that can be passed in a single 'IN'
list condition, or None if the backend does not impose a limit.
"""
return None
def max_name_length(self):
"""
Return the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
        Return the value to use for the LIMIT when we want "LIMIT
infinity". Return None if the limit clause can be omitted in this case.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a no_limit_value() method"
)
def pk_default_value(self):
"""
Return the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return "DEFAULT"
def prepare_sql_script(self, sql):
"""
Take an SQL script that may contain multiple lines and return a list
of statements to feed to successive cursor.execute() calls.
Since few databases are able to process raw SQL scripts in a single
cursor.execute() call and PEP 249 doesn't talk about this use case,
the default implementation is conservative.
"""
return [
sqlparse.format(statement, strip_comments=True)
for statement in sqlparse.split(sql)
if statement
]
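    # Editor's illustration: a small script is split into separate
    # statements with comments stripped, roughly
    #     prepare_sql_script("SELECT 1; -- ping\nSELECT 2;")
    #     -> ["SELECT 1;", "SELECT 2;"]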
def process_clob(self, value):
"""
Return the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_columns(self, fields):
"""
For backends that support returning columns as part of an insert query,
return the SQL and params to append to the INSERT query. The returned
fragment should contain a format string to hold the appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Return the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if self._cache is None:
self._cache = import_module(self.compiler_module)
return getattr(self._cache, compiler_name)
def quote_name(self, name):
"""
Return a quoted version of the given table, index, or column name. Do
not quote the given name if it's already been quoted.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a quote_name() method"
)
def regex_lookup(self, lookup_type):
"""
Return the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). It should contain a '%s'
placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), raise
NotImplementedError.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseOperations may require a regex_lookup() method"
)
def savepoint_create_sql(self, sid):
"""
Return the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
return "SAVEPOINT %s" % self.quote_name(sid)
def savepoint_commit_sql(self, sid):
"""
Return the SQL for committing the given savepoint.
"""
return "RELEASE SAVEPOINT %s" % self.quote_name(sid)
def savepoint_rollback_sql(self, sid):
"""
Return the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid)
def set_time_zone_sql(self):
"""
Return the SQL that will set the connection's time zone.
Return '' if the backend doesn't support time zones.
"""
return ""
def sql_flush(self, style, tables, *, reset_sequences=False, allow_cascade=False):
"""
Return a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
If `reset_sequences` is True, the list includes SQL statements required
to reset the sequences.
The `allow_cascade` argument determines whether truncation may cascade
        to tables with foreign keys pointing to the tables being truncated.
PostgreSQL requires a cascade even if these tables are empty.
"""
raise NotImplementedError(
"subclasses of BaseDatabaseOperations must provide an sql_flush() method"
)
def execute_sql_flush(self, sql_list):
"""Execute a list of SQL statements to flush the database."""
with transaction.atomic(
using=self.connection.alias,
savepoint=self.connection.features.can_rollback_ddl,
):
with self.connection.cursor() as cursor:
for sql in sql_list:
cursor.execute(sql)
def sequence_reset_by_name_sql(self, style, sequences):
"""
Return a list of the SQL statements required to reset sequences
passed in `sequences`.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return []
def sequence_reset_sql(self, style, model_list):
"""
Return a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""Return the SQL statement required to start a transaction."""
return "BEGIN;"
def end_transaction_sql(self, success=True):
"""Return the SQL statement required to end a transaction."""
if not success:
return "ROLLBACK;"
return "COMMIT;"
def tablespace_sql(self, tablespace, inline=False):
"""
Return the SQL that will be used in a query to define the tablespace.
Return '' if the backend doesn't support tablespaces.
If `inline` is True, append the SQL to a row; otherwise append it to
the entire CREATE TABLE or CREATE INDEX statement.
"""
return ""
def prep_for_like_query(self, x):
"""Prepare a value for use in a LIKE query."""
return str(x).replace("\\", "\\\\").replace("%", r"\%").replace("_", r"\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
def validate_autopk_value(self, value):
"""
Certain backends do not accept some values for "serial" fields
(for example zero in MySQL). Raise a ValueError if the value is
invalid, otherwise return the validated value.
"""
return value
def adapt_unknown_value(self, value):
"""
Transform a value to something compatible with the backend driver.
This method only depends on the type of the value. It's designed for
cases where the target type isn't known, such as .raw() SQL queries.
As a consequence it may not work perfectly in all circumstances.
"""
if isinstance(value, datetime.datetime): # must be before date
return self.adapt_datetimefield_value(value)
elif isinstance(value, datetime.date):
return self.adapt_datefield_value(value)
elif isinstance(value, datetime.time):
return self.adapt_timefield_value(value)
elif isinstance(value, decimal.Decimal):
return self.adapt_decimalfield_value(value)
else:
return value
def adapt_datefield_value(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return str(value)
def adapt_datetimefield_value(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, "resolve_expression"):
return value
return str(value)
def adapt_timefield_value(self, value):
"""
Transform a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, "resolve_expression"):
return value
if timezone.is_aware(value):
raise ValueError("Django does not support timezone-aware times.")
return str(value)
def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
return utils.format_number(value, max_digits, decimal_places)
def adapt_ipaddressfield_value(self, value):
"""
Transform a string representation of an IP address into the expected
type for the backend driver.
"""
return value or None
def adapt_json_value(self, value, encoder):
return json.dumps(value, cls=encoder)
def year_lookup_bounds_for_date_field(self, value, iso_year=False):
"""
        Return a two-element list with the lower and upper bounds to be used
with a BETWEEN operator to query a DateField value using a year
lookup.
`value` is an int, containing the looked-up year.
If `iso_year` is True, return bounds for ISO-8601 week-numbering years.
"""
if iso_year:
first = datetime.date.fromisocalendar(value, 1, 1)
second = datetime.date.fromisocalendar(
value + 1, 1, 1
) - datetime.timedelta(days=1)
else:
first = datetime.date(value, 1, 1)
second = datetime.date(value, 12, 31)
first = self.adapt_datefield_value(first)
second = self.adapt_datefield_value(second)
return [first, second]
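    # Editor's illustration: year_lookup_bounds_for_date_field(2024) returns
    # roughly ["2024-01-01", "2024-12-31"] (after adapt_datefield_value());
    # with iso_year=True the bounds follow the ISO week-numbering year.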
def year_lookup_bounds_for_datetime_field(self, value, iso_year=False):
"""
        Return a two-element list with the lower and upper bounds to be used
with a BETWEEN operator to query a DateTimeField value using a year
lookup.
`value` is an int, containing the looked-up year.
If `iso_year` is True, return bounds for ISO-8601 week-numbering years.
"""
if iso_year:
first = datetime.datetime.fromisocalendar(value, 1, 1)
second = datetime.datetime.fromisocalendar(
value + 1, 1, 1
) - datetime.timedelta(microseconds=1)
else:
first = datetime.datetime(value, 1, 1)
second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999)
if settings.USE_TZ:
tz = timezone.get_current_timezone()
first = timezone.make_aware(first, tz)
second = timezone.make_aware(second, tz)
first = self.adapt_datetimefield_value(first)
second = self.adapt_datetimefield_value(second)
return [first, second]
def get_db_converters(self, expression):
"""
Return a list of functions needed to convert field data.
Some field types on some backends do not provide data in the correct
format, this is the hook for converter functions.
"""
return []
def convert_durationfield_value(self, value, expression, connection):
if value is not None:
return datetime.timedelta(0, 0, value)
def check_expression_support(self, expression):
"""
Check that the backend supports the provided expression.
This is used on specific backends to rule out known expressions
that have problematic or nonexistent implementations. If the
expression has a known problem, the backend should raise
NotSupportedError.
"""
pass
def conditional_expression_supported_in_where_clause(self, expression):
"""
Return True, if the conditional expression is supported in the WHERE
clause.
"""
return True
def combine_expression(self, connector, sub_expressions):
"""
Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions).
"""
conn = " %s " % connector
return conn.join(sub_expressions)
def combine_duration_expression(self, connector, sub_expressions):
return self.combine_expression(connector, sub_expressions)
def binary_placeholder_sql(self, value):
"""
Some backends require special syntax to insert binary content (MySQL
for example uses '_binary %s').
"""
return "%s"
def modify_insert_params(self, placeholder, params):
"""
Allow modification of insert parameters. Needed for Oracle Spatial
backend due to #10888.
"""
return params
def integer_field_range(self, internal_type):
"""
Given an integer field internal type (e.g. 'PositiveIntegerField'),
return a tuple of the (min_value, max_value) form representing the
range of the column type bound to the field.
"""
return self.integer_field_ranges[internal_type]
def subtract_temporals(self, internal_type, lhs, rhs):
if self.connection.features.supports_temporal_subtraction:
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
return "(%s - %s)" % (lhs_sql, rhs_sql), (*lhs_params, *rhs_params)
raise NotSupportedError(
"This backend does not support %s subtraction." % internal_type
)
def window_frame_start(self, start):
if isinstance(start, int):
if start < 0:
return "%d %s" % (abs(start), self.PRECEDING)
elif start == 0:
return self.CURRENT_ROW
elif start is None:
return self.UNBOUNDED_PRECEDING
raise ValueError(
"start argument must be a negative integer, zero, or None, but got '%s'."
% start
)
def window_frame_end(self, end):
if isinstance(end, int):
if end == 0:
return self.CURRENT_ROW
elif end > 0:
return "%d %s" % (end, self.FOLLOWING)
elif end is None:
return self.UNBOUNDED_FOLLOWING
raise ValueError(
"end argument must be a positive integer, zero, or None, but got '%s'."
% end
)
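    # Editor's illustration: window_frame_start(-3) -> "3 PRECEDING",
    # window_frame_start(0) -> "CURRENT ROW", and window_frame_end(None) ->
    # "UNBOUNDED FOLLOWING".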
def window_frame_rows_start_end(self, start=None, end=None):
"""
Return SQL for start and end points in an OVER clause window frame.
"""
if not self.connection.features.supports_over_clause:
raise NotSupportedError("This backend does not support window expressions.")
return self.window_frame_start(start), self.window_frame_end(end)
def window_frame_range_start_end(self, start=None, end=None):
start_, end_ = self.window_frame_rows_start_end(start, end)
features = self.connection.features
if features.only_supports_unbounded_with_preceding_and_following and (
(start and start < 0) or (end and end > 0)
):
raise NotSupportedError(
"%s only supports UNBOUNDED together with PRECEDING and "
"FOLLOWING." % self.connection.display_name
)
return start_, end_
def explain_query_prefix(self, format=None, **options):
if not self.connection.features.supports_explaining_query_execution:
raise NotSupportedError(
"This backend does not support explaining query execution."
)
if format:
supported_formats = self.connection.features.supported_explain_formats
normalized_format = format.upper()
if normalized_format not in supported_formats:
msg = "%s is not a recognized format." % normalized_format
if supported_formats:
msg += " Allowed formats: %s" % ", ".join(sorted(supported_formats))
else:
msg += (
f" {self.connection.display_name} does not support any formats."
)
raise ValueError(msg)
if options:
raise ValueError("Unknown options: %s" % ", ".join(sorted(options.keys())))
return self.explain_prefix
def insert_statement(self, on_conflict=None):
return "INSERT INTO"
def on_conflict_suffix_sql(self, fields, on_conflict, update_fields, unique_fields):
return ""
|
eed8a4e9ecb02a49d181510f69e29fb96ee3206b08f47592cb35432e06db546a | import json
from functools import lru_cache, partial
from psycopg2.extras import Inet
from psycopg2.extras import Json as Jsonb
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.backends.utils import split_tzname_delta
from django.db.models.constants import OnConflict
@lru_cache
def get_json_dumps(encoder):
if encoder is None:
return json.dumps
return partial(json.dumps, cls=encoder)
class DatabaseOperations(BaseDatabaseOperations):
cast_char_field_without_max_length = "varchar"
explain_prefix = "EXPLAIN"
explain_options = frozenset(
[
"ANALYZE",
"BUFFERS",
"COSTS",
"SETTINGS",
"SUMMARY",
"TIMING",
"VERBOSE",
"WAL",
]
)
cast_data_types = {
"AutoField": "integer",
"BigAutoField": "bigint",
"SmallAutoField": "smallint",
}
def unification_cast_sql(self, output_field):
internal_type = output_field.get_internal_type()
if internal_type in (
"GenericIPAddressField",
"IPAddressField",
"TimeField",
"UUIDField",
):
# PostgreSQL will resolve a union as type 'text' if input types are
# 'unknown'.
# https://www.postgresql.org/docs/current/typeconv-union-case.html
# These fields cannot be implicitly cast back in the default
# PostgreSQL configuration so we need to explicitly cast them.
# We must also remove components of the type within brackets:
# varchar(255) -> varchar.
return (
"CAST(%%s AS %s)" % output_field.db_type(self.connection).split("(")[0]
)
return "%s"
def date_extract_sql(self, lookup_type, sql, params):
# https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
extract_sql = f"EXTRACT(%s FROM {sql})"
extract_param = lookup_type
if lookup_type == "week_day":
# For consistency across backends, we return Sunday=1, Saturday=7.
extract_sql = f"EXTRACT(%s FROM {sql}) + 1"
extract_param = "dow"
elif lookup_type == "iso_week_day":
extract_param = "isodow"
elif lookup_type == "iso_year":
extract_param = "isoyear"
return extract_sql, (extract_param, *params)
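    # Editor's illustration (hypothetical "pub_date" column): for
    # lookup_type="week_day" this returns roughly
    #     ('EXTRACT(%s FROM "pub_date") + 1', ("dow", *params))
    # shifting PostgreSQL's Sunday=0 numbering to Django's Sunday=1.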
def date_trunc_sql(self, lookup_type, sql, params, tzname=None):
sql, params = self._convert_sql_to_tz(sql, params, tzname)
# https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
return f"DATE_TRUNC(%s, {sql})", (lookup_type, *params)
def _prepare_tzname_delta(self, tzname):
tzname, sign, offset = split_tzname_delta(tzname)
if offset:
sign = "-" if sign == "+" else "+"
return f"{tzname}{sign}{offset}"
return tzname
def _convert_sql_to_tz(self, sql, params, tzname):
if tzname and settings.USE_TZ:
tzname_param = self._prepare_tzname_delta(tzname)
return f"{sql} AT TIME ZONE %s", (*params, tzname_param)
return sql, params
def datetime_cast_date_sql(self, sql, params, tzname):
sql, params = self._convert_sql_to_tz(sql, params, tzname)
return f"({sql})::date", params
def datetime_cast_time_sql(self, sql, params, tzname):
sql, params = self._convert_sql_to_tz(sql, params, tzname)
return f"({sql})::time", params
def datetime_extract_sql(self, lookup_type, sql, params, tzname):
sql, params = self._convert_sql_to_tz(sql, params, tzname)
if lookup_type == "second":
# Truncate fractional seconds.
return (
f"EXTRACT(%s FROM DATE_TRUNC(%s, {sql}))",
("second", "second", *params),
)
return self.date_extract_sql(lookup_type, sql, params)
def datetime_trunc_sql(self, lookup_type, sql, params, tzname):
sql, params = self._convert_sql_to_tz(sql, params, tzname)
# https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
return f"DATE_TRUNC(%s, {sql})", (lookup_type, *params)
def time_extract_sql(self, lookup_type, sql, params):
if lookup_type == "second":
# Truncate fractional seconds.
return (
f"EXTRACT(%s FROM DATE_TRUNC(%s, {sql}))",
("second", "second", *params),
)
return self.date_extract_sql(lookup_type, sql, params)
def time_trunc_sql(self, lookup_type, sql, params, tzname=None):
sql, params = self._convert_sql_to_tz(sql, params, tzname)
return f"DATE_TRUNC(%s, {sql})::time", (lookup_type, *params)
def deferrable_sql(self):
return " DEFERRABLE INITIALLY DEFERRED"
def fetch_returned_insert_rows(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table, return the tuple of returned data.
"""
return cursor.fetchall()
def lookup_cast(self, lookup_type, internal_type=None):
lookup = "%s"
# Cast text lookups to text to allow things like filter(x__contains=4)
if lookup_type in (
"iexact",
"contains",
"icontains",
"startswith",
"istartswith",
"endswith",
"iendswith",
"regex",
"iregex",
):
if internal_type in ("IPAddressField", "GenericIPAddressField"):
lookup = "HOST(%s)"
# RemovedInDjango51Warning.
elif internal_type in ("CICharField", "CIEmailField", "CITextField"):
lookup = "%s::citext"
else:
lookup = "%s::text"
# Use UPPER(x) for case-insensitive lookups; it's faster.
if lookup_type in ("iexact", "icontains", "istartswith", "iendswith"):
lookup = "UPPER(%s)" % lookup
return lookup
def no_limit_value(self):
return None
def prepare_sql_script(self, sql):
return [sql]
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def set_time_zone_sql(self):
return "SET TIME ZONE %s"
def sql_flush(self, style, tables, *, reset_sequences=False, allow_cascade=False):
if not tables:
return []
# Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows us
# to truncate tables referenced by a foreign key in any other table.
sql_parts = [
style.SQL_KEYWORD("TRUNCATE"),
", ".join(style.SQL_FIELD(self.quote_name(table)) for table in tables),
]
if reset_sequences:
sql_parts.append(style.SQL_KEYWORD("RESTART IDENTITY"))
if allow_cascade:
sql_parts.append(style.SQL_KEYWORD("CASCADE"))
return ["%s;" % " ".join(sql_parts)]
def sequence_reset_by_name_sql(self, style, sequences):
# 'ALTER SEQUENCE sequence_name RESTART WITH 1;'... style SQL statements
# to reset sequence indices
sql = []
for sequence_info in sequences:
table_name = sequence_info["table"]
# 'id' will be the case if it's an m2m using an autogenerated
# intermediate table (see BaseDatabaseIntrospection.sequence_list).
column_name = sequence_info["column"] or "id"
sql.append(
"%s setval(pg_get_serial_sequence('%s','%s'), 1, false);"
% (
style.SQL_KEYWORD("SELECT"),
style.SQL_TABLE(self.quote_name(table_name)),
style.SQL_FIELD(column_name),
)
)
return sql
def tablespace_sql(self, tablespace, inline=False):
if inline:
return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
else:
return "TABLESPACE %s" % self.quote_name(tablespace)
def sequence_reset_sql(self, style, model_list):
from django.db import models
output = []
qn = self.quote_name
for model in model_list:
# Use `coalesce` to set the sequence for each model to the max pk
# value if there are records, or 1 if there are none. Set the
# `is_called` property (the third argument to `setval`) to true if
# there are records (as the max pk value is already in use),
# otherwise set it to false. Use pg_get_serial_sequence to get the
# underlying sequence name from the table name and column name.
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
output.append(
"%s setval(pg_get_serial_sequence('%s','%s'), "
"coalesce(max(%s), 1), max(%s) %s null) %s %s;"
% (
style.SQL_KEYWORD("SELECT"),
style.SQL_TABLE(qn(model._meta.db_table)),
style.SQL_FIELD(f.column),
style.SQL_FIELD(qn(f.column)),
style.SQL_FIELD(qn(f.column)),
style.SQL_KEYWORD("IS NOT"),
style.SQL_KEYWORD("FROM"),
style.SQL_TABLE(qn(model._meta.db_table)),
)
)
# Only one AutoField is allowed per model, so don't bother
# continuing.
break
return output
def prep_for_iexact_query(self, x):
return x
def max_name_length(self):
"""
Return the maximum length of an identifier.
The maximum length of an identifier is 63 by default, but can be
changed by recompiling PostgreSQL after editing the NAMEDATALEN
macro in src/include/pg_config_manual.h.
This implementation returns 63, but can be overridden by a custom
database backend that inherits most of its behavior from this one.
"""
return 63
def distinct_sql(self, fields, params):
if fields:
params = [param for param_list in params for param in param_list]
return (["DISTINCT ON (%s)" % ", ".join(fields)], params)
else:
return ["DISTINCT"], []
def last_executed_query(self, cursor, sql, params):
# https://www.psycopg.org/docs/cursor.html#cursor.query
# The query attribute is a Psycopg extension to the DB API 2.0.
if cursor.query is not None:
return cursor.query.decode()
return None
def return_insert_columns(self, fields):
if not fields:
return "", ()
columns = [
"%s.%s"
% (
self.quote_name(field.model._meta.db_table),
self.quote_name(field.column),
)
for field in fields
]
return "RETURNING %s" % ", ".join(columns), ()
def bulk_insert_sql(self, fields, placeholder_rows):
placeholder_rows_sql = (", ".join(row) for row in placeholder_rows)
values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql)
return "VALUES " + values_sql
def adapt_datefield_value(self, value):
return value
def adapt_datetimefield_value(self, value):
return value
def adapt_timefield_value(self, value):
return value
def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None):
return value
def adapt_ipaddressfield_value(self, value):
if value:
return Inet(value)
return None
def adapt_json_value(self, value, encoder):
return Jsonb(value, dumps=get_json_dumps(encoder))
def subtract_temporals(self, internal_type, lhs, rhs):
if internal_type == "DateField":
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
params = (*lhs_params, *rhs_params)
return "(interval '1 day' * (%s - %s))" % (lhs_sql, rhs_sql), params
return super().subtract_temporals(internal_type, lhs, rhs)
def explain_query_prefix(self, format=None, **options):
extra = {}
# Normalize options.
if options:
options = {
name.upper(): "true" if value else "false"
for name, value in options.items()
}
for valid_option in self.explain_options:
value = options.pop(valid_option, None)
if value is not None:
extra[valid_option] = value
prefix = super().explain_query_prefix(format, **options)
if format:
extra["FORMAT"] = format
if extra:
prefix += " (%s)" % ", ".join("%s %s" % i for i in extra.items())
return prefix
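    # Editor's illustration: explain_query_prefix("json", analyze=True)
    # returns roughly 'EXPLAIN (ANALYZE true, FORMAT json)' (option order
    # may vary).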
def on_conflict_suffix_sql(self, fields, on_conflict, update_fields, unique_fields):
if on_conflict == OnConflict.IGNORE:
return "ON CONFLICT DO NOTHING"
if on_conflict == OnConflict.UPDATE:
return "ON CONFLICT(%s) DO UPDATE SET %s" % (
", ".join(map(self.quote_name, unique_fields)),
", ".join(
[
f"{field} = EXCLUDED.{field}"
for field in map(self.quote_name, update_fields)
]
),
)
return super().on_conflict_suffix_sql(
fields,
on_conflict,
update_fields,
unique_fields,
)
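    # Editor's illustration: for OnConflict.UPDATE with unique_fields=["id"]
    # and update_fields=["name"], this renders
    #     ON CONFLICT("id") DO UPDATE SET "name" = EXCLUDED."name"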
|
c1706550f5e7b32139a528ae9cc3cb56950712110a14117c8e73d18caa3f3c7c | import copy
from decimal import Decimal
from django.apps.registry import Apps
from django.db import NotSupportedError
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.backends.ddl_references import Statement
from django.db.backends.utils import strip_quotes
from django.db.models import UniqueConstraint
from django.db.transaction import atomic
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_delete_table = "DROP TABLE %(table)s"
sql_create_fk = None
sql_create_inline_fk = (
"REFERENCES %(to_table)s (%(to_column)s) DEFERRABLE INITIALLY DEFERRED"
)
sql_create_column_inline_fk = sql_create_inline_fk
sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s"
sql_create_unique = "CREATE UNIQUE INDEX %(name)s ON %(table)s (%(columns)s)"
sql_delete_unique = "DROP INDEX %(name)s"
def __enter__(self):
# Some SQLite schema alterations need foreign key constraints to be
        # disabled. Enforce it here for the duration of the schema edit.
if not self.connection.disable_constraint_checking():
raise NotSupportedError(
"SQLite schema editor cannot be used while foreign key "
"constraint checks are enabled. Make sure to disable them "
"before entering a transaction.atomic() context because "
"SQLite does not support disabling them in the middle of "
"a multi-statement transaction."
)
return super().__enter__()
def __exit__(self, exc_type, exc_value, traceback):
self.connection.check_constraints()
super().__exit__(exc_type, exc_value, traceback)
self.connection.enable_constraint_checking()
def quote_value(self, value):
# The backend "mostly works" without this function and there are use
# cases for compiling Python without the sqlite3 libraries (e.g.
# security hardening).
try:
import sqlite3
value = sqlite3.adapt(value)
except ImportError:
pass
except sqlite3.ProgrammingError:
pass
# Manual emulation of SQLite parameter quoting
if isinstance(value, bool):
return str(int(value))
elif isinstance(value, (Decimal, float, int)):
return str(value)
elif isinstance(value, str):
return "'%s'" % value.replace("'", "''")
elif value is None:
return "NULL"
elif isinstance(value, (bytes, bytearray, memoryview)):
# Bytes are only allowed for BLOB fields, encoded as string
# literals containing hexadecimal data and preceded by a single "X"
# character.
return "X'%s'" % value.hex()
else:
raise ValueError(
"Cannot quote parameter value %r of type %s" % (value, type(value))
)
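    # Illustrative sketch (example values only) of the emulated quoting:
    #
    #   quote_value(True)        -> "1"
    #   quote_value(3.5)         -> "3.5"
    #   quote_value("it's")      -> "'it''s'"
    #   quote_value(None)        -> "NULL"
    #   quote_value(b"\x00\xff") -> "X'00ff'"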
def prepare_default(self, value):
return self.quote_value(value)
def _is_referenced_by_fk_constraint(
self, table_name, column_name=None, ignore_self=False
):
"""
Return whether or not the provided table name is referenced by another
one. If `column_name` is specified, only references pointing to that
column are considered. If `ignore_self` is True, self-referential
constraints are ignored.
"""
with self.connection.cursor() as cursor:
for other_table in self.connection.introspection.get_table_list(cursor):
if ignore_self and other_table.name == table_name:
continue
relations = self.connection.introspection.get_relations(
cursor, other_table.name
)
for constraint_column, constraint_table in relations.values():
if constraint_table == table_name and (
column_name is None or constraint_column == column_name
):
return True
return False
def alter_db_table(
self, model, old_db_table, new_db_table, disable_constraints=True
):
if (
not self.connection.features.supports_atomic_references_rename
and disable_constraints
and self._is_referenced_by_fk_constraint(old_db_table)
):
if self.connection.in_atomic_block:
raise NotSupportedError(
(
"Renaming the %r table while in a transaction is not "
"supported on SQLite < 3.26 because it would break referential "
"integrity. Try adding `atomic = False` to the Migration class."
)
% old_db_table
)
self.connection.enable_constraint_checking()
super().alter_db_table(model, old_db_table, new_db_table)
self.connection.disable_constraint_checking()
else:
super().alter_db_table(model, old_db_table, new_db_table)
def alter_field(self, model, old_field, new_field, strict=False):
if not self._field_should_be_altered(old_field, new_field):
return
old_field_name = old_field.name
table_name = model._meta.db_table
_, old_column_name = old_field.get_attname_column()
if (
new_field.name != old_field_name
and not self.connection.features.supports_atomic_references_rename
and self._is_referenced_by_fk_constraint(
table_name, old_column_name, ignore_self=True
)
):
if self.connection.in_atomic_block:
raise NotSupportedError(
(
"Renaming the %r.%r column while in a transaction is not "
"supported on SQLite < 3.26 because it would break referential "
"integrity. Try adding `atomic = False` to the Migration class."
)
% (model._meta.db_table, old_field_name)
)
with atomic(self.connection.alias):
super().alter_field(model, old_field, new_field, strict=strict)
# Follow SQLite's documented procedure for performing changes
# that don't affect the on-disk content.
# https://sqlite.org/lang_altertable.html#otheralter
with self.connection.cursor() as cursor:
schema_version = cursor.execute("PRAGMA schema_version").fetchone()[
0
]
cursor.execute("PRAGMA writable_schema = 1")
references_template = ' REFERENCES "%s" ("%%s") ' % table_name
new_column_name = new_field.get_attname_column()[1]
search = references_template % old_column_name
replacement = references_template % new_column_name
cursor.execute(
"UPDATE sqlite_master SET sql = replace(sql, %s, %s)",
(search, replacement),
)
cursor.execute("PRAGMA schema_version = %d" % (schema_version + 1))
cursor.execute("PRAGMA writable_schema = 0")
# The integrity check will raise an exception and rollback
# the transaction if the sqlite_master updates corrupt the
# database.
cursor.execute("PRAGMA integrity_check")
# Perform a VACUUM to refresh the database representation from
# the sqlite_master table.
with self.connection.cursor() as cursor:
cursor.execute("VACUUM")
else:
super().alter_field(model, old_field, new_field, strict=strict)
def _remake_table(
self, model, create_field=None, delete_field=None, alter_fields=None
):
"""
Shortcut to transform a model from old_model into new_model
This follows the correct procedure to perform non-rename or column
addition operations based on SQLite's documentation
https://www.sqlite.org/lang_altertable.html#caution
The essential steps are:
1. Create a table with the updated definition called "new__app_model"
2. Copy the data from the existing "app_model" table to the new table
3. Drop the "app_model" table
4. Rename the "new__app_model" table to "app_model"
5. Restore any index of the previous "app_model" table.
"""
# Self-referential fields must be recreated rather than copied from
# the old model to ensure their remote_field.field_name doesn't refer
# to an altered field.
def is_self_referential(f):
return f.is_relation and f.remote_field.model is model
# Work out the new fields dict / mapping
body = {
f.name: f.clone() if is_self_referential(f) else f
for f in model._meta.local_concrete_fields
}
# Since mapping might mix column names and default values,
# its values must be already quoted.
mapping = {
f.column: self.quote_name(f.column)
for f in model._meta.local_concrete_fields
}
# This maps field names (not columns) for things like unique_together
rename_mapping = {}
# If any of the new or altered fields is introducing a new PK,
# remove the old one
restore_pk_field = None
alter_fields = alter_fields or []
if getattr(create_field, "primary_key", False) or any(
getattr(new_field, "primary_key", False) for _, new_field in alter_fields
):
for name, field in list(body.items()):
if field.primary_key and not any(
# Do not remove the old primary key when an altered field
# that introduces a primary key is the same field.
name == new_field.name
for _, new_field in alter_fields
):
field.primary_key = False
restore_pk_field = field
if field.auto_created:
del body[name]
del mapping[field.column]
# Add in any created fields
if create_field:
body[create_field.name] = create_field
# Choose a default and insert it into the copy map
if not create_field.many_to_many and create_field.concrete:
mapping[create_field.column] = self.prepare_default(
self.effective_default(create_field),
)
# Add in any altered fields
for alter_field in alter_fields:
old_field, new_field = alter_field
body.pop(old_field.name, None)
mapping.pop(old_field.column, None)
body[new_field.name] = new_field
if old_field.null and not new_field.null:
case_sql = "coalesce(%(col)s, %(default)s)" % {
"col": self.quote_name(old_field.column),
"default": self.prepare_default(self.effective_default(new_field)),
}
mapping[new_field.column] = case_sql
else:
mapping[new_field.column] = self.quote_name(old_field.column)
rename_mapping[old_field.name] = new_field.name
# Remove any deleted fields
if delete_field:
del body[delete_field.name]
del mapping[delete_field.column]
# Remove any implicit M2M tables
if (
delete_field.many_to_many
and delete_field.remote_field.through._meta.auto_created
):
return self.delete_model(delete_field.remote_field.through)
# Work inside a new app registry
apps = Apps()
# Work out the new value of unique_together, taking renames into
# account
unique_together = [
[rename_mapping.get(n, n) for n in unique]
for unique in model._meta.unique_together
]
# Work out the new value for index_together, taking renames into
# account
index_together = [
[rename_mapping.get(n, n) for n in index]
for index in model._meta.index_together
]
indexes = model._meta.indexes
if delete_field:
indexes = [
index for index in indexes if delete_field.name not in index.fields
]
constraints = list(model._meta.constraints)
# Provide isolated instances of the fields to the new model body so
# that the existing model's internals aren't interfered with when
# the dummy model is constructed.
body_copy = copy.deepcopy(body)
# Construct a new model with the new fields to allow self referential
# primary key to resolve to. This model won't ever be materialized as a
# table and solely exists for foreign key reference resolution purposes.
# This wouldn't be required if the schema editor was operating on model
# states instead of rendered models.
meta_contents = {
"app_label": model._meta.app_label,
"db_table": model._meta.db_table,
"unique_together": unique_together,
"index_together": index_together,
"indexes": indexes,
"constraints": constraints,
"apps": apps,
}
meta = type("Meta", (), meta_contents)
body_copy["Meta"] = meta
body_copy["__module__"] = model.__module__
type(model._meta.object_name, model.__bases__, body_copy)
# Construct a model with a renamed table name.
body_copy = copy.deepcopy(body)
meta_contents = {
"app_label": model._meta.app_label,
"db_table": "new__%s" % strip_quotes(model._meta.db_table),
"unique_together": unique_together,
"index_together": index_together,
"indexes": indexes,
"constraints": constraints,
"apps": apps,
}
meta = type("Meta", (), meta_contents)
body_copy["Meta"] = meta
body_copy["__module__"] = model.__module__
new_model = type("New%s" % model._meta.object_name, model.__bases__, body_copy)
# Create a new table with the updated schema.
self.create_model(new_model)
# Copy data from the old table into the new table
self.execute(
"INSERT INTO %s (%s) SELECT %s FROM %s"
% (
self.quote_name(new_model._meta.db_table),
", ".join(self.quote_name(x) for x in mapping),
", ".join(mapping.values()),
self.quote_name(model._meta.db_table),
)
)
# Delete the old table to make way for the new
self.delete_model(model, handle_autom2m=False)
        # Rename the new table to take the place of the old
self.alter_db_table(
new_model,
new_model._meta.db_table,
model._meta.db_table,
disable_constraints=False,
)
# Run deferred SQL on correct table
for sql in self.deferred_sql:
self.execute(sql)
self.deferred_sql = []
# Fix any PK-removed field
if restore_pk_field:
restore_pk_field.primary_key = True
def delete_model(self, model, handle_autom2m=True):
if handle_autom2m:
super().delete_model(model)
else:
# Delete the table (and only that)
self.execute(
self.sql_delete_table
% {
"table": self.quote_name(model._meta.db_table),
}
)
# Remove all deferred statements referencing the deleted table.
for sql in list(self.deferred_sql):
if isinstance(sql, Statement) and sql.references_table(
model._meta.db_table
):
self.deferred_sql.remove(sql)
def add_field(self, model, field):
"""Create a field on a model."""
# Special-case implicit M2M tables.
if field.many_to_many and field.remote_field.through._meta.auto_created:
self.create_model(field.remote_field.through)
elif (
# Primary keys and unique fields are not supported in ALTER TABLE
# ADD COLUMN.
field.primary_key
or field.unique
or
            # Fields with default values cannot be handled by ALTER TABLE ADD
# COLUMN statement because DROP DEFAULT is not supported in
# ALTER TABLE.
not field.null
or self.effective_default(field) is not None
):
self._remake_table(model, create_field=field)
else:
super().add_field(model, field)
def remove_field(self, model, field):
"""
Remove a field from a model. Usually involves deleting a column,
but for M2Ms may involve deleting a table.
"""
# M2M fields are a special case
if field.many_to_many:
# For implicit M2M tables, delete the auto-created table
if field.remote_field.through._meta.auto_created:
self.delete_model(field.remote_field.through)
# For explicit "through" M2M fields, do nothing
elif (
self.connection.features.can_alter_table_drop_column
# Primary keys, unique fields, indexed fields, and foreign keys are
# not supported in ALTER TABLE DROP COLUMN.
and not field.primary_key
and not field.unique
and not field.db_index
and not (field.remote_field and field.db_constraint)
):
super().remove_field(model, field)
# For everything else, remake.
else:
# It might not actually have a column behind it
if field.db_parameters(connection=self.connection)["type"] is None:
return
self._remake_table(model, delete_field=field)
def _alter_field(
self,
model,
old_field,
new_field,
old_type,
new_type,
old_db_params,
new_db_params,
strict=False,
):
"""Perform a "physical" (non-ManyToMany) field update."""
# Use "ALTER TABLE ... RENAME COLUMN" if only the column name
# changed and there aren't any constraints.
if (
self.connection.features.can_alter_table_rename_column
and old_field.column != new_field.column
and self.column_sql(model, old_field) == self.column_sql(model, new_field)
and not (
old_field.remote_field
and old_field.db_constraint
or new_field.remote_field
and new_field.db_constraint
)
):
return self.execute(
self._rename_field_sql(
model._meta.db_table, old_field, new_field, new_type
)
)
# Alter by remaking table
self._remake_table(model, alter_fields=[(old_field, new_field)])
# Rebuild tables with FKs pointing to this field.
old_collation = old_db_params.get("collation")
new_collation = new_db_params.get("collation")
if new_field.unique and (
old_type != new_type or old_collation != new_collation
):
related_models = set()
opts = new_field.model._meta
for remote_field in opts.related_objects:
# Ignore self-relationship since the table was already rebuilt.
if remote_field.related_model == model:
continue
if not remote_field.many_to_many:
if remote_field.field_name == new_field.name:
related_models.add(remote_field.related_model)
elif new_field.primary_key and remote_field.through._meta.auto_created:
related_models.add(remote_field.through)
if new_field.primary_key:
for many_to_many in opts.many_to_many:
# Ignore self-relationship since the table was already rebuilt.
if many_to_many.related_model == model:
continue
if many_to_many.remote_field.through._meta.auto_created:
related_models.add(many_to_many.remote_field.through)
for related_model in related_models:
self._remake_table(related_model)
def _alter_many_to_many(self, model, old_field, new_field, strict):
"""Alter M2Ms to repoint their to= endpoints."""
if (
old_field.remote_field.through._meta.db_table
== new_field.remote_field.through._meta.db_table
):
# The field name didn't change, but some options did, so we have to
# propagate this altering.
self._remake_table(
old_field.remote_field.through,
alter_fields=[
(
# The field that points to the target model is needed,
# so that table can be remade with the new m2m field -
# this is m2m_reverse_field_name().
old_field.remote_field.through._meta.get_field(
old_field.m2m_reverse_field_name()
),
new_field.remote_field.through._meta.get_field(
new_field.m2m_reverse_field_name()
),
),
(
# The field that points to the model itself is needed,
# so that table can be remade with the new self field -
# this is m2m_field_name().
old_field.remote_field.through._meta.get_field(
old_field.m2m_field_name()
),
new_field.remote_field.through._meta.get_field(
new_field.m2m_field_name()
),
),
],
)
return
# Make a new through table
self.create_model(new_field.remote_field.through)
# Copy the data across
self.execute(
"INSERT INTO %s (%s) SELECT %s FROM %s"
% (
self.quote_name(new_field.remote_field.through._meta.db_table),
", ".join(
[
"id",
new_field.m2m_column_name(),
new_field.m2m_reverse_name(),
]
),
", ".join(
[
"id",
old_field.m2m_column_name(),
old_field.m2m_reverse_name(),
]
),
self.quote_name(old_field.remote_field.through._meta.db_table),
)
)
# Delete the old through table
self.delete_model(old_field.remote_field.through)
def add_constraint(self, model, constraint):
if isinstance(constraint, UniqueConstraint) and (
constraint.condition
or constraint.contains_expressions
or constraint.include
or constraint.deferrable
):
super().add_constraint(model, constraint)
else:
self._remake_table(model)
def remove_constraint(self, model, constraint):
if isinstance(constraint, UniqueConstraint) and (
constraint.condition
or constraint.contains_expressions
or constraint.include
or constraint.deferrable
):
super().remove_constraint(model, constraint)
else:
self._remake_table(model)
def _collate_sql(self, collation):
return "COLLATE " + collation
|
799834f4daffbffd075510c3c63a9b17cc597f07d878433c066fb7b99fd84b93 | from io import BytesIO
from django.conf import settings
from django.core import signals
from django.core.handlers import base
from django.http import HttpRequest, QueryDict, parse_cookie
from django.urls import set_script_prefix
from django.utils.encoding import repercent_broken_unicode
from django.utils.functional import cached_property
from django.utils.regex_helper import _lazy_re_compile
_slashes_re = _lazy_re_compile(rb"/+")
class LimitedStream:
"""Wrap another stream to disallow reading it past a number of bytes."""
def __init__(self, stream, limit):
self.stream = stream
self.remaining = limit
self.buffer = b""
def _read_limited(self, size=None):
if size is None or size > self.remaining:
size = self.remaining
if size == 0:
return b""
result = self.stream.read(size)
self.remaining -= len(result)
return result
def read(self, size=None):
if size is None:
result = self.buffer + self._read_limited()
self.buffer = b""
elif size < len(self.buffer):
result = self.buffer[:size]
self.buffer = self.buffer[size:]
else: # size >= len(self.buffer)
result = self.buffer + self._read_limited(size - len(self.buffer))
self.buffer = b""
return result
def readline(self, size=None):
while b"\n" not in self.buffer and (size is None or len(self.buffer) < size):
if size:
# since size is not None here, len(self.buffer) < size
chunk = self._read_limited(size - len(self.buffer))
else:
chunk = self._read_limited()
if not chunk:
break
self.buffer += chunk
sio = BytesIO(self.buffer)
if size:
line = sio.readline(size)
else:
line = sio.readline()
self.buffer = sio.read()
return line
def close(self):
pass
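# A minimal usage sketch (not part of the original module) of how
# LimitedStream caps reads at the declared limit:
#
#   from io import BytesIO
#   stream = LimitedStream(BytesIO(b"abcdefgh"), limit=5)
#   stream.read(3)  # b'abc'
#   stream.read()   # b'de' -- only the 2 bytes left within the limit
#   stream.read()   # b'' -- the limit is exhausted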
class WSGIRequest(HttpRequest):
non_picklable_attrs = HttpRequest.non_picklable_attrs | frozenset(["environ"])
meta_non_picklable_attrs = frozenset(["wsgi.errors", "wsgi.input"])
def __init__(self, environ):
script_name = get_script_name(environ)
# If PATH_INFO is empty (e.g. accessing the SCRIPT_NAME URL without a
# trailing slash), operate as if '/' was requested.
path_info = get_path_info(environ) or "/"
self.environ = environ
self.path_info = path_info
# be careful to only replace the first slash in the path because of
# http://test/something and http://test//something being different as
# stated in RFC 3986.
self.path = "%s/%s" % (script_name.rstrip("/"), path_info.replace("/", "", 1))
self.META = environ
self.META["PATH_INFO"] = path_info
self.META["SCRIPT_NAME"] = script_name
self.method = environ["REQUEST_METHOD"].upper()
# Set content_type, content_params, and encoding.
self._set_content_type_params(environ)
try:
content_length = int(environ.get("CONTENT_LENGTH"))
except (ValueError, TypeError):
content_length = 0
self._stream = LimitedStream(self.environ["wsgi.input"], content_length)
self._read_started = False
self.resolver_match = None
def __getstate__(self):
state = super().__getstate__()
for attr in self.meta_non_picklable_attrs:
if attr in state["META"]:
del state["META"][attr]
return state
def _get_scheme(self):
return self.environ.get("wsgi.url_scheme")
@cached_property
def GET(self):
# The WSGI spec says 'QUERY_STRING' may be absent.
raw_query_string = get_bytes_from_wsgi(self.environ, "QUERY_STRING", "")
return QueryDict(raw_query_string, encoding=self._encoding)
def _get_post(self):
if not hasattr(self, "_post"):
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
@cached_property
def COOKIES(self):
raw_cookie = get_str_from_wsgi(self.environ, "HTTP_COOKIE", "")
return parse_cookie(raw_cookie)
@property
def FILES(self):
if not hasattr(self, "_files"):
self._load_post_and_files()
return self._files
POST = property(_get_post, _set_post)
class WSGIHandler(base.BaseHandler):
request_class = WSGIRequest
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.load_middleware()
def __call__(self, environ, start_response):
set_script_prefix(get_script_name(environ))
signals.request_started.send(sender=self.__class__, environ=environ)
request = self.request_class(environ)
response = self.get_response(request)
response._handler_class = self.__class__
status = "%d %s" % (response.status_code, response.reason_phrase)
response_headers = [
*response.items(),
*(("Set-Cookie", c.output(header="")) for c in response.cookies.values()),
]
start_response(status, response_headers)
if getattr(response, "file_to_stream", None) is not None and environ.get(
"wsgi.file_wrapper"
):
# If `wsgi.file_wrapper` is used the WSGI server does not call
# .close on the response, but on the file wrapper. Patch it to use
# response.close instead which takes care of closing all files.
response.file_to_stream.close = response.close
response = environ["wsgi.file_wrapper"](
response.file_to_stream, response.block_size
)
return response
def get_path_info(environ):
"""Return the HTTP request's PATH_INFO as a string."""
path_info = get_bytes_from_wsgi(environ, "PATH_INFO", "/")
return repercent_broken_unicode(path_info).decode()
def get_script_name(environ):
"""
Return the equivalent of the HTTP request's SCRIPT_NAME environment
variable. If Apache mod_rewrite is used, return what would have been
the script name prior to any rewriting (so it's the script name as seen
from the client's perspective), unless the FORCE_SCRIPT_NAME setting is
set (to anything).
"""
if settings.FORCE_SCRIPT_NAME is not None:
return settings.FORCE_SCRIPT_NAME
# If Apache's mod_rewrite had a whack at the URL, Apache set either
# SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
# rewrites. Unfortunately not every web server (lighttpd!) passes this
# information through all the time, so FORCE_SCRIPT_NAME, above, is still
# needed.
script_url = get_bytes_from_wsgi(environ, "SCRIPT_URL", "") or get_bytes_from_wsgi(
environ, "REDIRECT_URL", ""
)
if script_url:
if b"//" in script_url:
# mod_wsgi squashes multiple successive slashes in PATH_INFO,
# do the same with script_url before manipulating paths (#17133).
script_url = _slashes_re.sub(b"/", script_url)
path_info = get_bytes_from_wsgi(environ, "PATH_INFO", "")
script_name = script_url[: -len(path_info)] if path_info else script_url
else:
script_name = get_bytes_from_wsgi(environ, "SCRIPT_NAME", "")
return script_name.decode()
def get_bytes_from_wsgi(environ, key, default):
"""
Get a value from the WSGI environ dictionary as bytes.
key and default should be strings.
"""
value = environ.get(key, default)
# Non-ASCII values in the WSGI environ are arbitrarily decoded with
# ISO-8859-1. This is wrong for Django websites where UTF-8 is the default.
# Re-encode to recover the original bytestring.
return value.encode("iso-8859-1")
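# Illustrative sketch (hypothetical environ): a UTF-8 path arrives in the
# environ decoded as ISO-8859-1, so re-encoding recovers the original bytes:
#
#   environ = {"PATH_INFO": "/caf\xc3\xa9"}   # b"/caf\xc3\xa9" read as latin-1
#   get_bytes_from_wsgi(environ, "PATH_INFO", "")  # b"/caf\xc3\xa9"
#   get_path_info(environ)                         # "/café"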
def get_str_from_wsgi(environ, key, default):
"""
Get a value from the WSGI environ dictionary as str.
key and default should be str objects.
"""
value = get_bytes_from_wsgi(environ, key, default)
return value.decode(errors="replace")
|
1bcc5660df7dc363098cc13098affc740a4d68fc243b59b0776d0840cc360999 | import mimetypes
from email import charset as Charset
from email import encoders as Encoders
from email import generator, message_from_string
from email.errors import HeaderParseError
from email.header import Header
from email.headerregistry import Address, parser
from email.message import Message
from email.mime.base import MIMEBase
from email.mime.message import MIMEMessage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formataddr, formatdate, getaddresses, make_msgid
from io import BytesIO, StringIO
from pathlib import Path
from django.conf import settings
from django.core.mail.utils import DNS_NAME
from django.utils.encoding import force_str, punycode
# Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from
# some spam filters.
utf8_charset = Charset.Charset("utf-8")
utf8_charset.body_encoding = None # Python defaults to BASE64
utf8_charset_qp = Charset.Charset("utf-8")
utf8_charset_qp.body_encoding = Charset.QP
# Default MIME type to use on attachments (if it is not explicitly given
# and cannot be guessed).
DEFAULT_ATTACHMENT_MIME_TYPE = "application/octet-stream"
RFC5322_EMAIL_LINE_LENGTH_LIMIT = 998
class BadHeaderError(ValueError):
pass
# Header names that contain structured address data (RFC 5322).
ADDRESS_HEADERS = {
"from",
"sender",
"reply-to",
"to",
"cc",
"bcc",
"resent-from",
"resent-sender",
"resent-to",
"resent-cc",
"resent-bcc",
}
def forbid_multi_line_headers(name, val, encoding):
"""Forbid multi-line headers to prevent header injection."""
encoding = encoding or settings.DEFAULT_CHARSET
val = str(val) # val may be lazy
if "\n" in val or "\r" in val:
raise BadHeaderError(
"Header values can't contain newlines (got %r for header %r)" % (val, name)
)
try:
val.encode("ascii")
except UnicodeEncodeError:
if name.lower() in ADDRESS_HEADERS:
val = ", ".join(
sanitize_address(addr, encoding) for addr in getaddresses((val,))
)
else:
val = Header(val, encoding).encode()
else:
if name.lower() == "subject":
val = Header(val).encode()
return name, val
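# Illustrative sketch (example values only): newlines in a header value are
# rejected outright, while non-ASCII values are RFC 2047 encoded:
#
#   forbid_multi_line_headers("Subject", "Hi\nBcc: x@y", "utf-8")
#   -> raises BadHeaderError
#   forbid_multi_line_headers("Subject", "Grüße", "utf-8")
#   -> ("Subject", "=?utf-8?b?R3LDvMOfZQ==?=")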
def sanitize_address(addr, encoding):
"""
Format a pair of (name, address) or an email address string.
"""
address = None
if not isinstance(addr, tuple):
addr = force_str(addr)
try:
token, rest = parser.get_mailbox(addr)
except (HeaderParseError, ValueError, IndexError):
raise ValueError('Invalid address "%s"' % addr)
else:
if rest:
# The entire email address must be parsed.
raise ValueError(
'Invalid address; only %s could be parsed from "%s"' % (token, addr)
)
nm = token.display_name or ""
localpart = token.local_part
domain = token.domain or ""
else:
nm, address = addr
localpart, domain = address.rsplit("@", 1)
address_parts = nm + localpart + domain
if "\n" in address_parts or "\r" in address_parts:
raise ValueError("Invalid address; address parts cannot contain newlines.")
# Avoid UTF-8 encode, if it's possible.
try:
nm.encode("ascii")
nm = Header(nm).encode()
except UnicodeEncodeError:
nm = Header(nm, encoding).encode()
try:
localpart.encode("ascii")
except UnicodeEncodeError:
localpart = Header(localpart, encoding).encode()
domain = punycode(domain)
parsed_address = Address(username=localpart, domain=domain)
return formataddr((nm, parsed_address.addr_spec))
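# Illustrative sketch (example addresses only): both tuple and string forms
# are normalized, and non-ASCII domains are IDNA encoded:
#
#   sanitize_address(("Ana", "ana@example.com"), "utf-8")
#   -> "Ana <ana@example.com>"
#   sanitize_address("ana@exämple.com", "utf-8")
#   -> "ana@xn--exmple-cua.com"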
class MIMEMixin:
def as_string(self, unixfrom=False, linesep="\n"):
"""Return the entire formatted message as a string.
        Optional `unixfrom', when True, means to include the Unix From_
        envelope header.
This overrides the default as_string() implementation to not mangle
lines that begin with 'From '. See bug #13433 for details.
"""
fp = StringIO()
g = generator.Generator(fp, mangle_from_=False)
g.flatten(self, unixfrom=unixfrom, linesep=linesep)
return fp.getvalue()
def as_bytes(self, unixfrom=False, linesep="\n"):
"""Return the entire formatted message as bytes.
        Optional `unixfrom', when True, means to include the Unix From_
        envelope header.
This overrides the default as_bytes() implementation to not mangle
lines that begin with 'From '. See bug #13433 for details.
"""
fp = BytesIO()
g = generator.BytesGenerator(fp, mangle_from_=False)
g.flatten(self, unixfrom=unixfrom, linesep=linesep)
return fp.getvalue()
class SafeMIMEMessage(MIMEMixin, MIMEMessage):
def __setitem__(self, name, val):
# message/rfc822 attachments must be ASCII
name, val = forbid_multi_line_headers(name, val, "ascii")
MIMEMessage.__setitem__(self, name, val)
class SafeMIMEText(MIMEMixin, MIMEText):
def __init__(self, _text, _subtype="plain", _charset=None):
self.encoding = _charset
MIMEText.__init__(self, _text, _subtype=_subtype, _charset=_charset)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEText.__setitem__(self, name, val)
def set_payload(self, payload, charset=None):
if charset == "utf-8" and not isinstance(charset, Charset.Charset):
has_long_lines = any(
len(line.encode()) > RFC5322_EMAIL_LINE_LENGTH_LIMIT
for line in payload.splitlines()
)
# Quoted-Printable encoding has the side effect of shortening long
# lines, if any (#22561).
charset = utf8_charset_qp if has_long_lines else utf8_charset
MIMEText.set_payload(self, payload, charset=charset)
class SafeMIMEMultipart(MIMEMixin, MIMEMultipart):
def __init__(
self, _subtype="mixed", boundary=None, _subparts=None, encoding=None, **_params
):
self.encoding = encoding
MIMEMultipart.__init__(self, _subtype, boundary, _subparts, **_params)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEMultipart.__setitem__(self, name, val)
class EmailMessage:
"""A container for email information."""
content_subtype = "plain"
mixed_subtype = "mixed"
encoding = None # None => use settings default
def __init__(
self,
subject="",
body="",
from_email=None,
to=None,
bcc=None,
connection=None,
attachments=None,
headers=None,
cc=None,
reply_to=None,
):
"""
Initialize a single email message (which can be sent to multiple
recipients).
"""
if to:
if isinstance(to, str):
raise TypeError('"to" argument must be a list or tuple')
self.to = list(to)
else:
self.to = []
if cc:
if isinstance(cc, str):
raise TypeError('"cc" argument must be a list or tuple')
self.cc = list(cc)
else:
self.cc = []
if bcc:
if isinstance(bcc, str):
raise TypeError('"bcc" argument must be a list or tuple')
self.bcc = list(bcc)
else:
self.bcc = []
if reply_to:
if isinstance(reply_to, str):
raise TypeError('"reply_to" argument must be a list or tuple')
self.reply_to = list(reply_to)
else:
self.reply_to = []
self.from_email = from_email or settings.DEFAULT_FROM_EMAIL
self.subject = subject
self.body = body or ""
self.attachments = []
if attachments:
for attachment in attachments:
if isinstance(attachment, MIMEBase):
self.attach(attachment)
else:
self.attach(*attachment)
self.extra_headers = headers or {}
self.connection = connection
def get_connection(self, fail_silently=False):
from django.core.mail import get_connection
if not self.connection:
self.connection = get_connection(fail_silently=fail_silently)
return self.connection
def message(self):
encoding = self.encoding or settings.DEFAULT_CHARSET
msg = SafeMIMEText(self.body, self.content_subtype, encoding)
msg = self._create_message(msg)
msg["Subject"] = self.subject
msg["From"] = self.extra_headers.get("From", self.from_email)
self._set_list_header_if_not_empty(msg, "To", self.to)
self._set_list_header_if_not_empty(msg, "Cc", self.cc)
self._set_list_header_if_not_empty(msg, "Reply-To", self.reply_to)
# Email header names are case-insensitive (RFC 2045), so we have to
# accommodate that when doing comparisons.
header_names = [key.lower() for key in self.extra_headers]
if "date" not in header_names:
# formatdate() uses stdlib methods to format the date, which use
# the stdlib/OS concept of a timezone, however, Django sets the
# TZ environment variable based on the TIME_ZONE setting which
# will get picked up by formatdate().
msg["Date"] = formatdate(localtime=settings.EMAIL_USE_LOCALTIME)
if "message-id" not in header_names:
# Use cached DNS_NAME for performance
msg["Message-ID"] = make_msgid(domain=DNS_NAME)
for name, value in self.extra_headers.items():
if name.lower() != "from": # From is already handled
msg[name] = value
return msg
def recipients(self):
"""
Return a list of all recipients of the email (includes direct
addressees as well as Cc and Bcc entries).
"""
return [email for email in (self.to + self.cc + self.bcc) if email]
def send(self, fail_silently=False):
"""Send the email message."""
if not self.recipients():
# Don't bother creating the network connection if there's nobody to
# send to.
return 0
return self.get_connection(fail_silently).send_messages([self])
def attach(self, filename=None, content=None, mimetype=None):
"""
Attach a file with the given filename and content. The filename can
be omitted and the mimetype is guessed, if not provided.
If the first parameter is a MIMEBase subclass, insert it directly
into the resulting message attachments.
For a text/* mimetype (guessed or specified), when a bytes object is
specified as content, decode it as UTF-8. If that fails, set the
mimetype to DEFAULT_ATTACHMENT_MIME_TYPE and don't decode the content.
"""
if isinstance(filename, MIMEBase):
if content is not None or mimetype is not None:
raise ValueError(
"content and mimetype must not be given when a MIMEBase "
"instance is provided."
)
self.attachments.append(filename)
elif content is None:
raise ValueError("content must be provided.")
else:
mimetype = (
mimetype
or mimetypes.guess_type(filename)[0]
or DEFAULT_ATTACHMENT_MIME_TYPE
)
basetype, subtype = mimetype.split("/", 1)
if basetype == "text":
if isinstance(content, bytes):
try:
content = content.decode()
except UnicodeDecodeError:
# If mimetype suggests the file is text but it's
                        # actually binary, decoding raises a
                        # UnicodeDecodeError.
mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
self.attachments.append((filename, content, mimetype))
def attach_file(self, path, mimetype=None):
"""
Attach a file from the filesystem.
Set the mimetype to DEFAULT_ATTACHMENT_MIME_TYPE if it isn't specified
and cannot be guessed.
For a text/* mimetype (guessed or specified), decode the file's content
as UTF-8. If that fails, set the mimetype to
DEFAULT_ATTACHMENT_MIME_TYPE and don't decode the content.
"""
path = Path(path)
with path.open("rb") as file:
content = file.read()
self.attach(path.name, content, mimetype)
def _create_message(self, msg):
return self._create_attachments(msg)
def _create_attachments(self, msg):
if self.attachments:
encoding = self.encoding or settings.DEFAULT_CHARSET
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=encoding)
if self.body or body_msg.is_multipart():
msg.attach(body_msg)
for attachment in self.attachments:
if isinstance(attachment, MIMEBase):
msg.attach(attachment)
else:
msg.attach(self._create_attachment(*attachment))
return msg
def _create_mime_attachment(self, content, mimetype):
"""
Convert the content, mimetype pair into a MIME attachment object.
If the mimetype is message/rfc822, content may be an
email.Message or EmailMessage object, as well as a str.
"""
basetype, subtype = mimetype.split("/", 1)
if basetype == "text":
encoding = self.encoding or settings.DEFAULT_CHARSET
attachment = SafeMIMEText(content, subtype, encoding)
elif basetype == "message" and subtype == "rfc822":
# Bug #18967: Per RFC 2046 Section 5.2.1, message/rfc822
# attachments must not be base64 encoded.
if isinstance(content, EmailMessage):
# convert content into an email.Message first
content = content.message()
elif not isinstance(content, Message):
# For compatibility with existing code, parse the message
# into an email.Message object if it is not one already.
content = message_from_string(force_str(content))
attachment = SafeMIMEMessage(content, subtype)
else:
# Encode non-text attachments with base64.
attachment = MIMEBase(basetype, subtype)
attachment.set_payload(content)
Encoders.encode_base64(attachment)
return attachment
def _create_attachment(self, filename, content, mimetype=None):
"""
Convert the filename, content, mimetype triple into a MIME attachment
object.
"""
attachment = self._create_mime_attachment(content, mimetype)
if filename:
try:
filename.encode("ascii")
except UnicodeEncodeError:
filename = ("utf-8", "", filename)
attachment.add_header(
"Content-Disposition", "attachment", filename=filename
)
return attachment
def _set_list_header_if_not_empty(self, msg, header, values):
"""
Set msg's header, either from self.extra_headers, if present, or from
the values argument.
"""
if values:
try:
value = self.extra_headers[header]
except KeyError:
value = ", ".join(str(v) for v in values)
msg[header] = value
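# A minimal usage sketch (hypothetical addresses and content, not part of the
# original module):
#
#   email = EmailMessage(
#       subject="Report",
#       body="See the attached file.",
#       from_email="from@example.com",
#       to=["to@example.com"],
#   )
#   email.attach("report.csv", b"a,b\n1,2\n", "text/csv")
#   email.send()  # delivered through the backend from get_connection()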
class EmailMultiAlternatives(EmailMessage):
"""
A version of EmailMessage that makes it easy to send multipart/alternative
messages. For example, including text and HTML versions of the text is
made easier.
"""
alternative_subtype = "alternative"
def __init__(
self,
subject="",
body="",
from_email=None,
to=None,
bcc=None,
connection=None,
attachments=None,
headers=None,
alternatives=None,
cc=None,
reply_to=None,
):
"""
Initialize a single email message (which can be sent to multiple
recipients).
"""
super().__init__(
subject,
body,
from_email,
to,
bcc,
connection,
attachments,
headers,
cc,
reply_to,
)
self.alternatives = alternatives or []
def attach_alternative(self, content, mimetype):
"""Attach an alternative content representation."""
if content is None or mimetype is None:
raise ValueError("Both content and mimetype must be provided.")
self.alternatives.append((content, mimetype))
def _create_message(self, msg):
return self._create_attachments(self._create_alternatives(msg))
def _create_alternatives(self, msg):
encoding = self.encoding or settings.DEFAULT_CHARSET
if self.alternatives:
body_msg = msg
msg = SafeMIMEMultipart(
_subtype=self.alternative_subtype, encoding=encoding
)
if self.body:
msg.attach(body_msg)
for alternative in self.alternatives:
msg.attach(self._create_mime_attachment(*alternative))
return msg
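# A minimal usage sketch (hypothetical values): the plain-text body and an
# HTML alternative are wrapped in a multipart/alternative container:
#
#   email = EmailMultiAlternatives("Hi", "plain text", to=["to@example.com"])
#   email.attach_alternative("<p>HTML body</p>", "text/html")
#   email.message().get_content_type()  # "multipart/alternative"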
|
3ccbeea8c1aabece1ee27f0b0630b869cf5428f09fb4157981591180fd8a95f6 | from django.conf import settings
from django.utils.functional import LazyObject
from django.utils.module_loading import import_string
from .base import Storage
from .filesystem import FileSystemStorage
__all__ = (
"FileSystemStorage",
"Storage",
"DefaultStorage",
"default_storage",
"get_storage_class",
)
def get_storage_class(import_path=None):
return import_string(import_path or settings.DEFAULT_FILE_STORAGE)
class DefaultStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class()()
default_storage = DefaultStorage()
|
5c2c4846b92de5687957677b69854309ea700848b5985d7dcc78f9444f4ba7c7 | import os
import pathlib
from django.core.exceptions import SuspiciousFileOperation
from django.core.files import File
from django.core.files.utils import validate_file_name
from django.utils.crypto import get_random_string
from django.utils.text import get_valid_filename
class Storage:
"""
A base storage class, providing some default behaviors that all other
storage systems can inherit or override, as necessary.
"""
# The following methods represent a public interface to private methods.
# These shouldn't be overridden by subclasses unless absolutely necessary.
def open(self, name, mode="rb"):
"""Retrieve the specified file from storage."""
return self._open(name, mode)
def save(self, name, content, max_length=None):
"""
Save new content to the file specified by name. The content should be
a proper File object or any Python file-like object, ready to be read
from the beginning.
"""
# Get the proper name for the file, as it will actually be saved.
if name is None:
name = content.name
if not hasattr(content, "chunks"):
content = File(content, name)
name = self.get_available_name(name, max_length=max_length)
name = self._save(name, content)
# Ensure that the name returned from the storage system is still valid.
validate_file_name(name, allow_relative_path=True)
return name
# These methods are part of the public API, with default implementations.
def get_valid_name(self, name):
"""
Return a filename, based on the provided filename, that's suitable for
use in the target storage system.
"""
return get_valid_filename(name)
def get_alternative_name(self, file_root, file_ext):
"""
Return an alternative filename, by adding an underscore and a random 7
character alphanumeric string (before the file extension, if one
exists) to the filename.
"""
return "%s_%s%s" % (file_root, get_random_string(7), file_ext)
def get_available_name(self, name, max_length=None):
"""
Return a filename that's free on the target storage system and
available for new content to be written to.
"""
name = str(name).replace("\\", "/")
dir_name, file_name = os.path.split(name)
if ".." in pathlib.PurePath(dir_name).parts:
raise SuspiciousFileOperation(
"Detected path traversal attempt in '%s'" % dir_name
)
validate_file_name(file_name)
file_root, file_ext = os.path.splitext(file_name)
# If the filename already exists, generate an alternative filename
# until it doesn't exist.
# Truncate original name if required, so the new filename does not
# exceed the max_length.
while self.exists(name) or (max_length and len(name) > max_length):
# file_ext includes the dot.
name = os.path.join(
dir_name, self.get_alternative_name(file_root, file_ext)
)
if max_length is None:
continue
# Truncate file_root if max_length exceeded.
truncation = len(name) - max_length
if truncation > 0:
file_root = file_root[:-truncation]
# Entire file_root was truncated in attempt to find an
# available filename.
if not file_root:
raise SuspiciousFileOperation(
'Storage can not find an available filename for "%s". '
"Please make sure that the corresponding file field "
'allows sufficient "max_length".' % name
)
name = os.path.join(
dir_name, self.get_alternative_name(file_root, file_ext)
)
return name
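    # Illustrative sketch (hypothetical file names): if "docs/report.txt" is
    # taken, get_alternative_name() appends a random 7-character suffix until
    # a free name is found, e.g.
    #
    #   storage.get_available_name("docs/report.txt")
    #   -> "docs/report_wvd0kq2.txt"  (the suffix varies per call)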
def generate_filename(self, filename):
"""
Validate the filename by calling get_valid_name() and return a filename
to be passed to the save() method.
"""
filename = str(filename).replace("\\", "/")
# `filename` may include a path as returned by FileField.upload_to.
dirname, filename = os.path.split(filename)
if ".." in pathlib.PurePath(dirname).parts:
raise SuspiciousFileOperation(
"Detected path traversal attempt in '%s'" % dirname
)
return os.path.normpath(os.path.join(dirname, self.get_valid_name(filename)))
def path(self, name):
"""
Return a local filesystem path where the file can be retrieved using
Python's built-in open() function. Storage systems that can't be
accessed using open() should *not* implement this method.
"""
raise NotImplementedError("This backend doesn't support absolute paths.")
# The following methods form the public API for storage systems, but with
# no default implementations. Subclasses must implement *all* of these.
def delete(self, name):
"""
Delete the specified file from the storage system.
"""
raise NotImplementedError(
"subclasses of Storage must provide a delete() method"
)
def exists(self, name):
"""
Return True if a file referenced by the given name already exists in the
storage system, or False if the name is available for a new file.
"""
raise NotImplementedError(
"subclasses of Storage must provide an exists() method"
)
def listdir(self, path):
"""
List the contents of the specified path. Return a 2-tuple of lists:
the first item being directories, the second item being files.
"""
raise NotImplementedError(
"subclasses of Storage must provide a listdir() method"
)
def size(self, name):
"""
Return the total size, in bytes, of the file specified by name.
"""
raise NotImplementedError("subclasses of Storage must provide a size() method")
def url(self, name):
"""
Return an absolute URL where the file's contents can be accessed
directly by a web browser.
"""
raise NotImplementedError("subclasses of Storage must provide a url() method")
def get_accessed_time(self, name):
"""
Return the last accessed time (as a datetime) of the file specified by
name. The datetime will be timezone-aware if USE_TZ=True.
"""
raise NotImplementedError(
"subclasses of Storage must provide a get_accessed_time() method"
)
def get_created_time(self, name):
"""
Return the creation time (as a datetime) of the file specified by name.
The datetime will be timezone-aware if USE_TZ=True.
"""
raise NotImplementedError(
"subclasses of Storage must provide a get_created_time() method"
)
def get_modified_time(self, name):
"""
Return the last modified time (as a datetime) of the file specified by
name. The datetime will be timezone-aware if USE_TZ=True.
"""
raise NotImplementedError(
"subclasses of Storage must provide a get_modified_time() method"
)
|
82913ccfd5cfc603f1525d9531a7d0ce61ccf2be8292951140b33de61b042143 | import os
from datetime import datetime, timezone
from urllib.parse import urljoin
from django.conf import settings
from django.core.files import File, locks
from django.core.files.move import file_move_safe
from django.core.signals import setting_changed
from django.utils._os import safe_join
from django.utils.deconstruct import deconstructible
from django.utils.encoding import filepath_to_uri
from django.utils.functional import cached_property
from .base import Storage
from .mixins import StorageSettingsMixin
@deconstructible(path="django.core.files.storage.FileSystemStorage")
class FileSystemStorage(Storage, StorageSettingsMixin):
"""
Standard filesystem storage
"""
# The combination of O_CREAT and O_EXCL makes os.open() raise OSError if
# the file already exists before it's opened.
OS_OPEN_FLAGS = os.O_WRONLY | os.O_CREAT | os.O_EXCL | getattr(os, "O_BINARY", 0)
def __init__(
self,
location=None,
base_url=None,
file_permissions_mode=None,
directory_permissions_mode=None,
):
self._location = location
self._base_url = base_url
self._file_permissions_mode = file_permissions_mode
self._directory_permissions_mode = directory_permissions_mode
setting_changed.connect(self._clear_cached_properties)
@cached_property
def base_location(self):
return self._value_or_setting(self._location, settings.MEDIA_ROOT)
@cached_property
def location(self):
return os.path.abspath(self.base_location)
@cached_property
def base_url(self):
if self._base_url is not None and not self._base_url.endswith("/"):
self._base_url += "/"
return self._value_or_setting(self._base_url, settings.MEDIA_URL)
@cached_property
def file_permissions_mode(self):
return self._value_or_setting(
self._file_permissions_mode, settings.FILE_UPLOAD_PERMISSIONS
)
@cached_property
def directory_permissions_mode(self):
return self._value_or_setting(
self._directory_permissions_mode, settings.FILE_UPLOAD_DIRECTORY_PERMISSIONS
)
def _open(self, name, mode="rb"):
return File(open(self.path(name), mode))
def _save(self, name, content):
full_path = self.path(name)
# Create any intermediate directories that do not exist.
directory = os.path.dirname(full_path)
try:
if self.directory_permissions_mode is not None:
# Set the umask because os.makedirs() doesn't apply the "mode"
# argument to intermediate-level directories.
old_umask = os.umask(0o777 & ~self.directory_permissions_mode)
try:
os.makedirs(
directory, self.directory_permissions_mode, exist_ok=True
)
finally:
os.umask(old_umask)
else:
os.makedirs(directory, exist_ok=True)
except FileExistsError:
raise FileExistsError("%s exists and is not a directory." % directory)
# There's a potential race condition between get_available_name and
# saving the file; it's possible that two threads might return the
# same name, at which point all sorts of fun happens. So we need to
# try to create the file, but if it already exists we have to go back
# to get_available_name() and try again.
while True:
try:
# This file has a file path that we can move.
if hasattr(content, "temporary_file_path"):
file_move_safe(content.temporary_file_path(), full_path)
# This is a normal uploadedfile that we can stream.
else:
# The current umask value is masked out by os.open!
fd = os.open(full_path, self.OS_OPEN_FLAGS, 0o666)
_file = None
try:
locks.lock(fd, locks.LOCK_EX)
for chunk in content.chunks():
if _file is None:
mode = "wb" if isinstance(chunk, bytes) else "wt"
_file = os.fdopen(fd, mode)
_file.write(chunk)
finally:
locks.unlock(fd)
if _file is not None:
_file.close()
else:
os.close(fd)
except FileExistsError:
# A new name is needed if the file exists.
name = self.get_available_name(name)
full_path = self.path(name)
else:
# OK, the file save worked. Break out of the loop.
break
if self.file_permissions_mode is not None:
os.chmod(full_path, self.file_permissions_mode)
# Ensure the saved path is always relative to the storage root.
name = os.path.relpath(full_path, self.location)
# Ensure the moved file has the same gid as the storage root.
self._ensure_location_group_id(full_path)
# Store filenames with forward slashes, even on Windows.
return str(name).replace("\\", "/")
def _ensure_location_group_id(self, full_path):
if os.name == "posix":
file_gid = os.stat(full_path).st_gid
location_gid = os.stat(self.location).st_gid
if file_gid != location_gid:
try:
os.chown(full_path, uid=-1, gid=location_gid)
except PermissionError:
pass
def delete(self, name):
if not name:
raise ValueError("The name must be given to delete().")
name = self.path(name)
# If the file or directory exists, delete it from the filesystem.
try:
if os.path.isdir(name):
os.rmdir(name)
else:
os.remove(name)
except FileNotFoundError:
# FileNotFoundError is raised if the file or directory was removed
# concurrently.
pass
def exists(self, name):
return os.path.lexists(self.path(name))
def listdir(self, path):
path = self.path(path)
directories, files = [], []
with os.scandir(path) as entries:
for entry in entries:
if entry.is_dir():
directories.append(entry.name)
else:
files.append(entry.name)
return directories, files
def path(self, name):
return safe_join(self.location, name)
def size(self, name):
return os.path.getsize(self.path(name))
def url(self, name):
if self.base_url is None:
raise ValueError("This file is not accessible via a URL.")
url = filepath_to_uri(name)
if url is not None:
url = url.lstrip("/")
return urljoin(self.base_url, url)
def _datetime_from_timestamp(self, ts):
"""
If timezone support is enabled, make an aware datetime object in UTC;
otherwise make a naive one in the local timezone.
"""
tz = timezone.utc if settings.USE_TZ else None
return datetime.fromtimestamp(ts, tz=tz)
def get_accessed_time(self, name):
return self._datetime_from_timestamp(os.path.getatime(self.path(name)))
def get_created_time(self, name):
return self._datetime_from_timestamp(os.path.getctime(self.path(name)))
def get_modified_time(self, name):
return self._datetime_from_timestamp(os.path.getmtime(self.path(name)))
|
8ff637ba7ce4f42726c7e4108e3e00a02dcc5215c8c399c56c3605dd09ff161d | class StorageSettingsMixin:
def _clear_cached_properties(self, setting, **kwargs):
"""Reset setting based property values."""
if setting == "MEDIA_ROOT":
self.__dict__.pop("base_location", None)
self.__dict__.pop("location", None)
elif setting == "MEDIA_URL":
self.__dict__.pop("base_url", None)
elif setting == "FILE_UPLOAD_PERMISSIONS":
self.__dict__.pop("file_permissions_mode", None)
elif setting == "FILE_UPLOAD_DIRECTORY_PERMISSIONS":
self.__dict__.pop("directory_permissions_mode", None)
def _value_or_setting(self, value, setting):
return setting if value is None else value
|
357950b6180be45ec19b3650a96e325fe0e45d7c5e65cca777f07bb72abb20d4 | """SMTP email backend class."""
import smtplib
import ssl
import threading
from django.conf import settings
from django.core.mail.backends.base import BaseEmailBackend
from django.core.mail.message import sanitize_address
from django.core.mail.utils import DNS_NAME
from django.utils.functional import cached_property
class EmailBackend(BaseEmailBackend):
"""
A wrapper that manages the SMTP network connection.
"""
def __init__(
self,
host=None,
port=None,
username=None,
password=None,
use_tls=None,
fail_silently=False,
use_ssl=None,
timeout=None,
ssl_keyfile=None,
ssl_certfile=None,
**kwargs,
):
super().__init__(fail_silently=fail_silently)
self.host = host or settings.EMAIL_HOST
self.port = port or settings.EMAIL_PORT
self.username = settings.EMAIL_HOST_USER if username is None else username
self.password = settings.EMAIL_HOST_PASSWORD if password is None else password
self.use_tls = settings.EMAIL_USE_TLS if use_tls is None else use_tls
self.use_ssl = settings.EMAIL_USE_SSL if use_ssl is None else use_ssl
self.timeout = settings.EMAIL_TIMEOUT if timeout is None else timeout
self.ssl_keyfile = (
settings.EMAIL_SSL_KEYFILE if ssl_keyfile is None else ssl_keyfile
)
self.ssl_certfile = (
settings.EMAIL_SSL_CERTFILE if ssl_certfile is None else ssl_certfile
)
if self.use_ssl and self.use_tls:
raise ValueError(
"EMAIL_USE_TLS/EMAIL_USE_SSL are mutually exclusive, so only set "
"one of those settings to True."
)
self.connection = None
self._lock = threading.RLock()
@property
def connection_class(self):
return smtplib.SMTP_SSL if self.use_ssl else smtplib.SMTP
    @cached_property
    def ssl_context(self):
        if self.ssl_certfile or self.ssl_keyfile:
            ssl_context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLS_CLIENT)
            ssl_context.load_cert_chain(self.ssl_certfile, self.ssl_keyfile)
            return ssl_context
        else:
            # With no explicit client cert/key, fall back to the default
            # context so the system CA bundle is loaded and server
            # certificates can actually be verified.
            return ssl.create_default_context()
def open(self):
"""
Ensure an open connection to the email server. Return whether or not a
new connection was required (True or False) or None if an exception
passed silently.
"""
if self.connection:
# Nothing to do if the connection is already open.
return False
# If local_hostname is not specified, socket.getfqdn() gets used.
# For performance, we use the cached FQDN for local_hostname.
connection_params = {"local_hostname": DNS_NAME.get_fqdn()}
if self.timeout is not None:
connection_params["timeout"] = self.timeout
if self.use_ssl:
connection_params["context"] = self.ssl_context
try:
self.connection = self.connection_class(
self.host, self.port, **connection_params
)
# TLS/SSL are mutually exclusive, so only attempt TLS over
# non-secure connections.
if not self.use_ssl and self.use_tls:
self.connection.starttls(context=self.ssl_context)
if self.username and self.password:
self.connection.login(self.username, self.password)
return True
except OSError:
if not self.fail_silently:
raise
def close(self):
"""Close the connection to the email server."""
if self.connection is None:
return
try:
try:
self.connection.quit()
except (ssl.SSLError, smtplib.SMTPServerDisconnected):
# This happens when calling quit() on a TLS connection
# sometimes, or when the connection was already disconnected
# by the server.
self.connection.close()
except smtplib.SMTPException:
if self.fail_silently:
return
raise
finally:
self.connection = None
def send_messages(self, email_messages):
"""
Send one or more EmailMessage objects and return the number of email
messages sent.
"""
if not email_messages:
return 0
with self._lock:
new_conn_created = self.open()
if not self.connection or new_conn_created is None:
# We failed silently on open().
# Trying to send would be pointless.
return 0
num_sent = 0
for message in email_messages:
sent = self._send(message)
if sent:
num_sent += 1
if new_conn_created:
self.close()
return num_sent
def _send(self, email_message):
"""A helper method that does the actual sending."""
if not email_message.recipients():
return False
encoding = email_message.encoding or settings.DEFAULT_CHARSET
from_email = sanitize_address(email_message.from_email, encoding)
recipients = [
sanitize_address(addr, encoding) for addr in email_message.recipients()
]
message = email_message.message()
try:
self.connection.sendmail(
from_email, recipients, message.as_bytes(linesep="\r\n")
)
except smtplib.SMTPException:
if not self.fail_silently:
raise
return False
return True
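# A minimal usage sketch (hypothetical host and addresses; in practice the
# backend is instantiated via EMAIL_BACKEND and django.core.mail):
#
#   from django.core.mail.message import EmailMessage
#   backend = EmailBackend(host="smtp.example.com", port=587, use_tls=True)
#   msg = EmailMessage("Subject", "Body", "from@example.com", ["to@example.com"])
#   backend.send_messages([msg])  # opens the connection, sends, then closes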
|
279d1bb2bf2b2e3a7fcf37558925430dc4cb7c3918db57c4528caa0a07876ae2 | from django.db.models import Transform
from django.db.models.lookups import PostgresOperatorLookup
from django.db.models.sql.query import Query
from .search import SearchVector, SearchVectorExact, SearchVectorField
class DataContains(PostgresOperatorLookup):
lookup_name = "contains"
postgres_operator = "@>"
class ContainedBy(PostgresOperatorLookup):
lookup_name = "contained_by"
postgres_operator = "<@"
class Overlap(PostgresOperatorLookup):
lookup_name = "overlap"
postgres_operator = "&&"
def get_prep_lookup(self):
from .expressions import ArraySubquery
if isinstance(self.rhs, Query):
self.rhs = ArraySubquery(self.rhs)
return super().get_prep_lookup()
class HasKey(PostgresOperatorLookup):
lookup_name = "has_key"
postgres_operator = "?"
prepare_rhs = False
class HasKeys(PostgresOperatorLookup):
lookup_name = "has_keys"
postgres_operator = "?&"
def get_prep_lookup(self):
return [str(item) for item in self.rhs]
class HasAnyKeys(HasKeys):
lookup_name = "has_any_keys"
postgres_operator = "?|"
class Unaccent(Transform):
bilateral = True
lookup_name = "unaccent"
function = "UNACCENT"
class SearchLookup(SearchVectorExact):
lookup_name = "search"
def process_lhs(self, qn, connection):
if not isinstance(self.lhs.output_field, SearchVectorField):
config = getattr(self.rhs, "config", None)
self.lhs = SearchVector(self.lhs, config=config)
lhs, lhs_params = super().process_lhs(qn, connection)
return lhs, lhs_params
class TrigramSimilar(PostgresOperatorLookup):
lookup_name = "trigram_similar"
postgres_operator = "%%"
class TrigramWordSimilar(PostgresOperatorLookup):
lookup_name = "trigram_word_similar"
postgres_operator = "%%>"
class TrigramStrictWordSimilar(PostgresOperatorLookup):
lookup_name = "trigram_strict_word_similar"
postgres_operator = "%%>>"
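# Usage sketch (hypothetical City model; requires the pg_trgm extension):
# each lookup renders its postgres_operator, with "%" doubled above because
# the operator string passes through SQL string interpolation.
#
#   City.objects.filter(name__trigram_similar="Middlesbrough")
#   City.objects.filter(name__trigram_word_similar="Middlesbrough")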
|
b66924a2242d7537418e8a54471f5a067d34cd4a7bf30135426fa1eeb9659eb4 | import warnings
from django.contrib.postgres.indexes import OpClass
from django.core.exceptions import ValidationError
from django.db import DEFAULT_DB_ALIAS, NotSupportedError
from django.db.backends.ddl_references import Expressions, Statement, Table
from django.db.models import BaseConstraint, Deferrable, F, Q
from django.db.models.expressions import Exists, ExpressionList
from django.db.models.indexes import IndexExpression
from django.db.models.lookups import PostgresOperatorLookup
from django.db.models.sql import Query
from django.utils.deprecation import RemovedInDjango50Warning
__all__ = ["ExclusionConstraint"]
class ExclusionConstraintExpression(IndexExpression):
template = "%(expressions)s WITH %(operator)s"
class ExclusionConstraint(BaseConstraint):
template = (
"CONSTRAINT %(name)s EXCLUDE USING %(index_type)s "
"(%(expressions)s)%(include)s%(where)s%(deferrable)s"
)
def __init__(
self,
*,
name,
expressions,
index_type=None,
condition=None,
deferrable=None,
include=None,
opclasses=(),
violation_error_message=None,
):
if index_type and index_type.lower() not in {"gist", "spgist"}:
raise ValueError(
"Exclusion constraints only support GiST or SP-GiST indexes."
)
if not expressions:
raise ValueError(
"At least one expression is required to define an exclusion "
"constraint."
)
if not all(
isinstance(expr, (list, tuple)) and len(expr) == 2 for expr in expressions
):
raise ValueError("The expressions must be a list of 2-tuples.")
if not isinstance(condition, (type(None), Q)):
raise ValueError("ExclusionConstraint.condition must be a Q instance.")
if not isinstance(deferrable, (type(None), Deferrable)):
raise ValueError(
"ExclusionConstraint.deferrable must be a Deferrable instance."
)
if not isinstance(include, (type(None), list, tuple)):
raise ValueError("ExclusionConstraint.include must be a list or tuple.")
if not isinstance(opclasses, (list, tuple)):
raise ValueError("ExclusionConstraint.opclasses must be a list or tuple.")
if opclasses and len(expressions) != len(opclasses):
raise ValueError(
"ExclusionConstraint.expressions and "
"ExclusionConstraint.opclasses must have the same number of "
"elements."
)
self.expressions = expressions
self.index_type = index_type or "GIST"
self.condition = condition
self.deferrable = deferrable
self.include = tuple(include) if include else ()
self.opclasses = opclasses
if self.opclasses:
warnings.warn(
"The opclasses argument is deprecated in favor of using "
"django.contrib.postgres.indexes.OpClass in "
"ExclusionConstraint.expressions.",
category=RemovedInDjango50Warning,
stacklevel=2,
)
super().__init__(name=name, violation_error_message=violation_error_message)
def _get_expressions(self, schema_editor, query):
expressions = []
for idx, (expression, operator) in enumerate(self.expressions):
if isinstance(expression, str):
expression = F(expression)
try:
expression = OpClass(expression, self.opclasses[idx])
except IndexError:
pass
expression = ExclusionConstraintExpression(expression, operator=operator)
expression.set_wrapper_classes(schema_editor.connection)
expressions.append(expression)
return ExpressionList(*expressions).resolve_expression(query)
def _get_condition_sql(self, compiler, schema_editor, query):
if self.condition is None:
return None
where = query.build_where(self.condition)
sql, params = where.as_sql(compiler, schema_editor.connection)
return sql % tuple(schema_editor.quote_value(p) for p in params)
def constraint_sql(self, model, schema_editor):
query = Query(model, alias_cols=False)
compiler = query.get_compiler(connection=schema_editor.connection)
expressions = self._get_expressions(schema_editor, query)
table = model._meta.db_table
condition = self._get_condition_sql(compiler, schema_editor, query)
include = [
model._meta.get_field(field_name).column for field_name in self.include
]
return Statement(
self.template,
table=Table(table, schema_editor.quote_name),
name=schema_editor.quote_name(self.name),
index_type=self.index_type,
expressions=Expressions(
table, expressions, compiler, schema_editor.quote_value
),
where=" WHERE (%s)" % condition if condition else "",
include=schema_editor._index_include_sql(model, include),
deferrable=schema_editor._deferrable_constraint_sql(self.deferrable),
)
def create_sql(self, model, schema_editor):
self.check_supported(schema_editor)
return Statement(
"ALTER TABLE %(table)s ADD %(constraint)s",
table=Table(model._meta.db_table, schema_editor.quote_name),
constraint=self.constraint_sql(model, schema_editor),
)
def remove_sql(self, model, schema_editor):
return schema_editor._delete_constraint_sql(
schema_editor.sql_delete_check,
model,
schema_editor.quote_name(self.name),
)
def check_supported(self, schema_editor):
if (
self.include
and self.index_type.lower() == "spgist"
and not schema_editor.connection.features.supports_covering_spgist_indexes
):
raise NotSupportedError(
"Covering exclusion constraints using an SP-GiST index "
"require PostgreSQL 14+."
)
def deconstruct(self):
path, args, kwargs = super().deconstruct()
kwargs["expressions"] = self.expressions
if self.condition is not None:
kwargs["condition"] = self.condition
if self.index_type.lower() != "gist":
kwargs["index_type"] = self.index_type
if self.deferrable:
kwargs["deferrable"] = self.deferrable
if self.include:
kwargs["include"] = self.include
if self.opclasses:
kwargs["opclasses"] = self.opclasses
return path, args, kwargs
def __eq__(self, other):
if isinstance(other, self.__class__):
return (
self.name == other.name
and self.index_type == other.index_type
and self.expressions == other.expressions
and self.condition == other.condition
and self.deferrable == other.deferrable
and self.include == other.include
and self.opclasses == other.opclasses
and self.violation_error_message == other.violation_error_message
)
return super().__eq__(other)
def __repr__(self):
return "<%s: index_type=%s expressions=%s name=%s%s%s%s%s>" % (
self.__class__.__qualname__,
repr(self.index_type),
repr(self.expressions),
repr(self.name),
"" if self.condition is None else " condition=%s" % self.condition,
"" if self.deferrable is None else " deferrable=%r" % self.deferrable,
"" if not self.include else " include=%s" % repr(self.include),
"" if not self.opclasses else " opclasses=%s" % repr(self.opclasses),
)
def validate(self, model, instance, exclude=None, using=DEFAULT_DB_ALIAS):
queryset = model._default_manager.using(using)
replacement_map = instance._get_field_value_map(
meta=model._meta, exclude=exclude
)
replacements = {F(field): value for field, value in replacement_map.items()}
lookups = []
for idx, (expression, operator) in enumerate(self.expressions):
if isinstance(expression, str):
expression = F(expression)
if exclude:
if isinstance(expression, F):
if expression.name in exclude:
return
else:
for expr in expression.flatten():
if isinstance(expr, F) and expr.name in exclude:
return
rhs_expression = expression.replace_expressions(replacements)
            # Remove OpClass because it is only meaningful during constraint
            # creation.
if isinstance(expression, OpClass):
expression = expression.get_source_expressions()[0]
if isinstance(rhs_expression, OpClass):
rhs_expression = rhs_expression.get_source_expressions()[0]
lookup = PostgresOperatorLookup(lhs=expression, rhs=rhs_expression)
lookup.postgres_operator = operator
lookups.append(lookup)
queryset = queryset.filter(*lookups)
model_class_pk = instance._get_pk_val(model._meta)
if not instance._state.adding and model_class_pk is not None:
queryset = queryset.exclude(pk=model_class_pk)
if not self.condition:
if queryset.exists():
raise ValidationError(self.get_violation_error_message())
else:
if (self.condition & Exists(queryset.filter(self.condition))).check(
replacement_map, using=using
):
raise ValidationError(self.get_violation_error_message())
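# Usage sketch (hypothetical models): the canonical use is preventing
# overlapping ranges, pairing each expression with a Postgres operator.
#
#   from django.contrib.postgres.constraints import ExclusionConstraint
#   from django.contrib.postgres.fields import DateTimeRangeField, RangeOperators
#
#   class Reservation(models.Model):
#       room = models.ForeignKey("Room", on_delete=models.CASCADE)
#       timespan = DateTimeRangeField()
#
#       class Meta:
#           constraints = [
#               ExclusionConstraint(
#                   name="exclude_overlapping_reservations",
#                   expressions=[
#                       ("timespan", RangeOperators.OVERLAPS),
#                       ("room", RangeOperators.EQUAL),
#                   ],
#               ),
#           ]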
|
8aa11871de02922bd92278152bd1ea1291f21d9b36f5a05ce605df4d8328a29a | import unicodedata
from django import forms
from django.contrib.auth import authenticate, get_user_model, password_validation
from django.contrib.auth.hashers import UNUSABLE_PASSWORD_PREFIX, identify_hasher
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import ValidationError
from django.core.mail import EmailMultiAlternatives
from django.template import loader
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from django.utils.text import capfirst
from django.utils.translation import gettext
from django.utils.translation import gettext_lazy as _
UserModel = get_user_model()
def _unicode_ci_compare(s1, s2):
"""
Perform case-insensitive comparison of two identifiers, using the
recommended algorithm from Unicode Technical Report 36, section
2.11.2(B)(2).
"""
return (
unicodedata.normalize("NFKC", s1).casefold()
== unicodedata.normalize("NFKC", s2).casefold()
)
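# For example, both comparisons below are True: casefolding maps "ß" to
# "ss", and NFKC normalization folds compatibility characters such as the
# Roman numeral "Ⅸ".
#
#   _unicode_ci_compare("straße", "STRASSE")
#   _unicode_ci_compare("Ⅸ", "ix")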
class ReadOnlyPasswordHashWidget(forms.Widget):
template_name = "auth/widgets/read_only_password_hash.html"
read_only = True
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
summary = []
if not value or value.startswith(UNUSABLE_PASSWORD_PREFIX):
summary.append({"label": gettext("No password set.")})
else:
try:
hasher = identify_hasher(value)
except ValueError:
summary.append(
{
"label": gettext(
"Invalid password format or unknown hashing algorithm."
)
}
)
else:
for key, value_ in hasher.safe_summary(value).items():
summary.append({"label": gettext(key), "value": value_})
context["summary"] = summary
return context
def id_for_label(self, id_):
return None
class ReadOnlyPasswordHashField(forms.Field):
widget = ReadOnlyPasswordHashWidget
def __init__(self, *args, **kwargs):
kwargs.setdefault("required", False)
kwargs.setdefault("disabled", True)
super().__init__(*args, **kwargs)
class UsernameField(forms.CharField):
def to_python(self, value):
return unicodedata.normalize("NFKC", super().to_python(value))
def widget_attrs(self, widget):
return {
**super().widget_attrs(widget),
"autocapitalize": "none",
"autocomplete": "username",
}
class UserCreationForm(forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username and
password.
"""
error_messages = {
"password_mismatch": _("The two password fields didn’t match."),
}
password1 = forms.CharField(
label=_("Password"),
strip=False,
widget=forms.PasswordInput(attrs={"autocomplete": "new-password"}),
help_text=password_validation.password_validators_help_text_html(),
)
password2 = forms.CharField(
label=_("Password confirmation"),
widget=forms.PasswordInput(attrs={"autocomplete": "new-password"}),
strip=False,
help_text=_("Enter the same password as before, for verification."),
)
class Meta:
model = User
fields = ("username",)
field_classes = {"username": UsernameField}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self._meta.model.USERNAME_FIELD in self.fields:
self.fields[self._meta.model.USERNAME_FIELD].widget.attrs[
"autofocus"
] = True
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise ValidationError(
self.error_messages["password_mismatch"],
code="password_mismatch",
)
return password2
def _post_clean(self):
super()._post_clean()
# Validate the password after self.instance is updated with form data
# by super().
password = self.cleaned_data.get("password2")
if password:
try:
password_validation.validate_password(password, self.instance)
except ValidationError as error:
self.add_error("password2", error)
def save(self, commit=True):
user = super().save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
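# Usage sketch: the form hashes the password via set_password() before
# saving, so the raw password never reaches the database.
#
#   form = UserCreationForm(data={
#       "username": "alice",
#       "password1": "correct-horse-battery",
#       "password2": "correct-horse-battery",
#   })
#   if form.is_valid():
#       user = form.save()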
class UserChangeForm(forms.ModelForm):
password = ReadOnlyPasswordHashField(
label=_("Password"),
help_text=_(
"Raw passwords are not stored, so there is no way to see this "
"user’s password, but you can change the password using "
'<a href="{}">this form</a>.'
),
)
class Meta:
model = User
fields = "__all__"
field_classes = {"username": UsernameField}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
password = self.fields.get("password")
if password:
password.help_text = password.help_text.format(
f"../../{self.instance.pk}/password/"
)
user_permissions = self.fields.get("user_permissions")
if user_permissions:
user_permissions.queryset = user_permissions.queryset.select_related(
"content_type"
)
class AuthenticationForm(forms.Form):
"""
Base class for authenticating users. Extend this to get a form that accepts
username/password logins.
"""
username = UsernameField(widget=forms.TextInput(attrs={"autofocus": True}))
password = forms.CharField(
label=_("Password"),
strip=False,
widget=forms.PasswordInput(attrs={"autocomplete": "current-password"}),
)
error_messages = {
"invalid_login": _(
"Please enter a correct %(username)s and password. Note that both "
"fields may be case-sensitive."
),
"inactive": _("This account is inactive."),
}
def __init__(self, request=None, *args, **kwargs):
"""
The 'request' parameter is set for custom auth use by subclasses.
The form data comes in via the standard 'data' kwarg.
"""
self.request = request
self.user_cache = None
super().__init__(*args, **kwargs)
# Set the max length and label for the "username" field.
self.username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)
username_max_length = self.username_field.max_length or 254
self.fields["username"].max_length = username_max_length
self.fields["username"].widget.attrs["maxlength"] = username_max_length
if self.fields["username"].label is None:
self.fields["username"].label = capfirst(self.username_field.verbose_name)
def clean(self):
username = self.cleaned_data.get("username")
password = self.cleaned_data.get("password")
if username is not None and password:
self.user_cache = authenticate(
self.request, username=username, password=password
)
if self.user_cache is None:
raise self.get_invalid_login_error()
else:
self.confirm_login_allowed(self.user_cache)
return self.cleaned_data
def confirm_login_allowed(self, user):
"""
Controls whether the given User may log in. This is a policy setting,
        independent of end-user authentication. The default behavior is to
        allow login by active users and reject login by inactive users.
If the given user cannot log in, this method should raise a
``ValidationError``.
If the given user may log in, this method should return None.
"""
if not user.is_active:
raise ValidationError(
self.error_messages["inactive"],
code="inactive",
)
def get_user(self):
return self.user_cache
def get_invalid_login_error(self):
return ValidationError(
self.error_messages["invalid_login"],
code="invalid_login",
params={"username": self.username_field.verbose_name},
)
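# Usage sketch: a login view typically binds the request and POST data, then
# logs in the cached user on success.
#
#   from django.contrib.auth import login
#
#   form = AuthenticationForm(request, data=request.POST)
#   if form.is_valid():
#       login(request, form.get_user())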
class PasswordResetForm(forms.Form):
email = forms.EmailField(
label=_("Email"),
max_length=254,
widget=forms.EmailInput(attrs={"autocomplete": "email"}),
)
def send_mail(
self,
subject_template_name,
email_template_name,
context,
from_email,
to_email,
html_email_template_name=None,
):
"""
Send a django.core.mail.EmailMultiAlternatives to `to_email`.
"""
subject = loader.render_to_string(subject_template_name, context)
# Email subject *must not* contain newlines
subject = "".join(subject.splitlines())
body = loader.render_to_string(email_template_name, context)
email_message = EmailMultiAlternatives(subject, body, from_email, [to_email])
if html_email_template_name is not None:
html_email = loader.render_to_string(html_email_template_name, context)
email_message.attach_alternative(html_email, "text/html")
email_message.send()
def get_users(self, email):
"""Given an email, return matching user(s) who should receive a reset.
This allows subclasses to more easily customize the default policies
that prevent inactive users and users with unusable passwords from
resetting their password.
"""
email_field_name = UserModel.get_email_field_name()
active_users = UserModel._default_manager.filter(
**{
"%s__iexact" % email_field_name: email,
"is_active": True,
}
)
return (
u
for u in active_users
if u.has_usable_password()
and _unicode_ci_compare(email, getattr(u, email_field_name))
)
def save(
self,
domain_override=None,
subject_template_name="registration/password_reset_subject.txt",
email_template_name="registration/password_reset_email.html",
use_https=False,
token_generator=default_token_generator,
from_email=None,
request=None,
html_email_template_name=None,
extra_email_context=None,
):
"""
Generate a one-use only link for resetting password and send it to the
user.
"""
email = self.cleaned_data["email"]
if not domain_override:
current_site = get_current_site(request)
site_name = current_site.name
domain = current_site.domain
else:
site_name = domain = domain_override
email_field_name = UserModel.get_email_field_name()
for user in self.get_users(email):
user_email = getattr(user, email_field_name)
context = {
"email": user_email,
"domain": domain,
"site_name": site_name,
"uid": urlsafe_base64_encode(force_bytes(user.pk)),
"user": user,
"token": token_generator.make_token(user),
"protocol": "https" if use_https else "http",
**(extra_email_context or {}),
}
self.send_mail(
subject_template_name,
email_template_name,
context,
from_email,
user_email,
html_email_template_name=html_email_template_name,
)
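# Usage sketch: a password-reset view validates the email and delegates to
# save(), which emails a tokenized one-use link to every matching active
# user returned by get_users().
#
#   form = PasswordResetForm(data={"email": "alice@example.com"})
#   if form.is_valid():
#       form.save(request=request, use_https=request.is_secure())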
class SetPasswordForm(forms.Form):
"""
A form that lets a user set their password without entering the old
    password.
"""
error_messages = {
"password_mismatch": _("The two password fields didn’t match."),
}
new_password1 = forms.CharField(
label=_("New password"),
widget=forms.PasswordInput(attrs={"autocomplete": "new-password"}),
strip=False,
help_text=password_validation.password_validators_help_text_html(),
)
new_password2 = forms.CharField(
label=_("New password confirmation"),
strip=False,
widget=forms.PasswordInput(attrs={"autocomplete": "new-password"}),
)
def __init__(self, user, *args, **kwargs):
self.user = user
super().__init__(*args, **kwargs)
def clean_new_password2(self):
password1 = self.cleaned_data.get("new_password1")
password2 = self.cleaned_data.get("new_password2")
if password1 and password2 and password1 != password2:
raise ValidationError(
self.error_messages["password_mismatch"],
code="password_mismatch",
)
password_validation.validate_password(password2, self.user)
return password2
def save(self, commit=True):
password = self.cleaned_data["new_password1"]
self.user.set_password(password)
if commit:
self.user.save()
return self.user
class PasswordChangeForm(SetPasswordForm):
"""
A form that lets a user change their password by entering their old
password.
"""
error_messages = {
**SetPasswordForm.error_messages,
"password_incorrect": _(
"Your old password was entered incorrectly. Please enter it again."
),
}
old_password = forms.CharField(
label=_("Old password"),
strip=False,
widget=forms.PasswordInput(
attrs={"autocomplete": "current-password", "autofocus": True}
),
)
field_order = ["old_password", "new_password1", "new_password2"]
def clean_old_password(self):
"""
Validate that the old_password field is correct.
"""
old_password = self.cleaned_data["old_password"]
if not self.user.check_password(old_password):
raise ValidationError(
self.error_messages["password_incorrect"],
code="password_incorrect",
)
return old_password
class AdminPasswordChangeForm(forms.Form):
"""
A form used to change the password of a user in the admin interface.
"""
error_messages = {
"password_mismatch": _("The two password fields didn’t match."),
}
required_css_class = "required"
password1 = forms.CharField(
label=_("Password"),
widget=forms.PasswordInput(
attrs={"autocomplete": "new-password", "autofocus": True}
),
strip=False,
help_text=password_validation.password_validators_help_text_html(),
)
password2 = forms.CharField(
label=_("Password (again)"),
widget=forms.PasswordInput(attrs={"autocomplete": "new-password"}),
strip=False,
help_text=_("Enter the same password as before, for verification."),
)
def __init__(self, user, *args, **kwargs):
self.user = user
super().__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise ValidationError(
self.error_messages["password_mismatch"],
code="password_mismatch",
)
password_validation.validate_password(password2, self.user)
return password2
def save(self, commit=True):
"""Save the new password."""
password = self.cleaned_data["password1"]
self.user.set_password(password)
if commit:
self.user.save()
return self.user
@property
def changed_data(self):
data = super().changed_data
for name in self.fields:
if name not in data:
return []
return ["password"]
|
3651385fc283420f2a9015dce2073c2f38647303c38a1eaa94e65671e5e632c1 | import functools
import itertools
from collections import defaultdict
from asgiref.sync import sync_to_async
from django.contrib.contenttypes.models import ContentType
from django.core import checks
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, models, router, transaction
from django.db.models import DO_NOTHING, ForeignObject, ForeignObjectRel
from django.db.models.base import ModelBase, make_foreign_order_accessors
from django.db.models.fields.mixins import FieldCacheMixin
from django.db.models.fields.related import (
ReverseManyToOneDescriptor,
lazy_related_operation,
)
from django.db.models.query_utils import PathInfo
from django.db.models.sql import AND
from django.db.models.sql.where import WhereNode
from django.db.models.utils import AltersData
from django.utils.functional import cached_property
class GenericForeignKey(FieldCacheMixin):
"""
Provide a generic many-to-one relation through the ``content_type`` and
``object_id`` fields.
This class also doubles as an accessor to the related object (similar to
ForwardManyToOneDescriptor) by adding itself as a model attribute.
"""
# Field flags
auto_created = False
concrete = False
editable = False
hidden = False
is_relation = True
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
related_model = None
remote_field = None
def __init__(
self, ct_field="content_type", fk_field="object_id", for_concrete_model=True
):
self.ct_field = ct_field
self.fk_field = fk_field
self.for_concrete_model = for_concrete_model
self.editable = False
self.rel = None
self.column = None
def contribute_to_class(self, cls, name, **kwargs):
self.name = name
self.model = cls
cls._meta.add_field(self, private=True)
setattr(cls, name, self)
def get_filter_kwargs_for_object(self, obj):
"""See corresponding method on Field"""
return {
self.fk_field: getattr(obj, self.fk_field),
self.ct_field: getattr(obj, self.ct_field),
}
def get_forward_related_filter(self, obj):
"""See corresponding method on RelatedField"""
return {
self.fk_field: obj.pk,
self.ct_field: ContentType.objects.get_for_model(obj).pk,
}
def __str__(self):
model = self.model
return "%s.%s" % (model._meta.label, self.name)
def check(self, **kwargs):
return [
*self._check_field_name(),
*self._check_object_id_field(),
*self._check_content_type_field(),
]
def _check_field_name(self):
if self.name.endswith("_"):
return [
checks.Error(
"Field names must not end with an underscore.",
obj=self,
id="fields.E001",
)
]
else:
return []
def _check_object_id_field(self):
try:
self.model._meta.get_field(self.fk_field)
except FieldDoesNotExist:
return [
checks.Error(
"The GenericForeignKey object ID references the "
"nonexistent field '%s'." % self.fk_field,
obj=self,
id="contenttypes.E001",
)
]
else:
return []
def _check_content_type_field(self):
"""
Check if field named `field_name` in model `model` exists and is a
valid content_type field (is a ForeignKey to ContentType).
"""
try:
field = self.model._meta.get_field(self.ct_field)
except FieldDoesNotExist:
return [
checks.Error(
"The GenericForeignKey content type references the "
"nonexistent field '%s.%s'."
% (self.model._meta.object_name, self.ct_field),
obj=self,
id="contenttypes.E002",
)
]
else:
if not isinstance(field, models.ForeignKey):
return [
checks.Error(
"'%s.%s' is not a ForeignKey."
% (self.model._meta.object_name, self.ct_field),
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=self,
id="contenttypes.E003",
)
]
elif field.remote_field.model != ContentType:
return [
checks.Error(
"'%s.%s' is not a ForeignKey to 'contenttypes.ContentType'."
% (self.model._meta.object_name, self.ct_field),
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=self,
id="contenttypes.E004",
)
]
else:
return []
def get_cache_name(self):
return self.name
def get_content_type(self, obj=None, id=None, using=None):
if obj is not None:
return ContentType.objects.db_manager(obj._state.db).get_for_model(
obj, for_concrete_model=self.for_concrete_model
)
elif id is not None:
return ContentType.objects.db_manager(using).get_for_id(id)
else:
            # This should never happen; callers must pass either obj or id.
raise Exception("Impossible arguments to GFK.get_content_type!")
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is not None:
raise ValueError("Custom queryset can't be used for this lookup.")
# For efficiency, group the instances by content type and then do one
# query per model
fk_dict = defaultdict(set)
# We need one instance for each group in order to get the right db:
instance_dict = {}
ct_attname = self.model._meta.get_field(self.ct_field).get_attname()
for instance in instances:
            # Avoid the lookup if either the ct_id or the fk value is None.
ct_id = getattr(instance, ct_attname)
if ct_id is not None:
fk_val = getattr(instance, self.fk_field)
if fk_val is not None:
fk_dict[ct_id].add(fk_val)
instance_dict[ct_id] = instance
ret_val = []
for ct_id, fkeys in fk_dict.items():
instance = instance_dict[ct_id]
ct = self.get_content_type(id=ct_id, using=instance._state.db)
ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys))
# For doing the join in Python, we have to match both the FK val and the
# content type, so we use a callable that returns a (fk, class) pair.
def gfk_key(obj):
ct_id = getattr(obj, ct_attname)
if ct_id is None:
return None
else:
model = self.get_content_type(
id=ct_id, using=obj._state.db
).model_class()
return (
model._meta.pk.get_prep_value(getattr(obj, self.fk_field)),
model,
)
return (
ret_val,
lambda obj: (obj.pk, obj.__class__),
gfk_key,
True,
self.name,
False,
)
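    # Sketch (hypothetical TaggedItem model with a GFK named
    # "content_object"): this method is what makes the following emit one
    # extra query per content type instead of one per row.
    #
    #   for item in TaggedItem.objects.prefetch_related("content_object"):
    #       item.content_object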
def __get__(self, instance, cls=None):
if instance is None:
return self
# Don't use getattr(instance, self.ct_field) here because that might
# reload the same ContentType over and over (#5570). Instead, get the
# content type ID here, and later when the actual instance is needed,
# use ContentType.objects.get_for_id(), which has a global cache.
f = self.model._meta.get_field(self.ct_field)
ct_id = getattr(instance, f.get_attname(), None)
pk_val = getattr(instance, self.fk_field)
rel_obj = self.get_cached_value(instance, default=None)
if rel_obj is None and self.is_cached(instance):
return rel_obj
if rel_obj is not None:
ct_match = (
ct_id == self.get_content_type(obj=rel_obj, using=instance._state.db).id
)
pk_match = rel_obj._meta.pk.to_python(pk_val) == rel_obj.pk
if ct_match and pk_match:
return rel_obj
else:
rel_obj = None
if ct_id is not None:
ct = self.get_content_type(id=ct_id, using=instance._state.db)
try:
rel_obj = ct.get_object_for_this_type(pk=pk_val)
except ObjectDoesNotExist:
pass
self.set_cached_value(instance, rel_obj)
return rel_obj
def __set__(self, instance, value):
ct = None
fk = None
if value is not None:
ct = self.get_content_type(obj=value)
fk = value.pk
setattr(instance, self.ct_field, ct)
setattr(instance, self.fk_field, fk)
self.set_cached_value(instance, value)
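# Usage sketch (hypothetical model): assigning to the descriptor sets both
# underlying columns, as __set__() above shows.
#
#   class TaggedItem(models.Model):
#       tag = models.SlugField()
#       content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
#       object_id = models.PositiveIntegerField()
#       content_object = GenericForeignKey("content_type", "object_id")
#
#   TaggedItem.objects.create(content_object=some_saved_instance, tag="bdfl")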
class GenericRel(ForeignObjectRel):
"""
Used by GenericRelation to store information about the relation.
"""
def __init__(
self,
field,
to,
related_name=None,
related_query_name=None,
limit_choices_to=None,
):
super().__init__(
field,
to,
related_name=related_query_name or "+",
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
on_delete=DO_NOTHING,
)
class GenericRelation(ForeignObject):
"""
Provide a reverse to a relation created by a GenericForeignKey.
"""
# Field flags
auto_created = False
empty_strings_allowed = False
many_to_many = False
many_to_one = False
one_to_many = True
one_to_one = False
rel_class = GenericRel
mti_inherited = False
def __init__(
self,
to,
object_id_field="object_id",
content_type_field="content_type",
for_concrete_model=True,
related_query_name=None,
limit_choices_to=None,
**kwargs,
):
kwargs["rel"] = self.rel_class(
self,
to,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
)
# Reverse relations are always nullable (Django can't enforce that a
# foreign key on the related model points to this model).
kwargs["null"] = True
kwargs["blank"] = True
kwargs["on_delete"] = models.CASCADE
kwargs["editable"] = False
kwargs["serialize"] = False
        # This construct is somewhat of an abuse of ForeignObject. The field
        # represents a relation from the pk to the object_id field, but the
        # relation isn't direct: the join is generated in reverse along the
        # foreign key. So the from_field is the object_id field and the
        # to_field is the pk, because of the reverse join.
super().__init__(to, from_fields=[object_id_field], to_fields=[], **kwargs)
self.object_id_field_name = object_id_field
self.content_type_field_name = content_type_field
self.for_concrete_model = for_concrete_model
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_generic_foreign_key_existence(),
]
def _is_matching_generic_foreign_key(self, field):
"""
Return True if field is a GenericForeignKey whose content type and
object id fields correspond to the equivalent attributes on this
GenericRelation.
"""
return (
isinstance(field, GenericForeignKey)
and field.ct_field == self.content_type_field_name
and field.fk_field == self.object_id_field_name
)
def _check_generic_foreign_key_existence(self):
target = self.remote_field.model
if isinstance(target, ModelBase):
fields = target._meta.private_fields
if any(self._is_matching_generic_foreign_key(field) for field in fields):
return []
else:
return [
checks.Error(
"The GenericRelation defines a relation with the model "
"'%s', but that model does not have a GenericForeignKey."
% target._meta.label,
obj=self,
id="contenttypes.E004",
)
]
else:
return []
def resolve_related_fields(self):
self.to_fields = [self.model._meta.pk.name]
return [
(
self.remote_field.model._meta.get_field(self.object_id_field_name),
self.model._meta.pk,
)
]
def _get_path_info_with_parent(self, filtered_relation):
"""
Return the path that joins the current model through any parent models.
The idea is that if you have a GFK defined on a parent model then we
need to join the parent model first, then the child model.
"""
# With an inheritance chain ChildTag -> Tag and Tag defines the
# GenericForeignKey, and a TaggedItem model has a GenericRelation to
# ChildTag, then we need to generate a join from TaggedItem to Tag
# (as Tag.object_id == TaggedItem.pk), and another join from Tag to
# ChildTag (as that is where the relation is to). Do this by first
# generating a join to the parent model, then generating joins to the
# child models.
path = []
opts = self.remote_field.model._meta.concrete_model._meta
parent_opts = opts.get_field(self.object_id_field_name).model._meta
target = parent_opts.pk
path.append(
PathInfo(
from_opts=self.model._meta,
to_opts=parent_opts,
target_fields=(target,),
join_field=self.remote_field,
m2m=True,
direct=False,
filtered_relation=filtered_relation,
)
)
# Collect joins needed for the parent -> child chain. This is easiest
# to do if we collect joins for the child -> parent chain and then
# reverse the direction (call to reverse() and use of
# field.remote_field.get_path_info()).
parent_field_chain = []
while parent_opts != opts:
field = opts.get_ancestor_link(parent_opts.model)
parent_field_chain.append(field)
opts = field.remote_field.model._meta
parent_field_chain.reverse()
for field in parent_field_chain:
path.extend(field.remote_field.path_infos)
return path
def get_path_info(self, filtered_relation=None):
opts = self.remote_field.model._meta
object_id_field = opts.get_field(self.object_id_field_name)
if object_id_field.model != opts.model:
return self._get_path_info_with_parent(filtered_relation)
else:
target = opts.pk
return [
PathInfo(
from_opts=self.model._meta,
to_opts=opts,
target_fields=(target,),
join_field=self.remote_field,
m2m=True,
direct=False,
filtered_relation=filtered_relation,
)
]
def get_reverse_path_info(self, filtered_relation=None):
opts = self.model._meta
from_opts = self.remote_field.model._meta
return [
PathInfo(
from_opts=from_opts,
to_opts=opts,
target_fields=(opts.pk,),
join_field=self,
m2m=False,
direct=False,
filtered_relation=filtered_relation,
)
]
def value_to_string(self, obj):
qs = getattr(obj, self.name).all()
return str([instance.pk for instance in qs])
def contribute_to_class(self, cls, name, **kwargs):
kwargs["private_only"] = True
super().contribute_to_class(cls, name, **kwargs)
self.model = cls
# Disable the reverse relation for fields inherited by subclasses of a
# model in multi-table inheritance. The reverse relation points to the
# field of the base model.
if self.mti_inherited:
self.remote_field.related_name = "+"
self.remote_field.related_query_name = None
setattr(cls, self.name, ReverseGenericManyToOneDescriptor(self.remote_field))
# Add get_RELATED_order() and set_RELATED_order() to the model this
# field belongs to, if the model on the other end of this relation
# is ordered with respect to its corresponding GenericForeignKey.
if not cls._meta.abstract:
def make_generic_foreign_order_accessors(related_model, model):
if self._is_matching_generic_foreign_key(
model._meta.order_with_respect_to
):
make_foreign_order_accessors(model, related_model)
lazy_related_operation(
make_generic_foreign_order_accessors,
self.model,
self.remote_field.model,
)
def set_attributes_from_rel(self):
pass
def get_internal_type(self):
return "ManyToManyField"
def get_content_type(self):
"""
Return the content type associated with this field's model.
"""
return ContentType.objects.get_for_model(
self.model, for_concrete_model=self.for_concrete_model
)
def get_extra_restriction(self, alias, remote_alias):
field = self.remote_field.model._meta.get_field(self.content_type_field_name)
contenttype_pk = self.get_content_type().pk
lookup = field.get_lookup("exact")(field.get_col(remote_alias), contenttype_pk)
return WhereNode([lookup], connector=AND)
def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
"""
Return all objects related to ``objs`` via this ``GenericRelation``.
"""
return self.remote_field.model._base_manager.db_manager(using).filter(
**{
"%s__pk"
% self.content_type_field_name: ContentType.objects.db_manager(using)
.get_for_model(self.model, for_concrete_model=self.for_concrete_model)
.pk,
"%s__in" % self.object_id_field_name: [obj.pk for obj in objs],
}
)
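# Usage sketch (hypothetical Bookmark model, with TaggedItem as above): the
# relation enables reverse filtering and bulk collection of related objects.
#
#   class Bookmark(models.Model):
#       url = models.URLField()
#       tags = GenericRelation(TaggedItem)
#
#   Bookmark.objects.filter(tags__tag="django")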
class ReverseGenericManyToOneDescriptor(ReverseManyToOneDescriptor):
"""
Accessor to the related objects manager on the one-to-many relation created
by GenericRelation.
In the example::
class Post(Model):
comments = GenericRelation(Comment)
``post.comments`` is a ReverseGenericManyToOneDescriptor instance.
"""
@cached_property
def related_manager_cls(self):
return create_generic_related_manager(
self.rel.model._default_manager.__class__,
self.rel,
)
def create_generic_related_manager(superclass, rel):
"""
Factory function to create a manager that subclasses another manager
(generally the default manager of a given model) and adds behaviors
specific to generic relations.
"""
class GenericRelatedObjectManager(superclass, AltersData):
def __init__(self, instance=None):
super().__init__()
self.instance = instance
self.model = rel.model
self.get_content_type = functools.partial(
ContentType.objects.db_manager(instance._state.db).get_for_model,
for_concrete_model=rel.field.for_concrete_model,
)
self.content_type = self.get_content_type(instance)
self.content_type_field_name = rel.field.content_type_field_name
self.object_id_field_name = rel.field.object_id_field_name
self.prefetch_cache_name = rel.field.attname
self.pk_val = instance.pk
self.core_filters = {
"%s__pk" % self.content_type_field_name: self.content_type.id,
self.object_id_field_name: self.pk_val,
}
def __call__(self, *, manager):
manager = getattr(self.model, manager)
manager_class = create_generic_related_manager(manager.__class__, rel)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def __str__(self):
return repr(self)
def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
db = self._db or router.db_for_read(self.model, instance=self.instance)
return queryset.using(db).filter(**self.core_filters)
def _remove_prefetched_objects(self):
try:
self.instance._prefetched_objects_cache.pop(self.prefetch_cache_name)
except (AttributeError, KeyError):
pass # nothing to clear from cache
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
queryset = super().get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super().get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
# Group instances by content types.
content_type_queries = [
models.Q.create(
[
(f"{self.content_type_field_name}__pk", content_type_id),
(f"{self.object_id_field_name}__in", {obj.pk for obj in objs}),
]
)
for content_type_id, objs in itertools.groupby(
sorted(instances, key=lambda obj: self.get_content_type(obj).pk),
lambda obj: self.get_content_type(obj).pk,
)
]
query = models.Q.create(content_type_queries, connector=models.Q.OR)
# We (possibly) need to convert object IDs to the type of the
# instances' PK in order to match up instances:
object_id_converter = instances[0]._meta.pk.to_python
content_type_id_field_name = "%s_id" % self.content_type_field_name
return (
queryset.filter(query),
lambda relobj: (
object_id_converter(getattr(relobj, self.object_id_field_name)),
getattr(relobj, content_type_id_field_name),
),
lambda obj: (obj.pk, self.get_content_type(obj).pk),
False,
self.prefetch_cache_name,
False,
)
def add(self, *objs, bulk=True):
self._remove_prefetched_objects()
db = router.db_for_write(self.model, instance=self.instance)
def check_and_update_obj(obj):
if not isinstance(obj, self.model):
raise TypeError(
"'%s' instance expected, got %r"
% (self.model._meta.object_name, obj)
)
setattr(obj, self.content_type_field_name, self.content_type)
setattr(obj, self.object_id_field_name, self.pk_val)
if bulk:
pks = []
for obj in objs:
if obj._state.adding or obj._state.db != db:
raise ValueError(
"%r instance isn't saved. Use bulk=False or save "
"the object first." % obj
)
check_and_update_obj(obj)
pks.append(obj.pk)
self.model._base_manager.using(db).filter(pk__in=pks).update(
**{
self.content_type_field_name: self.content_type,
self.object_id_field_name: self.pk_val,
}
)
else:
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
check_and_update_obj(obj)
obj.save()
add.alters_data = True
async def aadd(self, *objs, bulk=True):
return await sync_to_async(self.add)(*objs, bulk=bulk)
aadd.alters_data = True
def remove(self, *objs, bulk=True):
if not objs:
return
self._clear(self.filter(pk__in=[o.pk for o in objs]), bulk)
remove.alters_data = True
async def aremove(self, *objs, bulk=True):
return await sync_to_async(self.remove)(*objs, bulk=bulk)
aremove.alters_data = True
def clear(self, *, bulk=True):
self._clear(self, bulk)
clear.alters_data = True
async def aclear(self, *, bulk=True):
return await sync_to_async(self.clear)(bulk=bulk)
aclear.alters_data = True
def _clear(self, queryset, bulk):
self._remove_prefetched_objects()
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
# `QuerySet.delete()` creates its own atomic block which
# contains the `pre_delete` and `post_delete` signal handlers.
queryset.delete()
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
obj.delete()
_clear.alters_data = True
def set(self, objs, *, bulk=True, clear=False):
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs, bulk=bulk)
else:
old_objs = set(self.using(db).all())
new_objs = []
for obj in objs:
if obj in old_objs:
old_objs.remove(obj)
else:
new_objs.append(obj)
self.remove(*old_objs)
self.add(*new_objs, bulk=bulk)
set.alters_data = True
async def aset(self, objs, *, bulk=True, clear=False):
return await sync_to_async(self.set)(objs, bulk=bulk, clear=clear)
aset.alters_data = True
def create(self, **kwargs):
self._remove_prefetched_objects()
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super().using(db).create(**kwargs)
create.alters_data = True
async def acreate(self, **kwargs):
return await sync_to_async(self.create)(**kwargs)
acreate.alters_data = True
def get_or_create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super().using(db).get_or_create(**kwargs)
get_or_create.alters_data = True
async def aget_or_create(self, **kwargs):
return await sync_to_async(self.get_or_create)(**kwargs)
aget_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super().using(db).update_or_create(**kwargs)
update_or_create.alters_data = True
async def aupdate_or_create(self, **kwargs):
return await sync_to_async(self.update_or_create)(**kwargs)
aupdate_or_create.alters_data = True
return GenericRelatedObjectManager
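# Usage sketch: given the Bookmark model above, the generated manager exposes
# the usual related-manager API, pinned to one instance's content type and pk.
#
#   bookmark.tags.create(tag="python")
#   bookmark.tags.add(existing_tag)
#   bookmark.tags.set([tag1, tag2])
#   bookmark.tags.clear()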
|
218bb27c23f56eb9df542b9f377436894247a4e2a348f5953b7052f6d0f0033c | from django.core.exceptions import FieldDoesNotExist
from django.db import IntegrityError, connection, migrations, models, transaction
from django.db.migrations.migration import Migration
from django.db.migrations.operations.fields import FieldOperation
from django.db.migrations.state import ModelState, ProjectState
from django.db.models.functions import Abs
from django.db.transaction import atomic
from django.test import (
SimpleTestCase,
ignore_warnings,
override_settings,
skipUnlessDBFeature,
)
from django.test.utils import CaptureQueriesContext
from django.utils.deprecation import RemovedInDjango51Warning
from .models import FoodManager, FoodQuerySet, UnicodeModel
from .test_base import OperationTestBase
class Mixin:
pass
class OperationTests(OperationTestBase):
"""
Tests running the operations and making sure they do what they say they do.
    Each test checks the state change and then the database operation, both
    forwards and backwards.
"""
def test_create_model(self):
"""
Tests the CreateModel operation.
Most other tests use this operation as part of setup, so check failures
here first.
"""
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=1)),
],
)
self.assertEqual(operation.describe(), "Create model Pony")
self.assertEqual(operation.migration_name_fragment, "pony")
# Test the state alteration
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crmo", new_state)
self.assertEqual(new_state.models["test_crmo", "pony"].name, "Pony")
self.assertEqual(len(new_state.models["test_crmo", "pony"].fields), 2)
# Test the database alteration
self.assertTableNotExists("test_crmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmo", editor, project_state, new_state)
self.assertTableExists("test_crmo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crmo", editor, new_state, project_state)
self.assertTableNotExists("test_crmo_pony")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["fields", "name"])
# And default manager not in set
operation = migrations.CreateModel(
"Foo", fields=[], managers=[("objects", models.Manager())]
)
definition = operation.deconstruct()
self.assertNotIn("managers", definition[2])
def test_create_model_with_duplicate_field_name(self):
with self.assertRaisesMessage(
ValueError, "Found duplicate value pink in CreateModel fields argument."
):
migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.TextField()),
("pink", models.IntegerField(default=1)),
],
)
def test_create_model_with_duplicate_base(self):
message = "Found duplicate value test_crmo.pony in CreateModel bases argument."
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
"test_crmo.Pony",
"test_crmo.Pony",
),
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
"test_crmo.Pony",
"test_crmo.pony",
),
)
message = (
"Found duplicate value migrations.unicodemodel in CreateModel bases "
"argument."
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
UnicodeModel,
UnicodeModel,
),
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
UnicodeModel,
"migrations.unicodemodel",
),
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
UnicodeModel,
"migrations.UnicodeModel",
),
)
message = (
"Found duplicate value <class 'django.db.models.base.Model'> in "
"CreateModel bases argument."
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
models.Model,
models.Model,
),
)
message = (
"Found duplicate value <class 'migrations.test_operations.Mixin'> in "
"CreateModel bases argument."
)
with self.assertRaisesMessage(ValueError, message):
migrations.CreateModel(
"Pony",
fields=[],
bases=(
Mixin,
Mixin,
),
)
def test_create_model_with_duplicate_manager_name(self):
with self.assertRaisesMessage(
ValueError,
"Found duplicate value objects in CreateModel managers argument.",
):
migrations.CreateModel(
"Pony",
fields=[],
managers=[
("objects", models.Manager()),
("objects", models.Manager()),
],
)
def test_create_model_with_unique_after(self):
"""
Tests the CreateModel operation directly followed by an
AlterUniqueTogether (bug #22844 - sqlite remake issues)
"""
operation1 = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=1)),
],
)
operation2 = migrations.CreateModel(
"Rider",
[
("id", models.AutoField(primary_key=True)),
("number", models.IntegerField(default=1)),
("pony", models.ForeignKey("test_crmoua.Pony", models.CASCADE)),
],
)
operation3 = migrations.AlterUniqueTogether(
"Rider",
[
("number", "pony"),
],
)
# Test the database alteration
project_state = ProjectState()
self.assertTableNotExists("test_crmoua_pony")
self.assertTableNotExists("test_crmoua_rider")
with connection.schema_editor() as editor:
new_state = project_state.clone()
operation1.state_forwards("test_crmoua", new_state)
operation1.database_forwards(
"test_crmoua", editor, project_state, new_state
)
project_state, new_state = new_state, new_state.clone()
operation2.state_forwards("test_crmoua", new_state)
operation2.database_forwards(
"test_crmoua", editor, project_state, new_state
)
project_state, new_state = new_state, new_state.clone()
operation3.state_forwards("test_crmoua", new_state)
operation3.database_forwards(
"test_crmoua", editor, project_state, new_state
)
self.assertTableExists("test_crmoua_pony")
self.assertTableExists("test_crmoua_rider")
def test_create_model_m2m(self):
"""
Test the creation of a model with a ManyToMany field and the
auto-created "through" model.
"""
project_state = self.set_up_test_model("test_crmomm")
operation = migrations.CreateModel(
"Stable",
[
("id", models.AutoField(primary_key=True)),
("ponies", models.ManyToManyField("Pony", related_name="stables")),
],
)
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards("test_crmomm", new_state)
# Test the database alteration
self.assertTableNotExists("test_crmomm_stable_ponies")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmomm", editor, project_state, new_state)
self.assertTableExists("test_crmomm_stable")
self.assertTableExists("test_crmomm_stable_ponies")
self.assertColumnNotExists("test_crmomm_stable", "ponies")
# Make sure the M2M field actually works
with atomic():
Pony = new_state.apps.get_model("test_crmomm", "Pony")
Stable = new_state.apps.get_model("test_crmomm", "Stable")
stable = Stable.objects.create()
p1 = Pony.objects.create(pink=False, weight=4.55)
p2 = Pony.objects.create(pink=True, weight=5.43)
stable.ponies.add(p1, p2)
self.assertEqual(stable.ponies.count(), 2)
stable.ponies.all().delete()
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_crmomm", editor, new_state, project_state
)
self.assertTableNotExists("test_crmomm_stable")
self.assertTableNotExists("test_crmomm_stable_ponies")
@skipUnlessDBFeature("supports_collation_on_charfield", "supports_foreign_keys")
def test_create_fk_models_to_pk_field_db_collation(self):
"""Creation of models with a FK to a PK with db_collation."""
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
app_label = "test_cfkmtopkfdbc"
operations = [
migrations.CreateModel(
"Pony",
[
(
"id",
models.CharField(
primary_key=True,
max_length=10,
db_collation=collation,
),
),
],
)
]
project_state = self.apply_operations(app_label, ProjectState(), operations)
# ForeignKey.
new_state = project_state.clone()
operation = migrations.CreateModel(
"Rider",
[
("id", models.AutoField(primary_key=True)),
("pony", models.ForeignKey("Pony", models.CASCADE)),
],
)
operation.state_forwards(app_label, new_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertColumnCollation(f"{app_label}_rider", "pony_id", collation)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
# OneToOneField.
new_state = project_state.clone()
operation = migrations.CreateModel(
"ShetlandPony",
[
(
"pony",
models.OneToOneField("Pony", models.CASCADE, primary_key=True),
),
("cuteness", models.IntegerField(default=1)),
],
)
operation.state_forwards(app_label, new_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertColumnCollation(f"{app_label}_shetlandpony", "pony_id", collation)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
def test_create_model_inheritance(self):
"""
Tests the CreateModel operation on a multi-table inheritance setup.
"""
project_state = self.set_up_test_model("test_crmoih")
# Test the state alteration
operation = migrations.CreateModel(
"ShetlandPony",
[
(
"pony_ptr",
models.OneToOneField(
"test_crmoih.Pony",
models.CASCADE,
auto_created=True,
primary_key=True,
to_field="id",
serialize=False,
),
),
("cuteness", models.IntegerField(default=1)),
],
)
new_state = project_state.clone()
operation.state_forwards("test_crmoih", new_state)
self.assertIn(("test_crmoih", "shetlandpony"), new_state.models)
# Test the database alteration
self.assertTableNotExists("test_crmoih_shetlandpony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmoih", editor, project_state, new_state)
self.assertTableExists("test_crmoih_shetlandpony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_crmoih", editor, new_state, project_state
)
self.assertTableNotExists("test_crmoih_shetlandpony")
def test_create_proxy_model(self):
"""
CreateModel ignores proxy models.
"""
project_state = self.set_up_test_model("test_crprmo")
# Test the state alteration
operation = migrations.CreateModel(
"ProxyPony",
[],
options={"proxy": True},
bases=("test_crprmo.Pony",),
)
self.assertEqual(operation.describe(), "Create proxy model ProxyPony")
new_state = project_state.clone()
operation.state_forwards("test_crprmo", new_state)
self.assertIn(("test_crprmo", "proxypony"), new_state.models)
# Test the database alteration
self.assertTableNotExists("test_crprmo_proxypony")
self.assertTableExists("test_crprmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crprmo", editor, project_state, new_state)
self.assertTableNotExists("test_crprmo_proxypony")
self.assertTableExists("test_crprmo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_crprmo", editor, new_state, project_state
)
self.assertTableNotExists("test_crprmo_proxypony")
self.assertTableExists("test_crprmo_pony")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["bases", "fields", "name", "options"])
def test_create_unmanaged_model(self):
"""
CreateModel ignores unmanaged models.
"""
project_state = self.set_up_test_model("test_crummo")
# Test the state alteration
operation = migrations.CreateModel(
"UnmanagedPony",
[],
options={"proxy": True},
bases=("test_crummo.Pony",),
)
self.assertEqual(operation.describe(), "Create proxy model UnmanagedPony")
new_state = project_state.clone()
operation.state_forwards("test_crummo", new_state)
self.assertIn(("test_crummo", "unmanagedpony"), new_state.models)
# Test the database alteration
self.assertTableNotExists("test_crummo_unmanagedpony")
self.assertTableExists("test_crummo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crummo", editor, project_state, new_state)
self.assertTableNotExists("test_crummo_unmanagedpony")
self.assertTableExists("test_crummo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_crummo", editor, new_state, project_state
)
self.assertTableNotExists("test_crummo_unmanagedpony")
self.assertTableExists("test_crummo_pony")
@skipUnlessDBFeature("supports_table_check_constraints")
def test_create_model_with_constraint(self):
where = models.Q(pink__gt=2)
check_constraint = models.CheckConstraint(
check=where, name="test_constraint_pony_pink_gt_2"
)
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=3)),
],
options={"constraints": [check_constraint]},
)
# Test the state alteration
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crmo", new_state)
self.assertEqual(
len(new_state.models["test_crmo", "pony"].options["constraints"]), 1
)
# Test database alteration
self.assertTableNotExists("test_crmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmo", editor, project_state, new_state)
self.assertTableExists("test_crmo_pony")
with connection.cursor() as cursor:
with self.assertRaises(IntegrityError):
cursor.execute("INSERT INTO test_crmo_pony (id, pink) VALUES (1, 1)")
# Test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crmo", editor, new_state, project_state)
self.assertTableNotExists("test_crmo_pony")
# Test deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2]["options"]["constraints"], [check_constraint])
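
    # CreateModel carries constraints in options; the same constraint could
    # be declared on the model itself. A sketch of the equivalent Meta
    # declaration:
    #
    #   class Pony(models.Model):
    #       pink = models.IntegerField(default=3)
    #
    #       class Meta:
    #           constraints = [
    #               models.CheckConstraint(
    #                   check=models.Q(pink__gt=2),
    #                   name="test_constraint_pony_pink_gt_2",
    #               ),
    #           ]
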
@skipUnlessDBFeature("supports_table_check_constraints")
def test_create_model_with_boolean_expression_in_check_constraint(self):
app_label = "test_crmobechc"
rawsql_constraint = models.CheckConstraint(
check=models.expressions.RawSQL(
"price < %s", (1000,), output_field=models.BooleanField()
),
name=f"{app_label}_price_lt_1000_raw",
)
wrapper_constraint = models.CheckConstraint(
check=models.expressions.ExpressionWrapper(
models.Q(price__gt=500) | models.Q(price__lt=500),
output_field=models.BooleanField(),
),
name=f"{app_label}_price_neq_500_wrap",
)
operation = migrations.CreateModel(
"Product",
[
("id", models.AutoField(primary_key=True)),
("price", models.IntegerField(null=True)),
],
options={"constraints": [rawsql_constraint, wrapper_constraint]},
)
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
# Add table.
        self.assertTableNotExists(f"{app_label}_product")
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertTableExists(f"{app_label}_product")
insert_sql = f"INSERT INTO {app_label}_product (id, price) VALUES (%d, %d)"
with connection.cursor() as cursor:
with self.assertRaises(IntegrityError):
cursor.execute(insert_sql % (1, 1000))
cursor.execute(insert_sql % (1, 999))
with self.assertRaises(IntegrityError):
cursor.execute(insert_sql % (2, 500))
cursor.execute(insert_sql % (2, 499))
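
    # Raw and wrapped expressions are accepted as constraint checks only when
    # they resolve to a boolean, hence the explicit
    # output_field=models.BooleanField() on both constraints above, e.g.:
    #
    #   models.CheckConstraint(
    #       check=models.expressions.RawSQL(
    #           "price < %s", (1000,), output_field=models.BooleanField()
    #       ),
    #       name="price_lt_1000_raw",
    #   )
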
def test_create_model_with_partial_unique_constraint(self):
partial_unique_constraint = models.UniqueConstraint(
fields=["pink"],
condition=models.Q(weight__gt=5),
name="test_constraint_pony_pink_for_weight_gt_5_uniq",
)
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=3)),
("weight", models.FloatField()),
],
options={"constraints": [partial_unique_constraint]},
)
# Test the state alteration
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crmo", new_state)
self.assertEqual(
len(new_state.models["test_crmo", "pony"].options["constraints"]), 1
)
# Test database alteration
self.assertTableNotExists("test_crmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crmo", editor, project_state, new_state)
self.assertTableExists("test_crmo_pony")
# Test constraint works
Pony = new_state.apps.get_model("test_crmo", "Pony")
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=6.0)
if connection.features.supports_partial_indexes:
with self.assertRaises(IntegrityError):
Pony.objects.create(pink=1, weight=7.0)
else:
Pony.objects.create(pink=1, weight=7.0)
# Test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_crmo", editor, new_state, project_state)
self.assertTableNotExists("test_crmo_pony")
# Test deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2]["options"]["constraints"], [partial_unique_constraint]
)
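
    # On backends with partial-index support the constraint above becomes,
    # roughly, the SQL below, which is why rows with weight <= 5 may
    # duplicate pink freely:
    #
    #   CREATE UNIQUE INDEX test_constraint_pony_pink_for_weight_gt_5_uniq
    #       ON test_crmo_pony (pink) WHERE weight > 5;
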
def test_create_model_with_deferred_unique_constraint(self):
deferred_unique_constraint = models.UniqueConstraint(
fields=["pink"],
name="deferrable_pink_constraint",
deferrable=models.Deferrable.DEFERRED,
)
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=3)),
],
options={"constraints": [deferred_unique_constraint]},
)
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crmo", new_state)
self.assertEqual(
len(new_state.models["test_crmo", "pony"].options["constraints"]), 1
)
self.assertTableNotExists("test_crmo_pony")
# Create table.
with connection.schema_editor() as editor:
operation.database_forwards("test_crmo", editor, project_state, new_state)
self.assertTableExists("test_crmo_pony")
Pony = new_state.apps.get_model("test_crmo", "Pony")
Pony.objects.create(pink=1)
if connection.features.supports_deferrable_unique_constraints:
# Unique constraint is deferred.
with transaction.atomic():
obj = Pony.objects.create(pink=1)
obj.pink = 2
obj.save()
# Constraint behavior can be changed with SET CONSTRAINTS.
with self.assertRaises(IntegrityError):
with transaction.atomic(), connection.cursor() as cursor:
quoted_name = connection.ops.quote_name(
deferred_unique_constraint.name
)
cursor.execute("SET CONSTRAINTS %s IMMEDIATE" % quoted_name)
obj = Pony.objects.create(pink=1)
obj.pink = 3
obj.save()
else:
Pony.objects.create(pink=1)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards("test_crmo", editor, new_state, project_state)
self.assertTableNotExists("test_crmo_pony")
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2]["options"]["constraints"],
[deferred_unique_constraint],
)
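
    # Deferrable.DEFERRED means the unique check runs at COMMIT rather than
    # per statement, so the transient duplicate inside the atomic block above
    # is fine as long as it is resolved before commit. The timing can be
    # switched per transaction, as the test does, with roughly:
    #
    #   SET CONSTRAINTS "deferrable_pink_constraint" IMMEDIATE;
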
@skipUnlessDBFeature("supports_covering_indexes")
def test_create_model_with_covering_unique_constraint(self):
covering_unique_constraint = models.UniqueConstraint(
fields=["pink"],
include=["weight"],
name="test_constraint_pony_pink_covering_weight",
)
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=3)),
("weight", models.FloatField()),
],
options={"constraints": [covering_unique_constraint]},
)
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crmo", new_state)
self.assertEqual(
len(new_state.models["test_crmo", "pony"].options["constraints"]), 1
)
self.assertTableNotExists("test_crmo_pony")
# Create table.
with connection.schema_editor() as editor:
operation.database_forwards("test_crmo", editor, project_state, new_state)
self.assertTableExists("test_crmo_pony")
Pony = new_state.apps.get_model("test_crmo", "Pony")
Pony.objects.create(pink=1, weight=4.0)
with self.assertRaises(IntegrityError):
Pony.objects.create(pink=1, weight=7.0)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards("test_crmo", editor, new_state, project_state)
self.assertTableNotExists("test_crmo_pony")
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "CreateModel")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2]["options"]["constraints"],
[covering_unique_constraint],
)
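
    # A covering constraint enforces uniqueness on pink only; weight is
    # merely stored in the index to enable index-only scans. On PostgreSQL
    # this corresponds, roughly, to:
    #
    #   CREATE UNIQUE INDEX test_constraint_pony_pink_covering_weight
    #       ON test_crmo_pony (pink) INCLUDE (weight);
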
def test_create_model_managers(self):
"""
The managers on a model are set.
"""
project_state = self.set_up_test_model("test_cmoma")
# Test the state alteration
operation = migrations.CreateModel(
"Food",
fields=[
("id", models.AutoField(primary_key=True)),
],
managers=[
("food_qs", FoodQuerySet.as_manager()),
("food_mgr", FoodManager("a", "b")),
("food_mgr_kwargs", FoodManager("x", "y", 3, 4)),
],
)
self.assertEqual(operation.describe(), "Create model Food")
new_state = project_state.clone()
operation.state_forwards("test_cmoma", new_state)
self.assertIn(("test_cmoma", "food"), new_state.models)
managers = new_state.models["test_cmoma", "food"].managers
self.assertEqual(managers[0][0], "food_qs")
self.assertIsInstance(managers[0][1], models.Manager)
self.assertEqual(managers[1][0], "food_mgr")
self.assertIsInstance(managers[1][1], FoodManager)
self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
self.assertEqual(managers[2][0], "food_mgr_kwargs")
self.assertIsInstance(managers[2][1], FoodManager)
self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))
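
    # Managers are carried on the operation as ordered (name, instance)
    # pairs, and each instance must be deconstructible so that it can be
    # written out to a migration file, e.g.:
    #
    #   migrations.CreateModel(
    #       "Food",
    #       fields=[("id", models.AutoField(primary_key=True))],
    #       managers=[("food_mgr", FoodManager("a", "b"))],
    #   )
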
def test_delete_model(self):
"""
Tests the DeleteModel operation.
"""
project_state = self.set_up_test_model("test_dlmo")
# Test the state alteration
operation = migrations.DeleteModel("Pony")
self.assertEqual(operation.describe(), "Delete model Pony")
self.assertEqual(operation.migration_name_fragment, "delete_pony")
new_state = project_state.clone()
operation.state_forwards("test_dlmo", new_state)
self.assertNotIn(("test_dlmo", "pony"), new_state.models)
# Test the database alteration
self.assertTableExists("test_dlmo_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_dlmo", editor, project_state, new_state)
self.assertTableNotExists("test_dlmo_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_dlmo", editor, new_state, project_state)
self.assertTableExists("test_dlmo_pony")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "DeleteModel")
self.assertEqual(definition[1], [])
self.assertEqual(list(definition[2]), ["name"])

    def test_delete_proxy_model(self):
"""
Tests the DeleteModel operation ignores proxy models.
"""
project_state = self.set_up_test_model("test_dlprmo", proxy_model=True)
# Test the state alteration
operation = migrations.DeleteModel("ProxyPony")
new_state = project_state.clone()
operation.state_forwards("test_dlprmo", new_state)
self.assertIn(("test_dlprmo", "proxypony"), project_state.models)
self.assertNotIn(("test_dlprmo", "proxypony"), new_state.models)
# Test the database alteration
self.assertTableExists("test_dlprmo_pony")
self.assertTableNotExists("test_dlprmo_proxypony")
with connection.schema_editor() as editor:
operation.database_forwards("test_dlprmo", editor, project_state, new_state)
self.assertTableExists("test_dlprmo_pony")
self.assertTableNotExists("test_dlprmo_proxypony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_dlprmo", editor, new_state, project_state
)
self.assertTableExists("test_dlprmo_pony")
self.assertTableNotExists("test_dlprmo_proxypony")

    def test_delete_mti_model(self):
project_state = self.set_up_test_model("test_dlmtimo", mti_model=True)
# Test the state alteration
operation = migrations.DeleteModel("ShetlandPony")
new_state = project_state.clone()
operation.state_forwards("test_dlmtimo", new_state)
self.assertIn(("test_dlmtimo", "shetlandpony"), project_state.models)
self.assertNotIn(("test_dlmtimo", "shetlandpony"), new_state.models)
# Test the database alteration
self.assertTableExists("test_dlmtimo_pony")
self.assertTableExists("test_dlmtimo_shetlandpony")
self.assertColumnExists("test_dlmtimo_shetlandpony", "pony_ptr_id")
with connection.schema_editor() as editor:
operation.database_forwards(
"test_dlmtimo", editor, project_state, new_state
)
self.assertTableExists("test_dlmtimo_pony")
self.assertTableNotExists("test_dlmtimo_shetlandpony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_dlmtimo", editor, new_state, project_state
)
self.assertTableExists("test_dlmtimo_pony")
self.assertTableExists("test_dlmtimo_shetlandpony")
self.assertColumnExists("test_dlmtimo_shetlandpony", "pony_ptr_id")

    def test_rename_model(self):
"""
Tests the RenameModel operation.
"""
project_state = self.set_up_test_model("test_rnmo", related_model=True)
# Test the state alteration
operation = migrations.RenameModel("Pony", "Horse")
self.assertEqual(operation.describe(), "Rename model Pony to Horse")
self.assertEqual(operation.migration_name_fragment, "rename_pony_horse")
# Test initial state and database
self.assertIn(("test_rnmo", "pony"), project_state.models)
self.assertNotIn(("test_rnmo", "horse"), project_state.models)
self.assertTableExists("test_rnmo_pony")
self.assertTableNotExists("test_rnmo_horse")
if connection.features.supports_foreign_keys:
self.assertFKExists(
"test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id")
)
self.assertFKNotExists(
"test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id")
)
# Migrate forwards
new_state = project_state.clone()
atomic_rename = connection.features.supports_atomic_references_rename
new_state = self.apply_operations(
"test_rnmo", new_state, [operation], atomic=atomic_rename
)
# Test new state and database
self.assertNotIn(("test_rnmo", "pony"), new_state.models)
self.assertIn(("test_rnmo", "horse"), new_state.models)
# RenameModel also repoints all incoming FKs and M2Ms
self.assertEqual(
new_state.models["test_rnmo", "rider"].fields["pony"].remote_field.model,
"test_rnmo.Horse",
)
self.assertTableNotExists("test_rnmo_pony")
self.assertTableExists("test_rnmo_horse")
if connection.features.supports_foreign_keys:
self.assertFKNotExists(
"test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id")
)
self.assertFKExists(
"test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id")
)
# Migrate backwards
original_state = self.unapply_operations(
"test_rnmo", project_state, [operation], atomic=atomic_rename
)
# Test original state and database
self.assertIn(("test_rnmo", "pony"), original_state.models)
self.assertNotIn(("test_rnmo", "horse"), original_state.models)
self.assertEqual(
original_state.models["test_rnmo", "rider"]
.fields["pony"]
.remote_field.model,
"Pony",
)
self.assertTableExists("test_rnmo_pony")
self.assertTableNotExists("test_rnmo_horse")
if connection.features.supports_foreign_keys:
self.assertFKExists(
"test_rnmo_rider", ["pony_id"], ("test_rnmo_pony", "id")
)
self.assertFKNotExists(
"test_rnmo_rider", ["pony_id"], ("test_rnmo_horse", "id")
)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RenameModel")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"old_name": "Pony", "new_name": "Horse"})
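
    # In a migration file the operation above is a one-liner; as the
    # assertions show, it also repoints incoming FKs and M2Ms at the renamed
    # model:
    #
    #   migrations.RenameModel(old_name="Pony", new_name="Horse")
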
def test_rename_model_state_forwards(self):
"""
RenameModel operations shouldn't trigger the caching of rendered apps
on state without prior apps.
"""
state = ProjectState()
state.add_model(ModelState("migrations", "Foo", []))
operation = migrations.RenameModel("Foo", "Bar")
operation.state_forwards("migrations", state)
self.assertNotIn("apps", state.__dict__)
self.assertNotIn(("migrations", "foo"), state.models)
self.assertIn(("migrations", "bar"), state.models)
# Now with apps cached.
apps = state.apps
operation = migrations.RenameModel("Bar", "Foo")
operation.state_forwards("migrations", state)
self.assertIs(state.apps, apps)
self.assertNotIn(("migrations", "bar"), state.models)
self.assertIn(("migrations", "foo"), state.models)

    def test_rename_model_with_self_referential_fk(self):
"""
Tests the RenameModel operation on model with self referential FK.
"""
project_state = self.set_up_test_model("test_rmwsrf", related_model=True)
# Test the state alteration
operation = migrations.RenameModel("Rider", "HorseRider")
self.assertEqual(operation.describe(), "Rename model Rider to HorseRider")
new_state = project_state.clone()
operation.state_forwards("test_rmwsrf", new_state)
self.assertNotIn(("test_rmwsrf", "rider"), new_state.models)
self.assertIn(("test_rmwsrf", "horserider"), new_state.models)
# Remember, RenameModel also repoints all incoming FKs and M2Ms
self.assertEqual(
"self",
new_state.models["test_rmwsrf", "horserider"]
.fields["friend"]
.remote_field.model,
)
HorseRider = new_state.apps.get_model("test_rmwsrf", "horserider")
self.assertIs(
HorseRider._meta.get_field("horserider").remote_field.model, HorseRider
)
# Test the database alteration
self.assertTableExists("test_rmwsrf_rider")
self.assertTableNotExists("test_rmwsrf_horserider")
if connection.features.supports_foreign_keys:
self.assertFKExists(
"test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_rider", "id")
)
self.assertFKNotExists(
"test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_horserider", "id")
)
atomic_rename = connection.features.supports_atomic_references_rename
with connection.schema_editor(atomic=atomic_rename) as editor:
operation.database_forwards("test_rmwsrf", editor, project_state, new_state)
self.assertTableNotExists("test_rmwsrf_rider")
self.assertTableExists("test_rmwsrf_horserider")
if connection.features.supports_foreign_keys:
self.assertFKNotExists(
"test_rmwsrf_horserider", ["friend_id"], ("test_rmwsrf_rider", "id")
)
self.assertFKExists(
"test_rmwsrf_horserider",
["friend_id"],
("test_rmwsrf_horserider", "id"),
)
# And test reversal
with connection.schema_editor(atomic=atomic_rename) as editor:
operation.database_backwards(
"test_rmwsrf", editor, new_state, project_state
)
self.assertTableExists("test_rmwsrf_rider")
self.assertTableNotExists("test_rmwsrf_horserider")
if connection.features.supports_foreign_keys:
self.assertFKExists(
"test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_rider", "id")
)
self.assertFKNotExists(
"test_rmwsrf_rider", ["friend_id"], ("test_rmwsrf_horserider", "id")
)

    def test_rename_model_with_superclass_fk(self):
"""
Tests the RenameModel operation on a model which has a superclass that
has a foreign key.
"""
project_state = self.set_up_test_model(
"test_rmwsc", related_model=True, mti_model=True
)
# Test the state alteration
operation = migrations.RenameModel("ShetlandPony", "LittleHorse")
self.assertEqual(
operation.describe(), "Rename model ShetlandPony to LittleHorse"
)
new_state = project_state.clone()
operation.state_forwards("test_rmwsc", new_state)
self.assertNotIn(("test_rmwsc", "shetlandpony"), new_state.models)
self.assertIn(("test_rmwsc", "littlehorse"), new_state.models)
# RenameModel shouldn't repoint the superclass's relations, only local ones
self.assertEqual(
project_state.models["test_rmwsc", "rider"]
.fields["pony"]
.remote_field.model,
new_state.models["test_rmwsc", "rider"].fields["pony"].remote_field.model,
)
# Before running the migration we have a table for Shetland Pony, not
# Little Horse.
self.assertTableExists("test_rmwsc_shetlandpony")
self.assertTableNotExists("test_rmwsc_littlehorse")
if connection.features.supports_foreign_keys:
# and the foreign key on rider points to pony, not shetland pony
self.assertFKExists(
"test_rmwsc_rider", ["pony_id"], ("test_rmwsc_pony", "id")
)
self.assertFKNotExists(
"test_rmwsc_rider", ["pony_id"], ("test_rmwsc_shetlandpony", "id")
)
with connection.schema_editor(
atomic=connection.features.supports_atomic_references_rename
) as editor:
operation.database_forwards("test_rmwsc", editor, project_state, new_state)
# Now we have a little horse table, not shetland pony
self.assertTableNotExists("test_rmwsc_shetlandpony")
self.assertTableExists("test_rmwsc_littlehorse")
if connection.features.supports_foreign_keys:
            # but the foreign keys still point at pony, not little horse
self.assertFKExists(
"test_rmwsc_rider", ["pony_id"], ("test_rmwsc_pony", "id")
)
self.assertFKNotExists(
"test_rmwsc_rider", ["pony_id"], ("test_rmwsc_littlehorse", "id")
)

    def test_rename_model_with_self_referential_m2m(self):
app_label = "test_rename_model_with_self_referential_m2m"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"ReflexivePony",
fields=[
("id", models.AutoField(primary_key=True)),
("ponies", models.ManyToManyField("self")),
],
),
],
)
project_state = self.apply_operations(
app_label,
project_state,
operations=[
migrations.RenameModel("ReflexivePony", "ReflexivePony2"),
],
atomic=connection.features.supports_atomic_references_rename,
)
Pony = project_state.apps.get_model(app_label, "ReflexivePony2")
pony = Pony.objects.create()
pony.ponies.add(pony)

    def test_rename_model_with_m2m(self):
app_label = "test_rename_model_with_m2m"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.AutoField(primary_key=True)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
("riders", models.ManyToManyField("Rider")),
],
),
],
)
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)
project_state = self.apply_operations(
app_label,
project_state,
operations=[
migrations.RenameModel("Pony", "Pony2"),
],
atomic=connection.features.supports_atomic_references_rename,
)
Pony = project_state.apps.get_model(app_label, "Pony2")
Rider = project_state.apps.get_model(app_label, "Rider")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)
self.assertEqual(Pony.objects.count(), 2)
self.assertEqual(Rider.objects.count(), 2)
self.assertEqual(
Pony._meta.get_field("riders").remote_field.through.objects.count(), 2
)

    def test_rename_model_with_db_table_rename_m2m(self):
app_label = "test_rmwdbrm2m"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.AutoField(primary_key=True)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
("riders", models.ManyToManyField("Rider")),
],
options={"db_table": "pony"},
),
],
)
new_state = self.apply_operations(
app_label,
project_state,
operations=[migrations.RenameModel("Pony", "PinkPony")],
atomic=connection.features.supports_atomic_references_rename,
)
Pony = new_state.apps.get_model(app_label, "PinkPony")
Rider = new_state.apps.get_model(app_label, "Rider")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)

    def test_rename_m2m_target_model(self):
app_label = "test_rename_m2m_target_model"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.AutoField(primary_key=True)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
("riders", models.ManyToManyField("Rider")),
],
),
],
)
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)
project_state = self.apply_operations(
app_label,
project_state,
operations=[
migrations.RenameModel("Rider", "Rider2"),
],
atomic=connection.features.supports_atomic_references_rename,
)
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider2")
pony = Pony.objects.create()
rider = Rider.objects.create()
pony.riders.add(rider)
self.assertEqual(Pony.objects.count(), 2)
self.assertEqual(Rider.objects.count(), 2)
self.assertEqual(
Pony._meta.get_field("riders").remote_field.through.objects.count(), 2
)

    def test_rename_m2m_through_model(self):
app_label = "test_rename_through"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.AutoField(primary_key=True)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
],
),
migrations.CreateModel(
"PonyRider",
fields=[
("id", models.AutoField(primary_key=True)),
(
"rider",
models.ForeignKey(
"test_rename_through.Rider", models.CASCADE
),
),
(
"pony",
models.ForeignKey(
"test_rename_through.Pony", models.CASCADE
),
),
],
),
migrations.AddField(
"Pony",
"riders",
models.ManyToManyField(
"test_rename_through.Rider",
through="test_rename_through.PonyRider",
),
),
],
)
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider")
PonyRider = project_state.apps.get_model(app_label, "PonyRider")
pony = Pony.objects.create()
rider = Rider.objects.create()
PonyRider.objects.create(pony=pony, rider=rider)
project_state = self.apply_operations(
app_label,
project_state,
operations=[
migrations.RenameModel("PonyRider", "PonyRider2"),
],
)
Pony = project_state.apps.get_model(app_label, "Pony")
Rider = project_state.apps.get_model(app_label, "Rider")
PonyRider = project_state.apps.get_model(app_label, "PonyRider2")
pony = Pony.objects.first()
rider = Rider.objects.create()
PonyRider.objects.create(pony=pony, rider=rider)
self.assertEqual(Pony.objects.count(), 1)
self.assertEqual(Rider.objects.count(), 2)
self.assertEqual(PonyRider.objects.count(), 2)
self.assertEqual(pony.riders.count(), 2)

    def test_rename_m2m_model_after_rename_field(self):
"""RenameModel renames a many-to-many column after a RenameField."""
app_label = "test_rename_multiple"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=20)),
],
),
migrations.CreateModel(
"Rider",
fields=[
("id", models.AutoField(primary_key=True)),
(
"pony",
models.ForeignKey(
"test_rename_multiple.Pony", models.CASCADE
),
),
],
),
migrations.CreateModel(
"PonyRider",
fields=[
("id", models.AutoField(primary_key=True)),
("riders", models.ManyToManyField("Rider")),
],
),
migrations.RenameField(
model_name="pony", old_name="name", new_name="fancy_name"
),
migrations.RenameModel(old_name="Rider", new_name="Jockey"),
],
atomic=connection.features.supports_atomic_references_rename,
)
Pony = project_state.apps.get_model(app_label, "Pony")
Jockey = project_state.apps.get_model(app_label, "Jockey")
PonyRider = project_state.apps.get_model(app_label, "PonyRider")
# No "no such column" error means the column was renamed correctly.
pony = Pony.objects.create(fancy_name="a good name")
jockey = Jockey.objects.create(pony=pony)
ponyrider = PonyRider.objects.create()
ponyrider.riders.add(jockey)

    def test_add_field(self):
"""
Tests the AddField operation.
"""
# Test the state alteration
operation = migrations.AddField(
"Pony",
"height",
models.FloatField(null=True, default=5),
)
self.assertEqual(operation.describe(), "Add field height to Pony")
self.assertEqual(operation.migration_name_fragment, "pony_height")
project_state, new_state = self.make_test_state("test_adfl", operation)
self.assertEqual(len(new_state.models["test_adfl", "pony"].fields), 4)
field = new_state.models["test_adfl", "pony"].fields["height"]
self.assertEqual(field.default, 5)
# Test the database alteration
self.assertColumnNotExists("test_adfl_pony", "height")
with connection.schema_editor() as editor:
operation.database_forwards("test_adfl", editor, project_state, new_state)
self.assertColumnExists("test_adfl_pony", "height")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_adfl", editor, new_state, project_state)
self.assertColumnNotExists("test_adfl_pony", "height")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddField")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["field", "model_name", "name"])
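
    # The equivalent hand-written operation is short; the database gets a new
    # column and existing rows are backfilled with the default:
    #
    #   migrations.AddField(
    #       "Pony",
    #       "height",
    #       models.FloatField(null=True, default=5),
    #   )
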
def test_add_charfield(self):
"""
        Tests the AddField operation on CharField.
"""
project_state = self.set_up_test_model("test_adchfl")
Pony = project_state.apps.get_model("test_adchfl", "Pony")
pony = Pony.objects.create(weight=42)
new_state = self.apply_operations(
"test_adchfl",
project_state,
[
migrations.AddField(
"Pony",
"text",
models.CharField(max_length=10, default="some text"),
),
migrations.AddField(
"Pony",
"empty",
models.CharField(max_length=10, default=""),
),
                # If not properly quoted, digits would be interpreted as an int.
migrations.AddField(
"Pony",
"digits",
models.CharField(max_length=10, default="42"),
),
# Manual quoting is fragile and could trip on quotes. Refs #xyz.
migrations.AddField(
"Pony",
"quotes",
models.CharField(max_length=10, default='"\'"'),
),
],
)
Pony = new_state.apps.get_model("test_adchfl", "Pony")
pony = Pony.objects.get(pk=pony.pk)
self.assertEqual(pony.text, "some text")
self.assertEqual(pony.empty, "")
self.assertEqual(pony.digits, "42")
self.assertEqual(pony.quotes, '"\'"')

    def test_add_textfield(self):
"""
Tests the AddField operation on TextField.
"""
project_state = self.set_up_test_model("test_adtxtfl")
Pony = project_state.apps.get_model("test_adtxtfl", "Pony")
pony = Pony.objects.create(weight=42)
new_state = self.apply_operations(
"test_adtxtfl",
project_state,
[
migrations.AddField(
"Pony",
"text",
models.TextField(default="some text"),
),
migrations.AddField(
"Pony",
"empty",
models.TextField(default=""),
),
                # If not properly quoted, digits would be interpreted as an int.
migrations.AddField(
"Pony",
"digits",
models.TextField(default="42"),
),
# Manual quoting is fragile and could trip on quotes. Refs #xyz.
migrations.AddField(
"Pony",
"quotes",
models.TextField(default='"\'"'),
),
],
)
Pony = new_state.apps.get_model("test_adtxtfl", "Pony")
pony = Pony.objects.get(pk=pony.pk)
self.assertEqual(pony.text, "some text")
self.assertEqual(pony.empty, "")
self.assertEqual(pony.digits, "42")
self.assertEqual(pony.quotes, '"\'"')

    def test_add_binaryfield(self):
"""
        Tests the AddField operation on BinaryField.
"""
project_state = self.set_up_test_model("test_adbinfl")
Pony = project_state.apps.get_model("test_adbinfl", "Pony")
pony = Pony.objects.create(weight=42)
new_state = self.apply_operations(
"test_adbinfl",
project_state,
[
migrations.AddField(
"Pony",
"blob",
models.BinaryField(default=b"some text"),
),
migrations.AddField(
"Pony",
"empty",
models.BinaryField(default=b""),
),
                # If not properly quoted, digits would be interpreted as an int.
migrations.AddField(
"Pony",
"digits",
models.BinaryField(default=b"42"),
),
# Manual quoting is fragile and could trip on quotes. Refs #xyz.
migrations.AddField(
"Pony",
"quotes",
models.BinaryField(default=b'"\'"'),
),
],
)
Pony = new_state.apps.get_model("test_adbinfl", "Pony")
pony = Pony.objects.get(pk=pony.pk)
        # SQLite returns buffer/memoryview; cast to bytes for checking.
self.assertEqual(bytes(pony.blob), b"some text")
self.assertEqual(bytes(pony.empty), b"")
self.assertEqual(bytes(pony.digits), b"42")
self.assertEqual(bytes(pony.quotes), b'"\'"')

    def test_column_name_quoting(self):
"""
Column names that are SQL keywords shouldn't cause problems when used
in migrations (#22168).
"""
project_state = self.set_up_test_model("test_regr22168")
operation = migrations.AddField(
"Pony",
"order",
models.IntegerField(default=0),
)
new_state = project_state.clone()
operation.state_forwards("test_regr22168", new_state)
with connection.schema_editor() as editor:
operation.database_forwards(
"test_regr22168", editor, project_state, new_state
)
self.assertColumnExists("test_regr22168_pony", "order")

    def test_add_field_preserve_default(self):
"""
Tests the AddField operation's state alteration
when preserve_default = False.
"""
project_state = self.set_up_test_model("test_adflpd")
# Test the state alteration
operation = migrations.AddField(
"Pony",
"height",
models.FloatField(null=True, default=4),
preserve_default=False,
)
new_state = project_state.clone()
operation.state_forwards("test_adflpd", new_state)
self.assertEqual(len(new_state.models["test_adflpd", "pony"].fields), 4)
field = new_state.models["test_adflpd", "pony"].fields["height"]
self.assertEqual(field.default, models.NOT_PROVIDED)
# Test the database alteration
project_state.apps.get_model("test_adflpd", "pony").objects.create(
weight=4,
)
self.assertColumnNotExists("test_adflpd_pony", "height")
with connection.schema_editor() as editor:
operation.database_forwards("test_adflpd", editor, project_state, new_state)
self.assertColumnExists("test_adflpd_pony", "height")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddField")
self.assertEqual(definition[1], [])
self.assertEqual(
sorted(definition[2]), ["field", "model_name", "name", "preserve_default"]
)
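
    # With preserve_default=False the default above is applied once, to
    # backfill existing rows during the ALTER, and is then dropped from the
    # field's state (hence field.default == models.NOT_PROVIDED), e.g.:
    #
    #   migrations.AddField(
    #       "Pony",
    #       "height",
    #       models.FloatField(null=True, default=4),
    #       preserve_default=False,
    #   )
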
def test_add_field_m2m(self):
"""
Tests the AddField operation with a ManyToManyField.
"""
project_state = self.set_up_test_model("test_adflmm", second_model=True)
# Test the state alteration
operation = migrations.AddField(
"Pony", "stables", models.ManyToManyField("Stable", related_name="ponies")
)
new_state = project_state.clone()
operation.state_forwards("test_adflmm", new_state)
self.assertEqual(len(new_state.models["test_adflmm", "pony"].fields), 4)
# Test the database alteration
self.assertTableNotExists("test_adflmm_pony_stables")
with connection.schema_editor() as editor:
operation.database_forwards("test_adflmm", editor, project_state, new_state)
self.assertTableExists("test_adflmm_pony_stables")
self.assertColumnNotExists("test_adflmm_pony", "stables")
# Make sure the M2M field actually works
with atomic():
Pony = new_state.apps.get_model("test_adflmm", "Pony")
p = Pony.objects.create(pink=False, weight=4.55)
p.stables.create()
self.assertEqual(p.stables.count(), 1)
p.stables.all().delete()
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_adflmm", editor, new_state, project_state
)
self.assertTableNotExists("test_adflmm_pony_stables")
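
    # Adding an M2M creates only the auto-generated join table, named
    # "<app_label>_<model>_<field>", with an FK column to each side; no
    # column is added to the pony table itself. Roughly:
    #
    #   CREATE TABLE test_adflmm_pony_stables (
    #       id ... PRIMARY KEY,
    #       pony_id ... REFERENCES test_adflmm_pony (id),
    #       stable_id ... REFERENCES test_adflmm_stable (id)
    #   );
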
def test_alter_field_m2m(self):
project_state = self.set_up_test_model("test_alflmm", second_model=True)
project_state = self.apply_operations(
"test_alflmm",
project_state,
operations=[
migrations.AddField(
"Pony",
"stables",
models.ManyToManyField("Stable", related_name="ponies"),
)
],
)
Pony = project_state.apps.get_model("test_alflmm", "Pony")
self.assertFalse(Pony._meta.get_field("stables").blank)
project_state = self.apply_operations(
"test_alflmm",
project_state,
operations=[
migrations.AlterField(
"Pony",
"stables",
models.ManyToManyField(
to="Stable", related_name="ponies", blank=True
),
)
],
)
Pony = project_state.apps.get_model("test_alflmm", "Pony")
self.assertTrue(Pony._meta.get_field("stables").blank)

    def test_repoint_field_m2m(self):
project_state = self.set_up_test_model(
"test_alflmm", second_model=True, third_model=True
)
project_state = self.apply_operations(
"test_alflmm",
project_state,
operations=[
migrations.AddField(
"Pony",
"places",
models.ManyToManyField("Stable", related_name="ponies"),
)
],
)
Pony = project_state.apps.get_model("test_alflmm", "Pony")
project_state = self.apply_operations(
"test_alflmm",
project_state,
operations=[
migrations.AlterField(
"Pony",
"places",
models.ManyToManyField(to="Van", related_name="ponies"),
)
],
)
# Ensure the new field actually works
Pony = project_state.apps.get_model("test_alflmm", "Pony")
p = Pony.objects.create(pink=False, weight=4.55)
p.places.create()
self.assertEqual(p.places.count(), 1)
p.places.all().delete()

    def test_remove_field_m2m(self):
project_state = self.set_up_test_model("test_rmflmm", second_model=True)
project_state = self.apply_operations(
"test_rmflmm",
project_state,
operations=[
migrations.AddField(
"Pony",
"stables",
models.ManyToManyField("Stable", related_name="ponies"),
)
],
)
self.assertTableExists("test_rmflmm_pony_stables")
with_field_state = project_state.clone()
operations = [migrations.RemoveField("Pony", "stables")]
project_state = self.apply_operations(
"test_rmflmm", project_state, operations=operations
)
self.assertTableNotExists("test_rmflmm_pony_stables")
# And test reversal
self.unapply_operations("test_rmflmm", with_field_state, operations=operations)
self.assertTableExists("test_rmflmm_pony_stables")

    def test_remove_field_m2m_with_through(self):
project_state = self.set_up_test_model("test_rmflmmwt", second_model=True)
self.assertTableNotExists("test_rmflmmwt_ponystables")
project_state = self.apply_operations(
"test_rmflmmwt",
project_state,
operations=[
migrations.CreateModel(
"PonyStables",
fields=[
(
"pony",
models.ForeignKey("test_rmflmmwt.Pony", models.CASCADE),
),
(
"stable",
models.ForeignKey("test_rmflmmwt.Stable", models.CASCADE),
),
],
),
migrations.AddField(
"Pony",
"stables",
models.ManyToManyField(
"Stable",
related_name="ponies",
through="test_rmflmmwt.PonyStables",
),
),
],
)
self.assertTableExists("test_rmflmmwt_ponystables")
operations = [
migrations.RemoveField("Pony", "stables"),
migrations.DeleteModel("PonyStables"),
]
self.apply_operations("test_rmflmmwt", project_state, operations=operations)

    def test_remove_field(self):
"""
Tests the RemoveField operation.
"""
project_state = self.set_up_test_model("test_rmfl")
# Test the state alteration
operation = migrations.RemoveField("Pony", "pink")
self.assertEqual(operation.describe(), "Remove field pink from Pony")
self.assertEqual(operation.migration_name_fragment, "remove_pony_pink")
new_state = project_state.clone()
operation.state_forwards("test_rmfl", new_state)
self.assertEqual(len(new_state.models["test_rmfl", "pony"].fields), 2)
# Test the database alteration
self.assertColumnExists("test_rmfl_pony", "pink")
with connection.schema_editor() as editor:
operation.database_forwards("test_rmfl", editor, project_state, new_state)
self.assertColumnNotExists("test_rmfl_pony", "pink")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_rmfl", editor, new_state, project_state)
self.assertColumnExists("test_rmfl_pony", "pink")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RemoveField")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"model_name": "Pony", "name": "pink"})

    def test_remove_fk(self):
"""
Tests the RemoveField operation on a foreign key.
"""
project_state = self.set_up_test_model("test_rfk", related_model=True)
self.assertColumnExists("test_rfk_rider", "pony_id")
operation = migrations.RemoveField("Rider", "pony")
new_state = project_state.clone()
operation.state_forwards("test_rfk", new_state)
with connection.schema_editor() as editor:
operation.database_forwards("test_rfk", editor, project_state, new_state)
self.assertColumnNotExists("test_rfk_rider", "pony_id")
with connection.schema_editor() as editor:
operation.database_backwards("test_rfk", editor, new_state, project_state)
self.assertColumnExists("test_rfk_rider", "pony_id")

    def test_alter_model_table(self):
"""
Tests the AlterModelTable operation.
"""
project_state = self.set_up_test_model("test_almota")
# Test the state alteration
operation = migrations.AlterModelTable("Pony", "test_almota_pony_2")
self.assertEqual(
operation.describe(), "Rename table for Pony to test_almota_pony_2"
)
self.assertEqual(operation.migration_name_fragment, "alter_pony_table")
new_state = project_state.clone()
operation.state_forwards("test_almota", new_state)
self.assertEqual(
new_state.models["test_almota", "pony"].options["db_table"],
"test_almota_pony_2",
)
# Test the database alteration
self.assertTableExists("test_almota_pony")
self.assertTableNotExists("test_almota_pony_2")
with connection.schema_editor() as editor:
operation.database_forwards("test_almota", editor, project_state, new_state)
self.assertTableNotExists("test_almota_pony")
self.assertTableExists("test_almota_pony_2")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_almota", editor, new_state, project_state
)
self.assertTableExists("test_almota_pony")
self.assertTableNotExists("test_almota_pony_2")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterModelTable")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"name": "Pony", "table": "test_almota_pony_2"})
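
    # In a migration file this is simply:
    #
    #   migrations.AlterModelTable(name="Pony", table="test_almota_pony_2")
    #
    # Passing table=None (exercised below) reverts the model to its default
    # "<app_label>_<modelname>" table name.
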
def test_alter_model_table_none(self):
"""
Tests the AlterModelTable operation if the table name is set to None.
"""
operation = migrations.AlterModelTable("Pony", None)
self.assertEqual(operation.describe(), "Rename table for Pony to (default)")

    def test_alter_model_table_noop(self):
"""
Tests the AlterModelTable operation if the table name is not changed.
"""
project_state = self.set_up_test_model("test_almota")
# Test the state alteration
operation = migrations.AlterModelTable("Pony", "test_almota_pony")
new_state = project_state.clone()
operation.state_forwards("test_almota", new_state)
self.assertEqual(
new_state.models["test_almota", "pony"].options["db_table"],
"test_almota_pony",
)
# Test the database alteration
self.assertTableExists("test_almota_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_almota", editor, project_state, new_state)
self.assertTableExists("test_almota_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_almota", editor, new_state, project_state
)
self.assertTableExists("test_almota_pony")

    def test_alter_model_table_m2m(self):
"""
AlterModelTable should rename auto-generated M2M tables.
"""
app_label = "test_talflmltlm2m"
pony_db_table = "pony_foo"
project_state = self.set_up_test_model(
app_label, second_model=True, db_table=pony_db_table
)
# Add the M2M field
first_state = project_state.clone()
operation = migrations.AddField(
"Pony", "stables", models.ManyToManyField("Stable")
)
operation.state_forwards(app_label, first_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, first_state)
original_m2m_table = "%s_%s" % (pony_db_table, "stables")
new_m2m_table = "%s_%s" % (app_label, "pony_stables")
self.assertTableExists(original_m2m_table)
self.assertTableNotExists(new_m2m_table)
# Rename the Pony db_table which should also rename the m2m table.
second_state = first_state.clone()
operation = migrations.AlterModelTable(name="pony", table=None)
operation.state_forwards(app_label, second_state)
atomic_rename = connection.features.supports_atomic_references_rename
with connection.schema_editor(atomic=atomic_rename) as editor:
operation.database_forwards(app_label, editor, first_state, second_state)
self.assertTableExists(new_m2m_table)
self.assertTableNotExists(original_m2m_table)
# And test reversal
with connection.schema_editor(atomic=atomic_rename) as editor:
operation.database_backwards(app_label, editor, second_state, first_state)
self.assertTableExists(original_m2m_table)
self.assertTableNotExists(new_m2m_table)

    def test_alter_model_table_m2m_field(self):
app_label = "test_talm2mfl"
project_state = self.set_up_test_model(app_label, second_model=True)
# Add the M2M field.
project_state = self.apply_operations(
app_label,
project_state,
operations=[
migrations.AddField(
"Pony",
"stables",
models.ManyToManyField("Stable"),
)
],
)
m2m_table = f"{app_label}_pony_stables"
self.assertColumnExists(m2m_table, "pony_id")
self.assertColumnExists(m2m_table, "stable_id")
# Point the M2M field to self.
with_field_state = project_state.clone()
operations = [
migrations.AlterField(
model_name="Pony",
name="stables",
field=models.ManyToManyField("self"),
)
]
project_state = self.apply_operations(
app_label, project_state, operations=operations
)
self.assertColumnExists(m2m_table, "from_pony_id")
self.assertColumnExists(m2m_table, "to_pony_id")
# Reversal.
self.unapply_operations(app_label, with_field_state, operations=operations)
self.assertColumnExists(m2m_table, "pony_id")
self.assertColumnExists(m2m_table, "stable_id")

    def test_alter_field(self):
"""
Tests the AlterField operation.
"""
project_state = self.set_up_test_model("test_alfl")
# Test the state alteration
operation = migrations.AlterField(
"Pony", "pink", models.IntegerField(null=True)
)
self.assertEqual(operation.describe(), "Alter field pink on Pony")
self.assertEqual(operation.migration_name_fragment, "alter_pony_pink")
new_state = project_state.clone()
operation.state_forwards("test_alfl", new_state)
self.assertIs(
project_state.models["test_alfl", "pony"].fields["pink"].null, False
)
self.assertIs(new_state.models["test_alfl", "pony"].fields["pink"].null, True)
# Test the database alteration
self.assertColumnNotNull("test_alfl_pony", "pink")
with connection.schema_editor() as editor:
operation.database_forwards("test_alfl", editor, project_state, new_state)
self.assertColumnNull("test_alfl_pony", "pink")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_alfl", editor, new_state, project_state)
self.assertColumnNotNull("test_alfl_pony", "pink")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterField")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["field", "model_name", "name"])
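
    # A sketch of the same operation in a migration file; the new field
    # instance completely replaces the old one in project state:
    #
    #   migrations.AlterField(
    #       "Pony",
    #       "pink",
    #       models.IntegerField(null=True),
    #   )
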
def test_alter_field_add_db_column_noop(self):
"""
AlterField operation is a noop when adding only a db_column and the
column name is not changed.
"""
app_label = "test_afadbn"
project_state = self.set_up_test_model(app_label, related_model=True)
pony_table = "%s_pony" % app_label
new_state = project_state.clone()
operation = migrations.AlterField(
"Pony", "weight", models.FloatField(db_column="weight")
)
operation.state_forwards(app_label, new_state)
self.assertIsNone(
project_state.models[app_label, "pony"].fields["weight"].db_column,
)
self.assertEqual(
new_state.models[app_label, "pony"].fields["weight"].db_column,
"weight",
)
self.assertColumnExists(pony_table, "weight")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertColumnExists(pony_table, "weight")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_backwards(
app_label, editor, new_state, project_state
)
self.assertColumnExists(pony_table, "weight")
rider_table = "%s_rider" % app_label
new_state = project_state.clone()
operation = migrations.AlterField(
"Rider",
"pony",
models.ForeignKey("Pony", models.CASCADE, db_column="pony_id"),
)
operation.state_forwards(app_label, new_state)
self.assertIsNone(
project_state.models[app_label, "rider"].fields["pony"].db_column,
)
        self.assertEqual(
            new_state.models[app_label, "rider"].fields["pony"].db_column,
            "pony_id",
        )
self.assertColumnExists(rider_table, "pony_id")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertColumnExists(rider_table, "pony_id")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
                operation.database_backwards(
                    app_label, editor, new_state, project_state
                )
self.assertColumnExists(rider_table, "pony_id")

    def test_alter_field_pk(self):
"""
The AlterField operation on primary keys (things like PostgreSQL's
SERIAL weirdness).
"""
project_state = self.set_up_test_model("test_alflpk")
# Test the state alteration
operation = migrations.AlterField(
"Pony", "id", models.IntegerField(primary_key=True)
)
new_state = project_state.clone()
operation.state_forwards("test_alflpk", new_state)
self.assertIsInstance(
project_state.models["test_alflpk", "pony"].fields["id"],
models.AutoField,
)
self.assertIsInstance(
new_state.models["test_alflpk", "pony"].fields["id"],
models.IntegerField,
)
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_alflpk", editor, project_state, new_state)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_alflpk", editor, new_state, project_state
)

    @skipUnlessDBFeature("supports_foreign_keys")
def test_alter_field_pk_fk(self):
"""
        Tests that the AlterField operation on a primary key changes any FKs
        pointing to it.
"""
project_state = self.set_up_test_model("test_alflpkfk", related_model=True)
project_state = self.apply_operations(
"test_alflpkfk",
project_state,
[
migrations.CreateModel(
"Stable",
fields=[
("ponies", models.ManyToManyField("Pony")),
],
),
migrations.AddField(
"Pony",
"stables",
models.ManyToManyField("Stable"),
),
],
)
# Test the state alteration
operation = migrations.AlterField(
"Pony", "id", models.FloatField(primary_key=True)
)
new_state = project_state.clone()
operation.state_forwards("test_alflpkfk", new_state)
self.assertIsInstance(
project_state.models["test_alflpkfk", "pony"].fields["id"],
models.AutoField,
)
self.assertIsInstance(
new_state.models["test_alflpkfk", "pony"].fields["id"],
models.FloatField,
)

        def assertIdTypeEqualsFkType():
with connection.cursor() as cursor:
id_type, id_null = [
(c.type_code, c.null_ok)
for c in connection.introspection.get_table_description(
cursor, "test_alflpkfk_pony"
)
if c.name == "id"
][0]
fk_type, fk_null = [
(c.type_code, c.null_ok)
for c in connection.introspection.get_table_description(
cursor, "test_alflpkfk_rider"
)
if c.name == "pony_id"
][0]
m2m_fk_type, m2m_fk_null = [
(c.type_code, c.null_ok)
for c in connection.introspection.get_table_description(
cursor,
"test_alflpkfk_pony_stables",
)
if c.name == "pony_id"
][0]
remote_m2m_fk_type, remote_m2m_fk_null = [
(c.type_code, c.null_ok)
for c in connection.introspection.get_table_description(
cursor,
"test_alflpkfk_stable_ponies",
)
if c.name == "pony_id"
][0]
self.assertEqual(id_type, fk_type)
self.assertEqual(id_type, m2m_fk_type)
self.assertEqual(id_type, remote_m2m_fk_type)
self.assertEqual(id_null, fk_null)
self.assertEqual(id_null, m2m_fk_null)
self.assertEqual(id_null, remote_m2m_fk_null)

        assertIdTypeEqualsFkType()
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards(
"test_alflpkfk", editor, project_state, new_state
)
assertIdTypeEqualsFkType()
if connection.features.supports_foreign_keys:
self.assertFKExists(
"test_alflpkfk_pony_stables",
["pony_id"],
("test_alflpkfk_pony", "id"),
)
self.assertFKExists(
"test_alflpkfk_stable_ponies",
["pony_id"],
("test_alflpkfk_pony", "id"),
)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_alflpkfk", editor, new_state, project_state
)
assertIdTypeEqualsFkType()
if connection.features.supports_foreign_keys:
self.assertFKExists(
"test_alflpkfk_pony_stables",
["pony_id"],
("test_alflpkfk_pony", "id"),
)
self.assertFKExists(
"test_alflpkfk_stable_ponies",
["pony_id"],
("test_alflpkfk_pony", "id"),
)

    @skipUnlessDBFeature("supports_collation_on_charfield", "supports_foreign_keys")
def test_alter_field_pk_fk_db_collation(self):
"""
AlterField operation of db_collation on primary keys changes any FKs
pointing to it.
"""
collation = connection.features.test_collations.get("non_default")
if not collation:
self.skipTest("Language collations are not supported.")
app_label = "test_alflpkfkdbc"
project_state = self.apply_operations(
app_label,
ProjectState(),
[
migrations.CreateModel(
"Pony",
[
("id", models.CharField(primary_key=True, max_length=10)),
],
),
migrations.CreateModel(
"Rider",
[
("pony", models.ForeignKey("Pony", models.CASCADE)),
],
),
migrations.CreateModel(
"Stable",
[
("ponies", models.ManyToManyField("Pony")),
],
),
],
)
# State alteration.
operation = migrations.AlterField(
"Pony",
"id",
models.CharField(
primary_key=True,
max_length=10,
db_collation=collation,
),
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
# Database alteration.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertColumnCollation(f"{app_label}_pony", "id", collation)
self.assertColumnCollation(f"{app_label}_rider", "pony_id", collation)
self.assertColumnCollation(f"{app_label}_stable_ponies", "pony_id", collation)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)

    def test_alter_field_pk_mti_fk(self):
app_label = "test_alflpkmtifk"
project_state = self.set_up_test_model(app_label, mti_model=True)
project_state = self.apply_operations(
app_label,
project_state,
[
migrations.CreateModel(
"ShetlandRider",
fields=[
(
"pony",
models.ForeignKey(
f"{app_label}.ShetlandPony", models.CASCADE
),
),
],
),
],
)
operation = migrations.AlterField(
"Pony",
"id",
models.BigAutoField(primary_key=True),
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertIsInstance(
new_state.models[app_label, "pony"].fields["id"],
models.BigAutoField,
)

        def _get_column_id_type(cursor, table, column):
return [
c.type_code
for c in connection.introspection.get_table_description(
cursor,
f"{app_label}_{table}",
)
if c.name == column
][0]

        def assertIdTypeEqualsMTIFkType():
with connection.cursor() as cursor:
parent_id_type = _get_column_id_type(cursor, "pony", "id")
child_id_type = _get_column_id_type(
cursor, "shetlandpony", "pony_ptr_id"
)
mti_id_type = _get_column_id_type(cursor, "shetlandrider", "pony_id")
self.assertEqual(parent_id_type, child_id_type)
self.assertEqual(parent_id_type, mti_id_type)

        assertIdTypeEqualsMTIFkType()
# Alter primary key.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
assertIdTypeEqualsMTIFkType()
if connection.features.supports_foreign_keys:
self.assertFKExists(
f"{app_label}_shetlandpony",
["pony_ptr_id"],
(f"{app_label}_pony", "id"),
)
self.assertFKExists(
f"{app_label}_shetlandrider",
["pony_id"],
(f"{app_label}_shetlandpony", "pony_ptr_id"),
)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
assertIdTypeEqualsMTIFkType()
if connection.features.supports_foreign_keys:
self.assertFKExists(
f"{app_label}_shetlandpony",
["pony_ptr_id"],
(f"{app_label}_pony", "id"),
)
self.assertFKExists(
f"{app_label}_shetlandrider",
["pony_id"],
(f"{app_label}_shetlandpony", "pony_ptr_id"),
)

    def test_alter_field_pk_mti_and_fk_to_base(self):
app_label = "test_alflpkmtiftb"
project_state = self.set_up_test_model(
app_label,
mti_model=True,
related_model=True,
)
operation = migrations.AlterField(
"Pony",
"id",
models.BigAutoField(primary_key=True),
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertIsInstance(
new_state.models[app_label, "pony"].fields["id"],
models.BigAutoField,
)

        def _get_column_id_type(cursor, table, column):
return [
c.type_code
for c in connection.introspection.get_table_description(
cursor,
f"{app_label}_{table}",
)
if c.name == column
][0]

        def assertIdTypeEqualsMTIFkType():
with connection.cursor() as cursor:
parent_id_type = _get_column_id_type(cursor, "pony", "id")
fk_id_type = _get_column_id_type(cursor, "rider", "pony_id")
child_id_type = _get_column_id_type(
cursor, "shetlandpony", "pony_ptr_id"
)
self.assertEqual(parent_id_type, child_id_type)
self.assertEqual(parent_id_type, fk_id_type)

        assertIdTypeEqualsMTIFkType()
# Alter primary key.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
assertIdTypeEqualsMTIFkType()
if connection.features.supports_foreign_keys:
self.assertFKExists(
f"{app_label}_shetlandpony",
["pony_ptr_id"],
(f"{app_label}_pony", "id"),
)
self.assertFKExists(
f"{app_label}_rider",
["pony_id"],
(f"{app_label}_pony", "id"),
)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
assertIdTypeEqualsMTIFkType()
if connection.features.supports_foreign_keys:
self.assertFKExists(
f"{app_label}_shetlandpony",
["pony_ptr_id"],
(f"{app_label}_pony", "id"),
)
self.assertFKExists(
f"{app_label}_rider",
["pony_id"],
(f"{app_label}_pony", "id"),
)

    @skipUnlessDBFeature("supports_foreign_keys")
def test_alter_field_reloads_state_on_fk_with_to_field_target_type_change(self):
app_label = "test_alflrsfkwtflttc"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.AutoField(primary_key=True)),
("code", models.IntegerField(unique=True)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
(
"rider",
models.ForeignKey(
"%s.Rider" % app_label, models.CASCADE, to_field="code"
),
),
],
),
],
)
operation = migrations.AlterField(
"Rider",
"code",
models.CharField(max_length=100, unique=True),
)
self.apply_operations(app_label, project_state, operations=[operation])
id_type, id_null = [
(c.type_code, c.null_ok)
for c in self.get_table_description("%s_rider" % app_label)
if c.name == "code"
][0]
fk_type, fk_null = [
(c.type_code, c.null_ok)
for c in self.get_table_description("%s_pony" % app_label)
if c.name == "rider_id"
][0]
self.assertEqual(id_type, fk_type)
self.assertEqual(id_null, fk_null)

    @skipUnlessDBFeature("supports_foreign_keys")
def test_alter_field_reloads_state_fk_with_to_field_related_name_target_type_change(
self,
):
app_label = "test_alflrsfkwtflrnttc"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.AutoField(primary_key=True)),
("code", models.PositiveIntegerField(unique=True)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
(
"rider",
models.ForeignKey(
"%s.Rider" % app_label,
models.CASCADE,
to_field="code",
related_name="+",
),
),
],
),
],
)
operation = migrations.AlterField(
"Rider",
"code",
models.CharField(max_length=100, unique=True),
)
self.apply_operations(app_label, project_state, operations=[operation])

    def test_alter_field_reloads_state_on_fk_target_changes(self):
"""
If AlterField doesn't reload state appropriately, the second AlterField
crashes on MySQL due to not dropping the PonyRider.pony foreign key
constraint before modifying the column.
"""
app_label = "alter_alter_field_reloads_state_on_fk_target_changes"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.CharField(primary_key=True, max_length=100)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.CharField(primary_key=True, max_length=100)),
(
"rider",
models.ForeignKey("%s.Rider" % app_label, models.CASCADE),
),
],
),
migrations.CreateModel(
"PonyRider",
fields=[
("id", models.AutoField(primary_key=True)),
(
"pony",
models.ForeignKey("%s.Pony" % app_label, models.CASCADE),
),
],
),
],
)
project_state = self.apply_operations(
app_label,
project_state,
operations=[
migrations.AlterField(
"Rider", "id", models.CharField(primary_key=True, max_length=99)
),
migrations.AlterField(
"Pony", "id", models.CharField(primary_key=True, max_length=99)
),
],
)
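    # Not part of the original test: the crash described in the docstring
    # happens because MySQL will not change the type or length of a column
    # while a foreign key constraint still references it; reloading the model
    # state is what lets the schema editor locate and drop the PonyRider.pony
    # constraint first.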
def test_alter_field_reloads_state_on_fk_with_to_field_target_changes(self):
"""
If AlterField doesn't reload state appropriately, the second AlterField
crashes on MySQL due to not dropping the PonyRider.pony foreign key
constraint before modifying the column.
"""
app_label = "alter_alter_field_reloads_state_on_fk_with_to_field_target_changes"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.CharField(primary_key=True, max_length=100)),
("slug", models.CharField(unique=True, max_length=100)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.CharField(primary_key=True, max_length=100)),
(
"rider",
models.ForeignKey(
"%s.Rider" % app_label, models.CASCADE, to_field="slug"
),
),
("slug", models.CharField(unique=True, max_length=100)),
],
),
migrations.CreateModel(
"PonyRider",
fields=[
("id", models.AutoField(primary_key=True)),
(
"pony",
models.ForeignKey(
"%s.Pony" % app_label, models.CASCADE, to_field="slug"
),
),
],
),
],
)
project_state = self.apply_operations(
app_label,
project_state,
operations=[
migrations.AlterField(
"Rider", "slug", models.CharField(unique=True, max_length=99)
),
migrations.AlterField(
"Pony", "slug", models.CharField(unique=True, max_length=99)
),
],
)
def test_alter_field_pk_fk_char_to_int(self):
app_label = "alter_field_pk_fk_char_to_int"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
name="Parent",
fields=[
("id", models.CharField(max_length=255, primary_key=True)),
],
),
migrations.CreateModel(
name="Child",
fields=[
("id", models.BigAutoField(primary_key=True)),
(
"parent",
models.ForeignKey(
f"{app_label}.Parent",
on_delete=models.CASCADE,
),
),
],
),
],
)
self.apply_operations(
app_label,
project_state,
operations=[
migrations.AlterField(
model_name="parent",
name="id",
field=models.BigIntegerField(primary_key=True),
),
],
)
def test_rename_field_reloads_state_on_fk_target_changes(self):
"""
If RenameField doesn't reload state appropriately, the AlterField
crashes on MySQL due to not dropping the PonyRider.pony foreign key
constraint before modifying the column.
"""
app_label = "alter_rename_field_reloads_state_on_fk_target_changes"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Rider",
fields=[
("id", models.CharField(primary_key=True, max_length=100)),
],
),
migrations.CreateModel(
"Pony",
fields=[
("id", models.CharField(primary_key=True, max_length=100)),
(
"rider",
models.ForeignKey("%s.Rider" % app_label, models.CASCADE),
),
],
),
migrations.CreateModel(
"PonyRider",
fields=[
("id", models.AutoField(primary_key=True)),
(
"pony",
models.ForeignKey("%s.Pony" % app_label, models.CASCADE),
),
],
),
],
)
project_state = self.apply_operations(
app_label,
project_state,
operations=[
migrations.RenameField("Rider", "id", "id2"),
migrations.AlterField(
"Pony", "id", models.CharField(primary_key=True, max_length=99)
),
],
atomic=connection.features.supports_atomic_references_rename,
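            # Apply non-atomically on backends (e.g. older SQLite) that cannot
            # rename a column referenced by other tables inside a transaction.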
)
def test_rename_field(self):
"""
Tests the RenameField operation.
"""
project_state = self.set_up_test_model("test_rnfl")
operation = migrations.RenameField("Pony", "pink", "blue")
self.assertEqual(operation.describe(), "Rename field pink on Pony to blue")
self.assertEqual(operation.migration_name_fragment, "rename_pink_pony_blue")
new_state = project_state.clone()
operation.state_forwards("test_rnfl", new_state)
self.assertIn("blue", new_state.models["test_rnfl", "pony"].fields)
self.assertNotIn("pink", new_state.models["test_rnfl", "pony"].fields)
# Rename field.
self.assertColumnExists("test_rnfl_pony", "pink")
self.assertColumnNotExists("test_rnfl_pony", "blue")
with connection.schema_editor() as editor:
operation.database_forwards("test_rnfl", editor, project_state, new_state)
self.assertColumnExists("test_rnfl_pony", "blue")
self.assertColumnNotExists("test_rnfl_pony", "pink")
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards("test_rnfl", editor, new_state, project_state)
self.assertColumnExists("test_rnfl_pony", "pink")
self.assertColumnNotExists("test_rnfl_pony", "blue")
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "RenameField")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{"model_name": "Pony", "old_name": "pink", "new_name": "blue"},
)
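    # A minimal sketch (hypothetical migration module, names taken from the
    # test above) of how RenameField appears in a generated migration:
    #
    #     class Migration(migrations.Migration):
    #         dependencies = [("test_rnfl", "0001_initial")]
    #         operations = [
    #             migrations.RenameField(
    #                 model_name="pony", old_name="pink", new_name="blue"
    #             ),
    #         ]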
def test_rename_field_unique_together(self):
project_state = self.set_up_test_model("test_rnflut", unique_together=True)
operation = migrations.RenameField("Pony", "pink", "blue")
new_state = project_state.clone()
operation.state_forwards("test_rnflut", new_state)
# unique_together has the renamed column.
self.assertIn(
"blue",
new_state.models["test_rnflut", "pony"].options["unique_together"][0],
)
self.assertNotIn(
"pink",
new_state.models["test_rnflut", "pony"].options["unique_together"][0],
)
# Rename field.
self.assertColumnExists("test_rnflut_pony", "pink")
self.assertColumnNotExists("test_rnflut_pony", "blue")
with connection.schema_editor() as editor:
operation.database_forwards("test_rnflut", editor, project_state, new_state)
self.assertColumnExists("test_rnflut_pony", "blue")
self.assertColumnNotExists("test_rnflut_pony", "pink")
# The unique constraint has been ported over.
with connection.cursor() as cursor:
cursor.execute("INSERT INTO test_rnflut_pony (blue, weight) VALUES (1, 1)")
with self.assertRaises(IntegrityError):
with atomic():
cursor.execute(
"INSERT INTO test_rnflut_pony (blue, weight) VALUES (1, 1)"
)
cursor.execute("DELETE FROM test_rnflut_pony")
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(
"test_rnflut", editor, new_state, project_state
)
self.assertColumnExists("test_rnflut_pony", "pink")
self.assertColumnNotExists("test_rnflut_pony", "blue")
@ignore_warnings(category=RemovedInDjango51Warning)
def test_rename_field_index_together(self):
project_state = self.set_up_test_model("test_rnflit", index_together=True)
operation = migrations.RenameField("Pony", "pink", "blue")
new_state = project_state.clone()
operation.state_forwards("test_rnflit", new_state)
self.assertIn("blue", new_state.models["test_rnflit", "pony"].fields)
self.assertNotIn("pink", new_state.models["test_rnflit", "pony"].fields)
# index_together has the renamed column.
self.assertIn(
"blue", new_state.models["test_rnflit", "pony"].options["index_together"][0]
)
self.assertNotIn(
"pink", new_state.models["test_rnflit", "pony"].options["index_together"][0]
)
# Rename field.
self.assertColumnExists("test_rnflit_pony", "pink")
self.assertColumnNotExists("test_rnflit_pony", "blue")
with connection.schema_editor() as editor:
operation.database_forwards("test_rnflit", editor, project_state, new_state)
self.assertColumnExists("test_rnflit_pony", "blue")
self.assertColumnNotExists("test_rnflit_pony", "pink")
# The index constraint has been ported over.
self.assertIndexExists("test_rnflit_pony", ["weight", "blue"])
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(
"test_rnflit", editor, new_state, project_state
)
self.assertIndexExists("test_rnflit_pony", ["weight", "pink"])
def test_rename_field_with_db_column(self):
project_state = self.apply_operations(
"test_rfwdbc",
ProjectState(),
operations=[
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
("field", models.IntegerField(db_column="db_field")),
(
"fk_field",
models.ForeignKey(
"Pony",
models.CASCADE,
db_column="db_fk_field",
),
),
],
),
],
)
new_state = project_state.clone()
operation = migrations.RenameField("Pony", "field", "renamed_field")
operation.state_forwards("test_rfwdbc", new_state)
self.assertIn("renamed_field", new_state.models["test_rfwdbc", "pony"].fields)
self.assertNotIn("field", new_state.models["test_rfwdbc", "pony"].fields)
self.assertColumnExists("test_rfwdbc_pony", "db_field")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_forwards(
"test_rfwdbc", editor, project_state, new_state
)
self.assertColumnExists("test_rfwdbc_pony", "db_field")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_backwards(
"test_rfwdbc", editor, new_state, project_state
)
self.assertColumnExists("test_rfwdbc_pony", "db_field")
new_state = project_state.clone()
operation = migrations.RenameField("Pony", "fk_field", "renamed_fk_field")
operation.state_forwards("test_rfwdbc", new_state)
self.assertIn(
"renamed_fk_field", new_state.models["test_rfwdbc", "pony"].fields
)
self.assertNotIn("fk_field", new_state.models["test_rfwdbc", "pony"].fields)
self.assertColumnExists("test_rfwdbc_pony", "db_fk_field")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_forwards(
"test_rfwdbc", editor, project_state, new_state
)
self.assertColumnExists("test_rfwdbc_pony", "db_fk_field")
with connection.schema_editor() as editor:
with self.assertNumQueries(0):
operation.database_backwards(
"test_rfwdbc", editor, new_state, project_state
)
self.assertColumnExists("test_rfwdbc_pony", "db_fk_field")
def test_rename_field_case(self):
project_state = self.apply_operations(
"test_rfmx",
ProjectState(),
operations=[
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
("field", models.IntegerField()),
],
),
],
)
new_state = project_state.clone()
operation = migrations.RenameField("Pony", "field", "FiElD")
operation.state_forwards("test_rfmx", new_state)
self.assertIn("FiElD", new_state.models["test_rfmx", "pony"].fields)
self.assertColumnExists("test_rfmx_pony", "field")
with connection.schema_editor() as editor:
operation.database_forwards("test_rfmx", editor, project_state, new_state)
self.assertColumnExists(
"test_rfmx_pony",
connection.introspection.identifier_converter("FiElD"),
)
with connection.schema_editor() as editor:
operation.database_backwards("test_rfmx", editor, new_state, project_state)
self.assertColumnExists("test_rfmx_pony", "field")
def test_rename_missing_field(self):
state = ProjectState()
state.add_model(ModelState("app", "model", []))
with self.assertRaisesMessage(
FieldDoesNotExist, "app.model has no field named 'field'"
):
migrations.RenameField("model", "field", "new_field").state_forwards(
"app", state
)
def test_rename_referenced_field_state_forward(self):
state = ProjectState()
state.add_model(
ModelState(
"app",
"Model",
[
("id", models.AutoField(primary_key=True)),
("field", models.IntegerField(unique=True)),
],
)
)
state.add_model(
ModelState(
"app",
"OtherModel",
[
("id", models.AutoField(primary_key=True)),
(
"fk",
models.ForeignKey("Model", models.CASCADE, to_field="field"),
),
(
"fo",
models.ForeignObject(
"Model",
models.CASCADE,
from_fields=("fk",),
to_fields=("field",),
),
),
],
)
)
operation = migrations.RenameField("Model", "field", "renamed")
new_state = state.clone()
operation.state_forwards("app", new_state)
self.assertEqual(
new_state.models["app", "othermodel"].fields["fk"].remote_field.field_name,
"renamed",
)
self.assertEqual(
new_state.models["app", "othermodel"].fields["fk"].from_fields, ["self"]
)
self.assertEqual(
new_state.models["app", "othermodel"].fields["fk"].to_fields, ("renamed",)
)
self.assertEqual(
new_state.models["app", "othermodel"].fields["fo"].from_fields, ("fk",)
)
self.assertEqual(
new_state.models["app", "othermodel"].fields["fo"].to_fields, ("renamed",)
)
operation = migrations.RenameField("OtherModel", "fk", "renamed_fk")
new_state = state.clone()
operation.state_forwards("app", new_state)
self.assertEqual(
new_state.models["app", "othermodel"]
.fields["renamed_fk"]
.remote_field.field_name,
"renamed",
)
self.assertEqual(
new_state.models["app", "othermodel"].fields["renamed_fk"].from_fields,
("self",),
)
self.assertEqual(
new_state.models["app", "othermodel"].fields["renamed_fk"].to_fields,
("renamed",),
)
self.assertEqual(
new_state.models["app", "othermodel"].fields["fo"].from_fields,
("renamed_fk",),
)
self.assertEqual(
new_state.models["app", "othermodel"].fields["fo"].to_fields, ("renamed",)
)
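    # Note (not part of the original assertions): from_fields uses the
    # sentinel "self" to mean "this field's own column", which is why renaming
    # the FK leaves its own from_fields untouched while the dependent
    # ForeignObject's from_fields is rewritten to the new field name.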
def test_alter_unique_together(self):
"""
Tests the AlterUniqueTogether operation.
"""
project_state = self.set_up_test_model("test_alunto")
# Test the state alteration
operation = migrations.AlterUniqueTogether("Pony", [("pink", "weight")])
self.assertEqual(
operation.describe(), "Alter unique_together for Pony (1 constraint(s))"
)
self.assertEqual(
operation.migration_name_fragment,
"alter_pony_unique_together",
)
new_state = project_state.clone()
operation.state_forwards("test_alunto", new_state)
self.assertEqual(
len(
project_state.models["test_alunto", "pony"].options.get(
"unique_together", set()
)
),
0,
)
self.assertEqual(
len(
new_state.models["test_alunto", "pony"].options.get(
"unique_together", set()
)
),
1,
)
# Make sure we can insert duplicate rows
with connection.cursor() as cursor:
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
cursor.execute("DELETE FROM test_alunto_pony")
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards(
"test_alunto", editor, project_state, new_state
)
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
with self.assertRaises(IntegrityError):
with atomic():
cursor.execute(
"INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)"
)
cursor.execute("DELETE FROM test_alunto_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_alunto", editor, new_state, project_state
)
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
cursor.execute("INSERT INTO test_alunto_pony (pink, weight) VALUES (1, 1)")
cursor.execute("DELETE FROM test_alunto_pony")
# Test flat unique_together
operation = migrations.AlterUniqueTogether("Pony", ("pink", "weight"))
operation.state_forwards("test_alunto", new_state)
self.assertEqual(
len(
new_state.models["test_alunto", "pony"].options.get(
"unique_together", set()
)
),
1,
)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterUniqueTogether")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2], {"name": "Pony", "unique_together": {("pink", "weight")}}
)
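    # For reference (behavior asserted above): AlterUniqueTogether normalizes
    # its value, so the nested and flat forms are equivalent and both
    # deconstruct to {("pink", "weight")}:
    #
    #     migrations.AlterUniqueTogether("Pony", [("pink", "weight")])
    #     migrations.AlterUniqueTogether("Pony", ("pink", "weight"))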
def test_alter_unique_together_remove(self):
operation = migrations.AlterUniqueTogether("Pony", None)
self.assertEqual(
operation.describe(), "Alter unique_together for Pony (0 constraint(s))"
)
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
def test_remove_unique_together_on_pk_field(self):
app_label = "test_rutopkf"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Pony",
fields=[("id", models.AutoField(primary_key=True))],
options={"unique_together": {("id",)}},
),
],
)
table_name = f"{app_label}_pony"
pk_constraint_name = f"{table_name}_pkey"
unique_together_constraint_name = f"{table_name}_id_fb61f881_uniq"
self.assertConstraintExists(table_name, pk_constraint_name, value=False)
self.assertConstraintExists(
table_name, unique_together_constraint_name, value=False
)
new_state = project_state.clone()
operation = migrations.AlterUniqueTogether("Pony", set())
operation.state_forwards(app_label, new_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertConstraintExists(table_name, pk_constraint_name, value=False)
self.assertConstraintNotExists(table_name, unique_together_constraint_name)
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
def test_remove_unique_together_on_unique_field(self):
app_label = "test_rutouf"
project_state = self.apply_operations(
app_label,
ProjectState(),
operations=[
migrations.CreateModel(
"Pony",
fields=[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=30, unique=True)),
],
options={"unique_together": {("name",)}},
),
],
)
table_name = f"{app_label}_pony"
unique_constraint_name = f"{table_name}_name_key"
unique_together_constraint_name = f"{table_name}_name_694f3b9f_uniq"
self.assertConstraintExists(table_name, unique_constraint_name, value=False)
self.assertConstraintExists(
table_name, unique_together_constraint_name, value=False
)
new_state = project_state.clone()
operation = migrations.AlterUniqueTogether("Pony", set())
operation.state_forwards(app_label, new_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertConstraintExists(table_name, unique_constraint_name, value=False)
self.assertConstraintNotExists(table_name, unique_together_constraint_name)
def test_add_index(self):
"""
Test the AddIndex operation.
"""
project_state = self.set_up_test_model("test_adin")
msg = (
"Indexes passed to AddIndex operations require a name argument. "
"<Index: fields=['pink']> doesn't have one."
)
with self.assertRaisesMessage(ValueError, msg):
migrations.AddIndex("Pony", models.Index(fields=["pink"]))
index = models.Index(fields=["pink"], name="test_adin_pony_pink_idx")
operation = migrations.AddIndex("Pony", index)
self.assertEqual(
operation.describe(),
"Create index test_adin_pony_pink_idx on field(s) pink of model Pony",
)
self.assertEqual(
operation.migration_name_fragment,
"pony_test_adin_pony_pink_idx",
)
new_state = project_state.clone()
operation.state_forwards("test_adin", new_state)
# Test the database alteration
self.assertEqual(
len(new_state.models["test_adin", "pony"].options["indexes"]), 1
)
self.assertIndexNotExists("test_adin_pony", ["pink"])
with connection.schema_editor() as editor:
operation.database_forwards("test_adin", editor, project_state, new_state)
self.assertIndexExists("test_adin_pony", ["pink"])
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_adin", editor, new_state, project_state)
self.assertIndexNotExists("test_adin_pony", ["pink"])
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddIndex")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"model_name": "Pony", "index": index})
def test_remove_index(self):
"""
Test the RemoveIndex operation.
"""
project_state = self.set_up_test_model("test_rmin", multicol_index=True)
self.assertTableExists("test_rmin_pony")
self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
operation = migrations.RemoveIndex("Pony", "pony_test_idx")
self.assertEqual(operation.describe(), "Remove index pony_test_idx from Pony")
self.assertEqual(
operation.migration_name_fragment,
"remove_pony_pony_test_idx",
)
new_state = project_state.clone()
operation.state_forwards("test_rmin", new_state)
# Test the state alteration
self.assertEqual(
len(new_state.models["test_rmin", "pony"].options["indexes"]), 0
)
self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_rmin", editor, project_state, new_state)
self.assertIndexNotExists("test_rmin_pony", ["pink", "weight"])
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards("test_rmin", editor, new_state, project_state)
self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RemoveIndex")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"model_name": "Pony", "name": "pony_test_idx"})
        # Also test dropping the indexed field itself (SQLite table-remake
        # issue).
operations = [
migrations.RemoveIndex("Pony", "pony_test_idx"),
migrations.RemoveField("Pony", "pink"),
]
self.assertColumnExists("test_rmin_pony", "pink")
self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
# Test database alteration
new_state = project_state.clone()
self.apply_operations("test_rmin", new_state, operations=operations)
self.assertColumnNotExists("test_rmin_pony", "pink")
self.assertIndexNotExists("test_rmin_pony", ["pink", "weight"])
# And test reversal
self.unapply_operations("test_rmin", project_state, operations=operations)
self.assertIndexExists("test_rmin_pony", ["pink", "weight"])
def test_rename_index(self):
app_label = "test_rnin"
project_state = self.set_up_test_model(app_label, index=True)
table_name = app_label + "_pony"
self.assertIndexNameExists(table_name, "pony_pink_idx")
self.assertIndexNameNotExists(table_name, "new_pony_test_idx")
operation = migrations.RenameIndex(
"Pony", new_name="new_pony_test_idx", old_name="pony_pink_idx"
)
self.assertEqual(
operation.describe(),
"Rename index pony_pink_idx on Pony to new_pony_test_idx",
)
self.assertEqual(
operation.migration_name_fragment,
"rename_pony_pink_idx_new_pony_test_idx",
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
# Rename index.
        # Backends that cannot rename an index in place drop and recreate it,
        # which takes two queries instead of one.
        expected_queries = 1 if connection.features.can_rename_index else 2
with connection.schema_editor() as editor, self.assertNumQueries(
expected_queries
):
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertIndexNameNotExists(table_name, "pony_pink_idx")
self.assertIndexNameExists(table_name, "new_pony_test_idx")
# Reversal.
with connection.schema_editor() as editor, self.assertNumQueries(
expected_queries
):
operation.database_backwards(app_label, editor, new_state, project_state)
self.assertIndexNameExists(table_name, "pony_pink_idx")
self.assertIndexNameNotExists(table_name, "new_pony_test_idx")
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "RenameIndex")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{
"model_name": "Pony",
"old_name": "pony_pink_idx",
"new_name": "new_pony_test_idx",
},
)
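    # For reference, the named-index rename exercised above as it would appear
    # in a migration file (names taken from the test):
    #
    #     migrations.RenameIndex(
    #         "Pony", old_name="pony_pink_idx", new_name="new_pony_test_idx"
    #     )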
def test_rename_index_arguments(self):
msg = "RenameIndex.old_name and old_fields are mutually exclusive."
with self.assertRaisesMessage(ValueError, msg):
migrations.RenameIndex(
"Pony",
new_name="new_idx_name",
old_name="old_idx_name",
old_fields=("weight", "pink"),
)
msg = "RenameIndex requires one of old_name and old_fields arguments to be set."
with self.assertRaisesMessage(ValueError, msg):
migrations.RenameIndex("Pony", new_name="new_idx_name")
@ignore_warnings(category=RemovedInDjango51Warning)
def test_rename_index_unnamed_index(self):
app_label = "test_rninui"
project_state = self.set_up_test_model(app_label, index_together=True)
table_name = app_label + "_pony"
self.assertIndexNameNotExists(table_name, "new_pony_test_idx")
operation = migrations.RenameIndex(
"Pony", new_name="new_pony_test_idx", old_fields=("weight", "pink")
)
self.assertEqual(
operation.describe(),
"Rename unnamed index for ('weight', 'pink') on Pony to new_pony_test_idx",
)
self.assertEqual(
operation.migration_name_fragment,
"rename_pony_weight_pink_new_pony_test_idx",
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
# Rename index.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertIndexNameExists(table_name, "new_pony_test_idx")
# Reverse is a no-op.
with connection.schema_editor() as editor, self.assertNumQueries(0):
operation.database_backwards(app_label, editor, new_state, project_state)
self.assertIndexNameExists(table_name, "new_pony_test_idx")
        # Reapply. The RenameIndex operation is a no-op when the old and new
        # names match.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, new_state, project_state)
self.assertIndexNameExists(table_name, "new_pony_test_idx")
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "RenameIndex")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{
"model_name": "Pony",
"new_name": "new_pony_test_idx",
"old_fields": ("weight", "pink"),
},
)
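    # Not part of the original assertions: old_fields exists to target the
    # auto-named index created by the deprecated index_together option. Since
    # the original name is unknown, the operation cannot restore it, which is
    # why the reversal above is a no-op.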
def test_rename_index_unknown_unnamed_index(self):
app_label = "test_rninuui"
project_state = self.set_up_test_model(app_label)
operation = migrations.RenameIndex(
"Pony", new_name="new_pony_test_idx", old_fields=("weight", "pink")
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
msg = "Found wrong number (0) of indexes for test_rninuui_pony(weight, pink)."
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, msg):
operation.database_forwards(app_label, editor, project_state, new_state)
def test_add_index_state_forwards(self):
project_state = self.set_up_test_model("test_adinsf")
index = models.Index(fields=["pink"], name="test_adinsf_pony_pink_idx")
old_model = project_state.apps.get_model("test_adinsf", "Pony")
new_state = project_state.clone()
operation = migrations.AddIndex("Pony", index)
operation.state_forwards("test_adinsf", new_state)
new_model = new_state.apps.get_model("test_adinsf", "Pony")
self.assertIsNot(old_model, new_model)
def test_remove_index_state_forwards(self):
project_state = self.set_up_test_model("test_rminsf")
index = models.Index(fields=["pink"], name="test_rminsf_pony_pink_idx")
migrations.AddIndex("Pony", index).state_forwards("test_rminsf", project_state)
old_model = project_state.apps.get_model("test_rminsf", "Pony")
new_state = project_state.clone()
operation = migrations.RemoveIndex("Pony", "test_rminsf_pony_pink_idx")
operation.state_forwards("test_rminsf", new_state)
new_model = new_state.apps.get_model("test_rminsf", "Pony")
self.assertIsNot(old_model, new_model)
def test_rename_index_state_forwards(self):
app_label = "test_rnidsf"
project_state = self.set_up_test_model(app_label, index=True)
old_model = project_state.apps.get_model(app_label, "Pony")
new_state = project_state.clone()
operation = migrations.RenameIndex(
"Pony", new_name="new_pony_pink_idx", old_name="pony_pink_idx"
)
operation.state_forwards(app_label, new_state)
new_model = new_state.apps.get_model(app_label, "Pony")
self.assertIsNot(old_model, new_model)
self.assertEqual(new_model._meta.indexes[0].name, "new_pony_pink_idx")
@ignore_warnings(category=RemovedInDjango51Warning)
def test_rename_index_state_forwards_unnamed_index(self):
app_label = "test_rnidsfui"
project_state = self.set_up_test_model(app_label, index_together=True)
old_model = project_state.apps.get_model(app_label, "Pony")
new_state = project_state.clone()
operation = migrations.RenameIndex(
"Pony", new_name="new_pony_pink_idx", old_fields=("weight", "pink")
)
operation.state_forwards(app_label, new_state)
new_model = new_state.apps.get_model(app_label, "Pony")
self.assertIsNot(old_model, new_model)
self.assertEqual(new_model._meta.index_together, tuple())
self.assertEqual(new_model._meta.indexes[0].name, "new_pony_pink_idx")
@skipUnlessDBFeature("supports_expression_indexes")
def test_add_func_index(self):
app_label = "test_addfuncin"
index_name = f"{app_label}_pony_abs_idx"
table_name = f"{app_label}_pony"
project_state = self.set_up_test_model(app_label)
index = models.Index(Abs("weight"), name=index_name)
operation = migrations.AddIndex("Pony", index)
self.assertEqual(
operation.describe(),
"Create index test_addfuncin_pony_abs_idx on Abs(F(weight)) on model Pony",
)
self.assertEqual(
operation.migration_name_fragment,
"pony_test_addfuncin_pony_abs_idx",
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(len(new_state.models[app_label, "pony"].options["indexes"]), 1)
self.assertIndexNameNotExists(table_name, index_name)
# Add index.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertIndexNameExists(table_name, index_name)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
self.assertIndexNameNotExists(table_name, index_name)
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddIndex")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"model_name": "Pony", "index": index})
@skipUnlessDBFeature("supports_expression_indexes")
def test_remove_func_index(self):
app_label = "test_rmfuncin"
index_name = f"{app_label}_pony_abs_idx"
table_name = f"{app_label}_pony"
project_state = self.set_up_test_model(
app_label,
indexes=[
models.Index(Abs("weight"), name=index_name),
],
)
self.assertTableExists(table_name)
self.assertIndexNameExists(table_name, index_name)
operation = migrations.RemoveIndex("Pony", index_name)
self.assertEqual(
operation.describe(),
"Remove index test_rmfuncin_pony_abs_idx from Pony",
)
self.assertEqual(
operation.migration_name_fragment,
"remove_pony_test_rmfuncin_pony_abs_idx",
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(len(new_state.models[app_label, "pony"].options["indexes"]), 0)
# Remove index.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertIndexNameNotExists(table_name, index_name)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
self.assertIndexNameExists(table_name, index_name)
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "RemoveIndex")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"model_name": "Pony", "name": index_name})
@skipUnlessDBFeature("supports_expression_indexes")
def test_alter_field_with_func_index(self):
app_label = "test_alfuncin"
index_name = f"{app_label}_pony_idx"
table_name = f"{app_label}_pony"
project_state = self.set_up_test_model(
app_label,
indexes=[models.Index(Abs("pink"), name=index_name)],
)
operation = migrations.AlterField(
"Pony", "pink", models.IntegerField(null=True)
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertIndexNameExists(table_name, index_name)
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
self.assertIndexNameExists(table_name, index_name)
def test_alter_field_with_index(self):
"""
Test AlterField operation with an index to ensure indexes created via
Meta.indexes don't get dropped with sqlite3 remake.
"""
project_state = self.set_up_test_model("test_alflin", index=True)
operation = migrations.AlterField(
"Pony", "pink", models.IntegerField(null=True)
)
new_state = project_state.clone()
operation.state_forwards("test_alflin", new_state)
# Test the database alteration
self.assertColumnNotNull("test_alflin_pony", "pink")
with connection.schema_editor() as editor:
operation.database_forwards("test_alflin", editor, project_state, new_state)
# Index hasn't been dropped
self.assertIndexExists("test_alflin_pony", ["pink"])
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_alflin", editor, new_state, project_state
)
# Ensure the index is still there
self.assertIndexExists("test_alflin_pony", ["pink"])
@ignore_warnings(category=RemovedInDjango51Warning)
def test_alter_index_together(self):
"""
Tests the AlterIndexTogether operation.
"""
project_state = self.set_up_test_model("test_alinto")
# Test the state alteration
operation = migrations.AlterIndexTogether("Pony", [("pink", "weight")])
self.assertEqual(
operation.describe(), "Alter index_together for Pony (1 constraint(s))"
)
self.assertEqual(
operation.migration_name_fragment,
"alter_pony_index_together",
)
new_state = project_state.clone()
operation.state_forwards("test_alinto", new_state)
self.assertEqual(
len(
project_state.models["test_alinto", "pony"].options.get(
"index_together", set()
)
),
0,
)
self.assertEqual(
len(
new_state.models["test_alinto", "pony"].options.get(
"index_together", set()
)
),
1,
)
# Make sure there's no matching index
self.assertIndexNotExists("test_alinto_pony", ["pink", "weight"])
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_alinto", editor, project_state, new_state)
self.assertIndexExists("test_alinto_pony", ["pink", "weight"])
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_alinto", editor, new_state, project_state
)
self.assertIndexNotExists("test_alinto_pony", ["pink", "weight"])
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterIndexTogether")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2], {"name": "Pony", "index_together": {("pink", "weight")}}
)
def test_alter_index_together_remove(self):
operation = migrations.AlterIndexTogether("Pony", None)
self.assertEqual(
operation.describe(), "Alter index_together for Pony (0 constraint(s))"
)
@skipUnlessDBFeature("allows_multiple_constraints_on_same_fields")
@ignore_warnings(category=RemovedInDjango51Warning)
def test_alter_index_together_remove_with_unique_together(self):
app_label = "test_alintoremove_wunto"
table_name = "%s_pony" % app_label
project_state = self.set_up_test_model(app_label, unique_together=True)
self.assertUniqueConstraintExists(table_name, ["pink", "weight"])
# Add index together.
new_state = project_state.clone()
operation = migrations.AlterIndexTogether("Pony", [("pink", "weight")])
operation.state_forwards(app_label, new_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertIndexExists(table_name, ["pink", "weight"])
# Remove index together.
project_state = new_state
new_state = project_state.clone()
operation = migrations.AlterIndexTogether("Pony", set())
operation.state_forwards(app_label, new_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertIndexNotExists(table_name, ["pink", "weight"])
self.assertUniqueConstraintExists(table_name, ["pink", "weight"])
@skipUnlessDBFeature("supports_table_check_constraints")
def test_add_constraint(self):
project_state = self.set_up_test_model("test_addconstraint")
gt_check = models.Q(pink__gt=2)
gt_constraint = models.CheckConstraint(
check=gt_check, name="test_add_constraint_pony_pink_gt_2"
)
gt_operation = migrations.AddConstraint("Pony", gt_constraint)
self.assertEqual(
gt_operation.describe(),
"Create constraint test_add_constraint_pony_pink_gt_2 on model Pony",
)
self.assertEqual(
gt_operation.migration_name_fragment,
"pony_test_add_constraint_pony_pink_gt_2",
)
# Test the state alteration
new_state = project_state.clone()
gt_operation.state_forwards("test_addconstraint", new_state)
self.assertEqual(
len(new_state.models["test_addconstraint", "pony"].options["constraints"]),
1,
)
Pony = new_state.apps.get_model("test_addconstraint", "Pony")
self.assertEqual(len(Pony._meta.constraints), 1)
# Test the database alteration
with connection.schema_editor() as editor:
gt_operation.database_forwards(
"test_addconstraint", editor, project_state, new_state
)
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=1, weight=1.0)
# Add another one.
lt_check = models.Q(pink__lt=100)
lt_constraint = models.CheckConstraint(
check=lt_check, name="test_add_constraint_pony_pink_lt_100"
)
lt_operation = migrations.AddConstraint("Pony", lt_constraint)
lt_operation.state_forwards("test_addconstraint", new_state)
self.assertEqual(
len(new_state.models["test_addconstraint", "pony"].options["constraints"]),
2,
)
Pony = new_state.apps.get_model("test_addconstraint", "Pony")
self.assertEqual(len(Pony._meta.constraints), 2)
with connection.schema_editor() as editor:
lt_operation.database_forwards(
"test_addconstraint", editor, project_state, new_state
)
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=100, weight=1.0)
# Test reversal
with connection.schema_editor() as editor:
gt_operation.database_backwards(
"test_addconstraint", editor, new_state, project_state
)
Pony.objects.create(pink=1, weight=1.0)
# Test deconstruction
definition = gt_operation.deconstruct()
self.assertEqual(definition[0], "AddConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2], {"model_name": "Pony", "constraint": gt_constraint}
)
@skipUnlessDBFeature("supports_table_check_constraints")
def test_add_constraint_percent_escaping(self):
app_label = "add_constraint_string_quoting"
operations = [
migrations.CreateModel(
"Author",
fields=[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
("surname", models.CharField(max_length=100, default="")),
("rebate", models.CharField(max_length=100)),
],
),
]
from_state = self.apply_operations(app_label, ProjectState(), operations)
# "%" generated in startswith lookup should be escaped in a way that is
# considered a leading wildcard.
check = models.Q(name__startswith="Albert")
constraint = models.CheckConstraint(check=check, name="name_constraint")
operation = migrations.AddConstraint("Author", constraint)
to_state = from_state.clone()
operation.state_forwards(app_label, to_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, from_state, to_state)
Author = to_state.apps.get_model(app_label, "Author")
with self.assertRaises(IntegrityError), transaction.atomic():
Author.objects.create(name="Artur")
# Literal "%" should be escaped in a way that is not a considered a
# wildcard.
check = models.Q(rebate__endswith="%")
constraint = models.CheckConstraint(check=check, name="rebate_constraint")
operation = migrations.AddConstraint("Author", constraint)
from_state = to_state
to_state = from_state.clone()
operation.state_forwards(app_label, to_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, from_state, to_state)
Author = to_state.apps.get_model(app_label, "Author")
with self.assertRaises(IntegrityError), transaction.atomic():
Author.objects.create(name="Albert", rebate="10$")
author = Author.objects.create(name="Albert", rebate="10%")
self.assertEqual(Author.objects.get(), author)
# Right-hand-side baked "%" literals should not be used for parameters
# interpolation.
check = ~models.Q(surname__startswith=models.F("name"))
constraint = models.CheckConstraint(check=check, name="name_constraint_rhs")
operation = migrations.AddConstraint("Author", constraint)
from_state = to_state
to_state = from_state.clone()
operation.state_forwards(app_label, to_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, from_state, to_state)
Author = to_state.apps.get_model(app_label, "Author")
with self.assertRaises(IntegrityError), transaction.atomic():
Author.objects.create(name="Albert", surname="Alberto")
@skipUnlessDBFeature("supports_table_check_constraints")
def test_add_or_constraint(self):
app_label = "test_addorconstraint"
constraint_name = "add_constraint_or"
from_state = self.set_up_test_model(app_label)
check = models.Q(pink__gt=2, weight__gt=2) | models.Q(weight__lt=0)
constraint = models.CheckConstraint(check=check, name=constraint_name)
operation = migrations.AddConstraint("Pony", constraint)
to_state = from_state.clone()
operation.state_forwards(app_label, to_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, from_state, to_state)
Pony = to_state.apps.get_model(app_label, "Pony")
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=2, weight=3.0)
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=3, weight=1.0)
Pony.objects.bulk_create(
[
Pony(pink=3, weight=-1.0),
Pony(pink=1, weight=-1.0),
Pony(pink=3, weight=3.0),
]
)
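    # For reference, the ORed Q objects above compile to a single CHECK along
    # the lines of (assumption; exact quoting and parentheses are
    # backend-specific):
    #
    #     CHECK (("pink" > 2 AND "weight" > 2) OR "weight" < 0)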
@skipUnlessDBFeature("supports_table_check_constraints")
def test_add_constraint_combinable(self):
app_label = "test_addconstraint_combinable"
operations = [
migrations.CreateModel(
"Book",
fields=[
("id", models.AutoField(primary_key=True)),
("read", models.PositiveIntegerField()),
("unread", models.PositiveIntegerField()),
],
),
]
from_state = self.apply_operations(app_label, ProjectState(), operations)
constraint = models.CheckConstraint(
check=models.Q(read=(100 - models.F("unread"))),
name="test_addconstraint_combinable_sum_100",
)
operation = migrations.AddConstraint("Book", constraint)
to_state = from_state.clone()
operation.state_forwards(app_label, to_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, from_state, to_state)
Book = to_state.apps.get_model(app_label, "Book")
with self.assertRaises(IntegrityError), transaction.atomic():
Book.objects.create(read=70, unread=10)
Book.objects.create(read=70, unread=30)
@skipUnlessDBFeature("supports_table_check_constraints")
def test_remove_constraint(self):
project_state = self.set_up_test_model(
"test_removeconstraint",
constraints=[
models.CheckConstraint(
check=models.Q(pink__gt=2),
name="test_remove_constraint_pony_pink_gt_2",
),
models.CheckConstraint(
check=models.Q(pink__lt=100),
name="test_remove_constraint_pony_pink_lt_100",
),
],
)
gt_operation = migrations.RemoveConstraint(
"Pony", "test_remove_constraint_pony_pink_gt_2"
)
self.assertEqual(
gt_operation.describe(),
"Remove constraint test_remove_constraint_pony_pink_gt_2 from model Pony",
)
self.assertEqual(
gt_operation.migration_name_fragment,
"remove_pony_test_remove_constraint_pony_pink_gt_2",
)
# Test state alteration
new_state = project_state.clone()
gt_operation.state_forwards("test_removeconstraint", new_state)
self.assertEqual(
len(
new_state.models["test_removeconstraint", "pony"].options["constraints"]
),
1,
)
Pony = new_state.apps.get_model("test_removeconstraint", "Pony")
self.assertEqual(len(Pony._meta.constraints), 1)
# Test database alteration
with connection.schema_editor() as editor:
gt_operation.database_forwards(
"test_removeconstraint", editor, project_state, new_state
)
Pony.objects.create(pink=1, weight=1.0).delete()
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=100, weight=1.0)
# Remove the other one.
lt_operation = migrations.RemoveConstraint(
"Pony", "test_remove_constraint_pony_pink_lt_100"
)
lt_operation.state_forwards("test_removeconstraint", new_state)
self.assertEqual(
len(
new_state.models["test_removeconstraint", "pony"].options["constraints"]
),
0,
)
Pony = new_state.apps.get_model("test_removeconstraint", "Pony")
self.assertEqual(len(Pony._meta.constraints), 0)
with connection.schema_editor() as editor:
lt_operation.database_forwards(
"test_removeconstraint", editor, project_state, new_state
)
Pony.objects.create(pink=100, weight=1.0).delete()
# Test reversal
with connection.schema_editor() as editor:
gt_operation.database_backwards(
"test_removeconstraint", editor, new_state, project_state
)
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=1, weight=1.0)
# Test deconstruction
definition = gt_operation.deconstruct()
self.assertEqual(definition[0], "RemoveConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{"model_name": "Pony", "name": "test_remove_constraint_pony_pink_gt_2"},
)
def test_add_partial_unique_constraint(self):
project_state = self.set_up_test_model("test_addpartialuniqueconstraint")
partial_unique_constraint = models.UniqueConstraint(
fields=["pink"],
condition=models.Q(weight__gt=5),
name="test_constraint_pony_pink_for_weight_gt_5_uniq",
)
operation = migrations.AddConstraint("Pony", partial_unique_constraint)
self.assertEqual(
operation.describe(),
"Create constraint test_constraint_pony_pink_for_weight_gt_5_uniq "
"on model Pony",
)
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards("test_addpartialuniqueconstraint", new_state)
self.assertEqual(
len(
new_state.models["test_addpartialuniqueconstraint", "pony"].options[
"constraints"
]
),
1,
)
Pony = new_state.apps.get_model("test_addpartialuniqueconstraint", "Pony")
self.assertEqual(len(Pony._meta.constraints), 1)
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards(
"test_addpartialuniqueconstraint", editor, project_state, new_state
)
# Test constraint works
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=6.0)
if connection.features.supports_partial_indexes:
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=1, weight=7.0)
else:
Pony.objects.create(pink=1, weight=7.0)
# Test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_addpartialuniqueconstraint", editor, new_state, project_state
)
# Test constraint doesn't work
Pony.objects.create(pink=1, weight=7.0)
# Test deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{"model_name": "Pony", "constraint": partial_unique_constraint},
)
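    # Not part of the original assertions: on backends with partial-index
    # support, the condition above becomes a filtered unique index, roughly
    # (PostgreSQL-style sketch):
    #
    #     CREATE UNIQUE INDEX test_constraint_pony_pink_for_weight_gt_5_uniq
    #         ON test_addpartialuniqueconstraint_pony (pink)
    #         WHERE weight > 5;
    #
    # which is why the duplicates with weight <= 5 are allowed above.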
def test_remove_partial_unique_constraint(self):
project_state = self.set_up_test_model(
"test_removepartialuniqueconstraint",
constraints=[
models.UniqueConstraint(
fields=["pink"],
condition=models.Q(weight__gt=5),
name="test_constraint_pony_pink_for_weight_gt_5_uniq",
),
],
)
gt_operation = migrations.RemoveConstraint(
"Pony", "test_constraint_pony_pink_for_weight_gt_5_uniq"
)
self.assertEqual(
gt_operation.describe(),
"Remove constraint test_constraint_pony_pink_for_weight_gt_5_uniq from "
"model Pony",
)
# Test state alteration
new_state = project_state.clone()
gt_operation.state_forwards("test_removepartialuniqueconstraint", new_state)
self.assertEqual(
len(
new_state.models["test_removepartialuniqueconstraint", "pony"].options[
"constraints"
]
),
0,
)
Pony = new_state.apps.get_model("test_removepartialuniqueconstraint", "Pony")
self.assertEqual(len(Pony._meta.constraints), 0)
# Test database alteration
with connection.schema_editor() as editor:
gt_operation.database_forwards(
"test_removepartialuniqueconstraint", editor, project_state, new_state
)
# Test constraint doesn't work
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=6.0)
Pony.objects.create(pink=1, weight=7.0).delete()
# Test reversal
with connection.schema_editor() as editor:
gt_operation.database_backwards(
"test_removepartialuniqueconstraint", editor, new_state, project_state
)
# Test constraint works
if connection.features.supports_partial_indexes:
with self.assertRaises(IntegrityError), transaction.atomic():
Pony.objects.create(pink=1, weight=7.0)
else:
Pony.objects.create(pink=1, weight=7.0)
# Test deconstruction
definition = gt_operation.deconstruct()
self.assertEqual(definition[0], "RemoveConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{
"model_name": "Pony",
"name": "test_constraint_pony_pink_for_weight_gt_5_uniq",
},
)
def test_add_deferred_unique_constraint(self):
app_label = "test_adddeferred_uc"
project_state = self.set_up_test_model(app_label)
deferred_unique_constraint = models.UniqueConstraint(
fields=["pink"],
name="deferred_pink_constraint_add",
deferrable=models.Deferrable.DEFERRED,
)
operation = migrations.AddConstraint("Pony", deferred_unique_constraint)
self.assertEqual(
operation.describe(),
"Create constraint deferred_pink_constraint_add on model Pony",
)
# Add constraint.
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(
len(new_state.models[app_label, "pony"].options["constraints"]), 1
)
Pony = new_state.apps.get_model(app_label, "Pony")
self.assertEqual(len(Pony._meta.constraints), 1)
with connection.schema_editor() as editor, CaptureQueriesContext(
connection
) as ctx:
operation.database_forwards(app_label, editor, project_state, new_state)
Pony.objects.create(pink=1, weight=4.0)
if connection.features.supports_deferrable_unique_constraints:
# Unique constraint is deferred.
with transaction.atomic():
obj = Pony.objects.create(pink=1, weight=4.0)
obj.pink = 2
obj.save()
# Constraint behavior can be changed with SET CONSTRAINTS.
with self.assertRaises(IntegrityError):
with transaction.atomic(), connection.cursor() as cursor:
quoted_name = connection.ops.quote_name(
deferred_unique_constraint.name
)
cursor.execute("SET CONSTRAINTS %s IMMEDIATE" % quoted_name)
obj = Pony.objects.create(pink=1, weight=4.0)
obj.pink = 3
obj.save()
else:
self.assertEqual(len(ctx), 0)
Pony.objects.create(pink=1, weight=4.0)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
# Constraint doesn't work.
Pony.objects.create(pink=1, weight=4.0)
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{"model_name": "Pony", "constraint": deferred_unique_constraint},
)
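    # Not part of the original assertions: a DEFERRED unique constraint is
    # only checked at COMMIT, so the temporary duplicate inside the atomic
    # block above is legal as long as it is resolved before the transaction
    # ends. On PostgreSQL the constraint is created roughly as (sketch):
    #
    #     ALTER TABLE test_adddeferred_uc_pony
    #         ADD CONSTRAINT deferred_pink_constraint_add UNIQUE (pink)
    #         DEFERRABLE INITIALLY DEFERRED;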
def test_remove_deferred_unique_constraint(self):
app_label = "test_removedeferred_uc"
deferred_unique_constraint = models.UniqueConstraint(
fields=["pink"],
name="deferred_pink_constraint_rm",
deferrable=models.Deferrable.DEFERRED,
)
project_state = self.set_up_test_model(
app_label, constraints=[deferred_unique_constraint]
)
operation = migrations.RemoveConstraint("Pony", deferred_unique_constraint.name)
self.assertEqual(
operation.describe(),
"Remove constraint deferred_pink_constraint_rm from model Pony",
)
# Remove constraint.
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(
len(new_state.models[app_label, "pony"].options["constraints"]), 0
)
Pony = new_state.apps.get_model(app_label, "Pony")
self.assertEqual(len(Pony._meta.constraints), 0)
with connection.schema_editor() as editor, CaptureQueriesContext(
connection
) as ctx:
operation.database_forwards(app_label, editor, project_state, new_state)
# Constraint doesn't work.
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=4.0).delete()
if not connection.features.supports_deferrable_unique_constraints:
self.assertEqual(len(ctx), 0)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
if connection.features.supports_deferrable_unique_constraints:
# Unique constraint is deferred.
with transaction.atomic():
obj = Pony.objects.create(pink=1, weight=4.0)
obj.pink = 2
obj.save()
# Constraint behavior can be changed with SET CONSTRAINTS.
with self.assertRaises(IntegrityError):
with transaction.atomic(), connection.cursor() as cursor:
quoted_name = connection.ops.quote_name(
deferred_unique_constraint.name
)
cursor.execute("SET CONSTRAINTS %s IMMEDIATE" % quoted_name)
obj = Pony.objects.create(pink=1, weight=4.0)
obj.pink = 3
obj.save()
else:
Pony.objects.create(pink=1, weight=4.0)
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "RemoveConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{
"model_name": "Pony",
"name": "deferred_pink_constraint_rm",
},
)
def test_add_covering_unique_constraint(self):
app_label = "test_addcovering_uc"
project_state = self.set_up_test_model(app_label)
covering_unique_constraint = models.UniqueConstraint(
fields=["pink"],
name="covering_pink_constraint_add",
include=["weight"],
)
operation = migrations.AddConstraint("Pony", covering_unique_constraint)
self.assertEqual(
operation.describe(),
"Create constraint covering_pink_constraint_add on model Pony",
)
# Add constraint.
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(
len(new_state.models[app_label, "pony"].options["constraints"]), 1
)
Pony = new_state.apps.get_model(app_label, "Pony")
self.assertEqual(len(Pony._meta.constraints), 1)
with connection.schema_editor() as editor, CaptureQueriesContext(
connection
) as ctx:
operation.database_forwards(app_label, editor, project_state, new_state)
Pony.objects.create(pink=1, weight=4.0)
if connection.features.supports_covering_indexes:
with self.assertRaises(IntegrityError):
Pony.objects.create(pink=1, weight=4.0)
else:
self.assertEqual(len(ctx), 0)
Pony.objects.create(pink=1, weight=4.0)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
# Constraint doesn't work.
Pony.objects.create(pink=1, weight=4.0)
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{"model_name": "Pony", "constraint": covering_unique_constraint},
)
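    # Not part of the original assertions: include=["weight"] maps to a
    # covering unique index, roughly (PostgreSQL-style sketch):
    #
    #     CREATE UNIQUE INDEX covering_pink_constraint_add
    #         ON test_addcovering_uc_pony (pink) INCLUDE (weight);
    #
    # Only "pink" participates in the uniqueness check; "weight" is merely
    # stored in the index for index-only scans.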
def test_remove_covering_unique_constraint(self):
app_label = "test_removecovering_uc"
covering_unique_constraint = models.UniqueConstraint(
fields=["pink"],
name="covering_pink_constraint_rm",
include=["weight"],
)
project_state = self.set_up_test_model(
app_label, constraints=[covering_unique_constraint]
)
operation = migrations.RemoveConstraint("Pony", covering_unique_constraint.name)
self.assertEqual(
operation.describe(),
"Remove constraint covering_pink_constraint_rm from model Pony",
)
# Remove constraint.
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(
len(new_state.models[app_label, "pony"].options["constraints"]), 0
)
Pony = new_state.apps.get_model(app_label, "Pony")
self.assertEqual(len(Pony._meta.constraints), 0)
with connection.schema_editor() as editor, CaptureQueriesContext(
connection
) as ctx:
operation.database_forwards(app_label, editor, project_state, new_state)
# Constraint doesn't work.
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=4.0).delete()
if not connection.features.supports_covering_indexes:
self.assertEqual(len(ctx), 0)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
if connection.features.supports_covering_indexes:
with self.assertRaises(IntegrityError):
Pony.objects.create(pink=1, weight=4.0)
else:
Pony.objects.create(pink=1, weight=4.0)
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "RemoveConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{
"model_name": "Pony",
"name": "covering_pink_constraint_rm",
},
)
def test_alter_field_with_func_unique_constraint(self):
app_label = "test_alfuncuc"
constraint_name = f"{app_label}_pony_uq"
table_name = f"{app_label}_pony"
project_state = self.set_up_test_model(
app_label,
constraints=[
models.UniqueConstraint("pink", "weight", name=constraint_name)
],
)
operation = migrations.AlterField(
"Pony", "pink", models.IntegerField(null=True)
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
if connection.features.supports_expression_indexes:
self.assertIndexNameExists(table_name, constraint_name)
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
if connection.features.supports_expression_indexes:
self.assertIndexNameExists(table_name, constraint_name)
def test_add_func_unique_constraint(self):
app_label = "test_adfuncuc"
constraint_name = f"{app_label}_pony_abs_uq"
table_name = f"{app_label}_pony"
project_state = self.set_up_test_model(app_label)
constraint = models.UniqueConstraint(Abs("weight"), name=constraint_name)
operation = migrations.AddConstraint("Pony", constraint)
self.assertEqual(
operation.describe(),
"Create constraint test_adfuncuc_pony_abs_uq on model Pony",
)
self.assertEqual(
operation.migration_name_fragment,
"pony_test_adfuncuc_pony_abs_uq",
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(
len(new_state.models[app_label, "pony"].options["constraints"]), 1
)
self.assertIndexNameNotExists(table_name, constraint_name)
# Add constraint.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
Pony = new_state.apps.get_model(app_label, "Pony")
Pony.objects.create(weight=4.0)
if connection.features.supports_expression_indexes:
self.assertIndexNameExists(table_name, constraint_name)
with self.assertRaises(IntegrityError):
Pony.objects.create(weight=-4.0)
else:
self.assertIndexNameNotExists(table_name, constraint_name)
Pony.objects.create(weight=-4.0)
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
self.assertIndexNameNotExists(table_name, constraint_name)
# Constraint doesn't work.
Pony.objects.create(weight=-4.0)
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "AddConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{"model_name": "Pony", "constraint": constraint},
)
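    # A UniqueConstraint built from expressions such as Abs("weight") is
    # implemented as a unique expression index, hence the assertions gated
    # on connection.features.supports_expression_indexes. A sketch of the
    # equivalent DDL (backend-dependent):
    #
    #   CREATE UNIQUE INDEX "test_adfuncuc_pony_abs_uq"
    #   ON "test_adfuncuc_pony" (ABS("weight"));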
def test_remove_func_unique_constraint(self):
app_label = "test_rmfuncuc"
constraint_name = f"{app_label}_pony_abs_uq"
table_name = f"{app_label}_pony"
project_state = self.set_up_test_model(
app_label,
constraints=[
models.UniqueConstraint(Abs("weight"), name=constraint_name),
],
)
self.assertTableExists(table_name)
if connection.features.supports_expression_indexes:
self.assertIndexNameExists(table_name, constraint_name)
operation = migrations.RemoveConstraint("Pony", constraint_name)
self.assertEqual(
operation.describe(),
"Remove constraint test_rmfuncuc_pony_abs_uq from model Pony",
)
self.assertEqual(
operation.migration_name_fragment,
"remove_pony_test_rmfuncuc_pony_abs_uq",
)
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
self.assertEqual(
len(new_state.models[app_label, "pony"].options["constraints"]), 0
)
Pony = new_state.apps.get_model(app_label, "Pony")
self.assertEqual(len(Pony._meta.constraints), 0)
# Remove constraint.
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
self.assertIndexNameNotExists(table_name, constraint_name)
# Constraint doesn't work.
Pony.objects.create(pink=1, weight=4.0)
Pony.objects.create(pink=1, weight=-4.0).delete()
# Reversal.
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
if connection.features.supports_expression_indexes:
self.assertIndexNameExists(table_name, constraint_name)
with self.assertRaises(IntegrityError):
Pony.objects.create(weight=-4.0)
else:
self.assertIndexNameNotExists(table_name, constraint_name)
Pony.objects.create(weight=-4.0)
# Deconstruction.
definition = operation.deconstruct()
self.assertEqual(definition[0], "RemoveConstraint")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"model_name": "Pony", "name": constraint_name})
def test_alter_model_options(self):
"""
Tests the AlterModelOptions operation.
"""
project_state = self.set_up_test_model("test_almoop")
# Test the state alteration (no DB alteration to test)
operation = migrations.AlterModelOptions(
"Pony", {"permissions": [("can_groom", "Can groom")]}
)
self.assertEqual(operation.describe(), "Change Meta options on Pony")
self.assertEqual(operation.migration_name_fragment, "alter_pony_options")
new_state = project_state.clone()
operation.state_forwards("test_almoop", new_state)
self.assertEqual(
len(
project_state.models["test_almoop", "pony"].options.get(
"permissions", []
)
),
0,
)
self.assertEqual(
len(new_state.models["test_almoop", "pony"].options.get("permissions", [])),
1,
)
self.assertEqual(
new_state.models["test_almoop", "pony"].options["permissions"][0][0],
"can_groom",
)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterModelOptions")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2],
{"name": "Pony", "options": {"permissions": [("can_groom", "Can groom")]}},
)
def test_alter_model_options_emptying(self):
"""
The AlterModelOptions operation removes keys from the dict (#23121)
"""
project_state = self.set_up_test_model("test_almoop", options=True)
# Test the state alteration (no DB alteration to test)
operation = migrations.AlterModelOptions("Pony", {})
self.assertEqual(operation.describe(), "Change Meta options on Pony")
new_state = project_state.clone()
operation.state_forwards("test_almoop", new_state)
self.assertEqual(
len(
project_state.models["test_almoop", "pony"].options.get(
"permissions", []
)
),
1,
)
self.assertEqual(
len(new_state.models["test_almoop", "pony"].options.get("permissions", [])),
0,
)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterModelOptions")
self.assertEqual(definition[1], [])
self.assertEqual(definition[2], {"name": "Pony", "options": {}})
def test_alter_order_with_respect_to(self):
"""
Tests the AlterOrderWithRespectTo operation.
"""
project_state = self.set_up_test_model("test_alorwrtto", related_model=True)
# Test the state alteration
operation = migrations.AlterOrderWithRespectTo("Rider", "pony")
self.assertEqual(
operation.describe(), "Set order_with_respect_to on Rider to pony"
)
self.assertEqual(
operation.migration_name_fragment,
"alter_rider_order_with_respect_to",
)
new_state = project_state.clone()
operation.state_forwards("test_alorwrtto", new_state)
self.assertIsNone(
project_state.models["test_alorwrtto", "rider"].options.get(
"order_with_respect_to", None
)
)
self.assertEqual(
new_state.models["test_alorwrtto", "rider"].options.get(
"order_with_respect_to", None
),
"pony",
)
# Make sure there's no matching index
self.assertColumnNotExists("test_alorwrtto_rider", "_order")
# Create some rows before alteration
rendered_state = project_state.apps
pony = rendered_state.get_model("test_alorwrtto", "Pony").objects.create(
weight=50
)
rider1 = rendered_state.get_model("test_alorwrtto", "Rider").objects.create(
pony=pony
)
rider1.friend = rider1
rider1.save()
rider2 = rendered_state.get_model("test_alorwrtto", "Rider").objects.create(
pony=pony
)
rider2.friend = rider2
rider2.save()
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards(
"test_alorwrtto", editor, project_state, new_state
)
self.assertColumnExists("test_alorwrtto_rider", "_order")
# Check for correct value in rows
updated_riders = new_state.apps.get_model(
"test_alorwrtto", "Rider"
).objects.all()
self.assertEqual(updated_riders[0]._order, 0)
self.assertEqual(updated_riders[1]._order, 0)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_alorwrtto", editor, new_state, project_state
)
self.assertColumnNotExists("test_alorwrtto_rider", "_order")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "AlterOrderWithRespectTo")
self.assertEqual(definition[1], [])
self.assertEqual(
definition[2], {"name": "Rider", "order_with_respect_to": "pony"}
)
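    # AlterOrderWithRespectTo adds a hidden integer "_order" column and is
    # the migration counterpart of declaring, for example:
    #
    #   class Rider(models.Model):
    #       pony = models.ForeignKey("Pony", models.CASCADE)
    #
    #       class Meta:
    #           order_with_respect_to = "pony"
    #
    # Pre-existing rows all receive the column default, which is why both
    # riders above end up with _order == 0.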
def test_alter_model_managers(self):
"""
The managers on a model are set.
"""
project_state = self.set_up_test_model("test_almoma")
# Test the state alteration
operation = migrations.AlterModelManagers(
"Pony",
managers=[
("food_qs", FoodQuerySet.as_manager()),
("food_mgr", FoodManager("a", "b")),
("food_mgr_kwargs", FoodManager("x", "y", 3, 4)),
],
)
self.assertEqual(operation.describe(), "Change managers on Pony")
self.assertEqual(operation.migration_name_fragment, "alter_pony_managers")
managers = project_state.models["test_almoma", "pony"].managers
self.assertEqual(managers, [])
new_state = project_state.clone()
operation.state_forwards("test_almoma", new_state)
self.assertIn(("test_almoma", "pony"), new_state.models)
managers = new_state.models["test_almoma", "pony"].managers
self.assertEqual(managers[0][0], "food_qs")
self.assertIsInstance(managers[0][1], models.Manager)
self.assertEqual(managers[1][0], "food_mgr")
self.assertIsInstance(managers[1][1], FoodManager)
self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
self.assertEqual(managers[2][0], "food_mgr_kwargs")
self.assertIsInstance(managers[2][1], FoodManager)
self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))
rendered_state = new_state.apps
model = rendered_state.get_model("test_almoma", "pony")
self.assertIsInstance(model.food_qs, models.Manager)
self.assertIsInstance(model.food_mgr, FoodManager)
self.assertIsInstance(model.food_mgr_kwargs, FoodManager)
def test_alter_model_managers_emptying(self):
"""
        The managers on a model can be emptied.
"""
project_state = self.set_up_test_model("test_almomae", manager_model=True)
# Test the state alteration
operation = migrations.AlterModelManagers("Food", managers=[])
self.assertEqual(operation.describe(), "Change managers on Food")
self.assertIn(("test_almomae", "food"), project_state.models)
managers = project_state.models["test_almomae", "food"].managers
self.assertEqual(managers[0][0], "food_qs")
self.assertIsInstance(managers[0][1], models.Manager)
self.assertEqual(managers[1][0], "food_mgr")
self.assertIsInstance(managers[1][1], FoodManager)
self.assertEqual(managers[1][1].args, ("a", "b", 1, 2))
self.assertEqual(managers[2][0], "food_mgr_kwargs")
self.assertIsInstance(managers[2][1], FoodManager)
self.assertEqual(managers[2][1].args, ("x", "y", 3, 4))
new_state = project_state.clone()
operation.state_forwards("test_almomae", new_state)
managers = new_state.models["test_almomae", "food"].managers
self.assertEqual(managers, [])
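    # Only managers that opt in with use_in_migrations = True are recorded
    # in migration state at all; AlterModelManagers then replaces that
    # recorded list wholesale. A minimal opt-in looks like:
    #
    #   class FoodManager(models.Manager):
    #       use_in_migrations = True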
def test_alter_fk(self):
"""
Creating and then altering an FK works correctly
and deals with the pending SQL (#23091)
"""
project_state = self.set_up_test_model("test_alfk")
# Test adding and then altering the FK in one go
create_operation = migrations.CreateModel(
name="Rider",
fields=[
("id", models.AutoField(primary_key=True)),
("pony", models.ForeignKey("Pony", models.CASCADE)),
],
)
create_state = project_state.clone()
create_operation.state_forwards("test_alfk", create_state)
alter_operation = migrations.AlterField(
model_name="Rider",
name="pony",
field=models.ForeignKey("Pony", models.CASCADE, editable=False),
)
alter_state = create_state.clone()
alter_operation.state_forwards("test_alfk", alter_state)
with connection.schema_editor() as editor:
create_operation.database_forwards(
"test_alfk", editor, project_state, create_state
)
alter_operation.database_forwards(
"test_alfk", editor, create_state, alter_state
)
def test_alter_fk_non_fk(self):
"""
Altering an FK to a non-FK works (#23244)
"""
# Test the state alteration
operation = migrations.AlterField(
model_name="Rider",
name="pony",
field=models.FloatField(),
)
project_state, new_state = self.make_test_state(
"test_afknfk", operation, related_model=True
)
# Test the database alteration
self.assertColumnExists("test_afknfk_rider", "pony_id")
self.assertColumnNotExists("test_afknfk_rider", "pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_afknfk", editor, project_state, new_state)
self.assertColumnExists("test_afknfk_rider", "pony")
self.assertColumnNotExists("test_afknfk_rider", "pony_id")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_afknfk", editor, new_state, project_state
)
self.assertColumnExists("test_afknfk_rider", "pony_id")
self.assertColumnNotExists("test_afknfk_rider", "pony")
def test_run_sql(self):
"""
Tests the RunSQL operation.
"""
project_state = self.set_up_test_model("test_runsql")
# Create the operation
operation = migrations.RunSQL(
# Use a multi-line string with a comment to test splitting on
# SQLite and MySQL respectively.
"CREATE TABLE i_love_ponies (id int, special_thing varchar(15));\n"
"INSERT INTO i_love_ponies (id, special_thing) "
"VALUES (1, 'i love ponies'); -- this is magic!\n"
"INSERT INTO i_love_ponies (id, special_thing) "
"VALUES (2, 'i love django');\n"
"UPDATE i_love_ponies SET special_thing = 'Ponies' "
"WHERE special_thing LIKE '%%ponies';"
"UPDATE i_love_ponies SET special_thing = 'Django' "
"WHERE special_thing LIKE '%django';",
# Run delete queries to test for parameter substitution failure
# reported in #23426
"DELETE FROM i_love_ponies WHERE special_thing LIKE '%Django%';"
"DELETE FROM i_love_ponies WHERE special_thing LIKE '%%Ponies%%';"
"DROP TABLE i_love_ponies",
state_operations=[
migrations.CreateModel(
"SomethingElse", [("id", models.AutoField(primary_key=True))]
)
],
)
self.assertEqual(operation.describe(), "Raw SQL operation")
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards("test_runsql", new_state)
self.assertEqual(
len(new_state.models["test_runsql", "somethingelse"].fields), 1
)
# Make sure there's no table
self.assertTableNotExists("i_love_ponies")
# Test SQL collection
with connection.schema_editor(collect_sql=True) as editor:
operation.database_forwards("test_runsql", editor, project_state, new_state)
self.assertIn("LIKE '%%ponies';", "\n".join(editor.collected_sql))
operation.database_backwards(
"test_runsql", editor, project_state, new_state
)
self.assertIn("LIKE '%%Ponies%%';", "\n".join(editor.collected_sql))
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_runsql", editor, project_state, new_state)
self.assertTableExists("i_love_ponies")
# Make sure all the SQL was processed
with connection.cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM i_love_ponies")
self.assertEqual(cursor.fetchall()[0][0], 2)
cursor.execute(
"SELECT COUNT(*) FROM i_love_ponies WHERE special_thing = 'Django'"
)
self.assertEqual(cursor.fetchall()[0][0], 1)
cursor.execute(
"SELECT COUNT(*) FROM i_love_ponies WHERE special_thing = 'Ponies'"
)
self.assertEqual(cursor.fetchall()[0][0], 1)
# And test reversal
self.assertTrue(operation.reversible)
with connection.schema_editor() as editor:
operation.database_backwards(
"test_runsql", editor, new_state, project_state
)
self.assertTableNotExists("i_love_ponies")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RunSQL")
self.assertEqual(definition[1], [])
self.assertEqual(
sorted(definition[2]), ["reverse_sql", "sql", "state_operations"]
)
# And elidable reduction
self.assertIs(False, operation.reduce(operation, []))
elidable_operation = migrations.RunSQL("SELECT 1 FROM void;", elidable=True)
self.assertEqual(elidable_operation.reduce(operation, []), [operation])
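    # Typical RunSQL usage pairs forward SQL with reverse SQL, or with
    # RunSQL.noop to make the operation reversible without emitting
    # anything on the way back. A sketch:
    #
    #   migrations.RunSQL(
    #       sql="CREATE INDEX pony_pink_idx ON pony (pink);",
    #       reverse_sql="DROP INDEX pony_pink_idx;",
    #   )
    #
    # elidable=True lets the optimizer drop the operation when squashing,
    # which is what the reduce() assertions above exercise.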
def test_run_sql_params(self):
"""
#23426 - RunSQL should accept parameters.
"""
project_state = self.set_up_test_model("test_runsql")
# Create the operation
operation = migrations.RunSQL(
["CREATE TABLE i_love_ponies (id int, special_thing varchar(15));"],
["DROP TABLE i_love_ponies"],
)
param_operation = migrations.RunSQL(
# forwards
(
"INSERT INTO i_love_ponies (id, special_thing) VALUES (1, 'Django');",
[
"INSERT INTO i_love_ponies (id, special_thing) VALUES (2, %s);",
["Ponies"],
],
(
"INSERT INTO i_love_ponies (id, special_thing) VALUES (%s, %s);",
(
3,
"Python",
),
),
),
# backwards
[
"DELETE FROM i_love_ponies WHERE special_thing = 'Django';",
["DELETE FROM i_love_ponies WHERE special_thing = 'Ponies';", None],
(
"DELETE FROM i_love_ponies WHERE id = %s OR special_thing = %s;",
[3, "Python"],
),
],
)
# Make sure there's no table
self.assertTableNotExists("i_love_ponies")
new_state = project_state.clone()
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards("test_runsql", editor, project_state, new_state)
# Test parameter passing
with connection.schema_editor() as editor:
param_operation.database_forwards(
"test_runsql", editor, project_state, new_state
)
# Make sure all the SQL was processed
with connection.cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM i_love_ponies")
self.assertEqual(cursor.fetchall()[0][0], 3)
with connection.schema_editor() as editor:
param_operation.database_backwards(
"test_runsql", editor, new_state, project_state
)
with connection.cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM i_love_ponies")
self.assertEqual(cursor.fetchall()[0][0], 0)
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_runsql", editor, new_state, project_state
)
self.assertTableNotExists("i_love_ponies")
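    # Each statement passed to RunSQL may be either a plain string or a
    # 2-tuple of (sql, params), where params is a sequence or None. A
    # sketch:
    #
    #   migrations.RunSQL(
    #       [("INSERT INTO pony (pink) VALUES (%s);", [1])],
    #       [("DELETE FROM pony WHERE pink = %s;", [1])],
    #   )
    #
    # Anything that is a sequence but not a 2-tuple raises ValueError, as
    # the next test demonstrates.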
def test_run_sql_params_invalid(self):
"""
        #23426 - RunSQL should fail when a statement is given as a tuple
        with an incorrect number of elements.
"""
project_state = self.set_up_test_model("test_runsql")
new_state = project_state.clone()
operation = migrations.RunSQL(
# forwards
[["INSERT INTO foo (bar) VALUES ('buz');"]],
# backwards
(("DELETE FROM foo WHERE bar = 'buz';", "invalid", "parameter count"),),
)
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, "Expected a 2-tuple but got 1"):
operation.database_forwards(
"test_runsql", editor, project_state, new_state
)
with connection.schema_editor() as editor:
with self.assertRaisesMessage(ValueError, "Expected a 2-tuple but got 3"):
operation.database_backwards(
"test_runsql", editor, new_state, project_state
)
def test_run_sql_noop(self):
"""
#24098 - Tests no-op RunSQL operations.
"""
operation = migrations.RunSQL(migrations.RunSQL.noop, migrations.RunSQL.noop)
with connection.schema_editor() as editor:
operation.database_forwards("test_runsql", editor, None, None)
operation.database_backwards("test_runsql", editor, None, None)
def test_run_sql_add_missing_semicolon_on_collect_sql(self):
project_state = self.set_up_test_model("test_runsql")
new_state = project_state.clone()
tests = [
"INSERT INTO test_runsql_pony (pink, weight) VALUES (1, 1);\n",
"INSERT INTO test_runsql_pony (pink, weight) VALUES (1, 1)\n",
]
for sql in tests:
with self.subTest(sql=sql):
operation = migrations.RunSQL(sql, migrations.RunPython.noop)
with connection.schema_editor(collect_sql=True) as editor:
operation.database_forwards(
"test_runsql", editor, project_state, new_state
)
collected_sql = "\n".join(editor.collected_sql)
self.assertEqual(collected_sql.count(";"), 1)
def test_run_python(self):
"""
Tests the RunPython operation
"""
project_state = self.set_up_test_model("test_runpython", mti_model=True)
# Create the operation
def inner_method(models, schema_editor):
Pony = models.get_model("test_runpython", "Pony")
Pony.objects.create(pink=1, weight=3.55)
Pony.objects.create(weight=5)
def inner_method_reverse(models, schema_editor):
Pony = models.get_model("test_runpython", "Pony")
Pony.objects.filter(pink=1, weight=3.55).delete()
Pony.objects.filter(weight=5).delete()
operation = migrations.RunPython(
inner_method, reverse_code=inner_method_reverse
)
self.assertEqual(operation.describe(), "Raw Python operation")
# Test the state alteration does nothing
new_state = project_state.clone()
operation.state_forwards("test_runpython", new_state)
self.assertEqual(new_state, project_state)
# Test the database alteration
self.assertEqual(
project_state.apps.get_model("test_runpython", "Pony").objects.count(), 0
)
with connection.schema_editor() as editor:
operation.database_forwards(
"test_runpython", editor, project_state, new_state
)
self.assertEqual(
project_state.apps.get_model("test_runpython", "Pony").objects.count(), 2
)
# Now test reversal
self.assertTrue(operation.reversible)
with connection.schema_editor() as editor:
operation.database_backwards(
"test_runpython", editor, project_state, new_state
)
self.assertEqual(
project_state.apps.get_model("test_runpython", "Pony").objects.count(), 0
)
# Now test we can't use a string
with self.assertRaisesMessage(
ValueError, "RunPython must be supplied with a callable"
):
migrations.RunPython("print 'ahahaha'")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RunPython")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["code", "reverse_code"])
# Also test reversal fails, with an operation identical to above but
# without reverse_code set.
no_reverse_operation = migrations.RunPython(inner_method)
self.assertFalse(no_reverse_operation.reversible)
with connection.schema_editor() as editor:
no_reverse_operation.database_forwards(
"test_runpython", editor, project_state, new_state
)
with self.assertRaises(NotImplementedError):
no_reverse_operation.database_backwards(
"test_runpython", editor, new_state, project_state
)
self.assertEqual(
project_state.apps.get_model("test_runpython", "Pony").objects.count(), 2
)
def create_ponies(models, schema_editor):
Pony = models.get_model("test_runpython", "Pony")
pony1 = Pony.objects.create(pink=1, weight=3.55)
self.assertIsNot(pony1.pk, None)
pony2 = Pony.objects.create(weight=5)
self.assertIsNot(pony2.pk, None)
self.assertNotEqual(pony1.pk, pony2.pk)
operation = migrations.RunPython(create_ponies)
with connection.schema_editor() as editor:
operation.database_forwards(
"test_runpython", editor, project_state, new_state
)
self.assertEqual(
project_state.apps.get_model("test_runpython", "Pony").objects.count(), 4
)
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "RunPython")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["code"])
def create_shetlandponies(models, schema_editor):
ShetlandPony = models.get_model("test_runpython", "ShetlandPony")
pony1 = ShetlandPony.objects.create(weight=4.0)
self.assertIsNot(pony1.pk, None)
pony2 = ShetlandPony.objects.create(weight=5.0)
self.assertIsNot(pony2.pk, None)
self.assertNotEqual(pony1.pk, pony2.pk)
operation = migrations.RunPython(create_shetlandponies)
with connection.schema_editor() as editor:
operation.database_forwards(
"test_runpython", editor, project_state, new_state
)
self.assertEqual(
project_state.apps.get_model("test_runpython", "Pony").objects.count(), 6
)
self.assertEqual(
project_state.apps.get_model(
"test_runpython", "ShetlandPony"
).objects.count(),
2,
)
# And elidable reduction
self.assertIs(False, operation.reduce(operation, []))
elidable_operation = migrations.RunPython(inner_method, elidable=True)
self.assertEqual(elidable_operation.reduce(operation, []), [operation])
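    # RunPython callables receive (apps, schema_editor) and must look up
    # models via apps.get_model() so they act on the historical model
    # state rather than on the current model classes. A reversible sketch:
    #
    #   def forwards(apps, schema_editor):
    #       Pony = apps.get_model("myapp", "Pony")
    #       Pony.objects.update(pink=1)
    #
    #   migrations.RunPython(forwards, migrations.RunPython.noop)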
def test_run_python_atomic(self):
"""
Tests the RunPython operation correctly handles the "atomic" keyword
"""
project_state = self.set_up_test_model("test_runpythonatomic", mti_model=True)
def inner_method(models, schema_editor):
Pony = models.get_model("test_runpythonatomic", "Pony")
Pony.objects.create(pink=1, weight=3.55)
raise ValueError("Adrian hates ponies.")
# Verify atomicity when applying.
atomic_migration = Migration("test", "test_runpythonatomic")
atomic_migration.operations = [
migrations.RunPython(inner_method, reverse_code=inner_method)
]
non_atomic_migration = Migration("test", "test_runpythonatomic")
non_atomic_migration.operations = [
migrations.RunPython(inner_method, reverse_code=inner_method, atomic=False)
]
# If we're a fully-transactional database, both versions should rollback
if connection.features.can_rollback_ddl:
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
atomic_migration.apply(project_state, editor)
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
non_atomic_migration.apply(project_state, editor)
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
# Otherwise, the non-atomic operation should leave a row there
else:
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
atomic_migration.apply(project_state, editor)
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
non_atomic_migration.apply(project_state, editor)
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
1,
)
# Reset object count to zero and verify atomicity when unapplying.
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.all().delete()
# On a fully-transactional database, both versions rollback.
if connection.features.can_rollback_ddl:
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
atomic_migration.unapply(project_state, editor)
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
non_atomic_migration.unapply(project_state, editor)
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
# Otherwise, the non-atomic operation leaves a row there.
else:
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
atomic_migration.unapply(project_state, editor)
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
0,
)
with self.assertRaises(ValueError):
with connection.schema_editor() as editor:
non_atomic_migration.unapply(project_state, editor)
self.assertEqual(
project_state.apps.get_model(
"test_runpythonatomic", "Pony"
).objects.count(),
1,
)
# Verify deconstruction.
definition = non_atomic_migration.operations[0].deconstruct()
self.assertEqual(definition[0], "RunPython")
self.assertEqual(definition[1], [])
self.assertEqual(sorted(definition[2]), ["atomic", "code", "reverse_code"])
def test_run_python_related_assignment(self):
"""
#24282 - Model changes to a FK reverse side update the model
on the FK side as well.
"""
def inner_method(models, schema_editor):
Author = models.get_model("test_authors", "Author")
Book = models.get_model("test_books", "Book")
author = Author.objects.create(name="Hemingway")
Book.objects.create(title="Old Man and The Sea", author=author)
create_author = migrations.CreateModel(
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
],
options={},
)
create_book = migrations.CreateModel(
"Book",
[
("id", models.AutoField(primary_key=True)),
("title", models.CharField(max_length=100)),
("author", models.ForeignKey("test_authors.Author", models.CASCADE)),
],
options={},
)
add_hometown = migrations.AddField(
"Author",
"hometown",
models.CharField(max_length=100),
)
create_old_man = migrations.RunPython(inner_method, inner_method)
project_state = ProjectState()
new_state = project_state.clone()
with connection.schema_editor() as editor:
create_author.state_forwards("test_authors", new_state)
create_author.database_forwards(
"test_authors", editor, project_state, new_state
)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
create_book.state_forwards("test_books", new_state)
create_book.database_forwards(
"test_books", editor, project_state, new_state
)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
add_hometown.state_forwards("test_authors", new_state)
add_hometown.database_forwards(
"test_authors", editor, project_state, new_state
)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
create_old_man.state_forwards("test_books", new_state)
create_old_man.database_forwards(
"test_books", editor, project_state, new_state
)
def test_model_with_bigautofield(self):
"""
A model with BigAutoField can be created.
"""
def create_data(models, schema_editor):
Author = models.get_model("test_author", "Author")
Book = models.get_model("test_book", "Book")
author1 = Author.objects.create(name="Hemingway")
Book.objects.create(title="Old Man and The Sea", author=author1)
Book.objects.create(id=2**33, title="A farewell to arms", author=author1)
author2 = Author.objects.create(id=2**33, name="Remarque")
Book.objects.create(title="All quiet on the western front", author=author2)
Book.objects.create(title="Arc de Triomphe", author=author2)
create_author = migrations.CreateModel(
"Author",
[
("id", models.BigAutoField(primary_key=True)),
("name", models.CharField(max_length=100)),
],
options={},
)
create_book = migrations.CreateModel(
"Book",
[
("id", models.BigAutoField(primary_key=True)),
("title", models.CharField(max_length=100)),
(
"author",
models.ForeignKey(
to="test_author.Author", on_delete=models.CASCADE
),
),
],
options={},
)
fill_data = migrations.RunPython(create_data)
project_state = ProjectState()
new_state = project_state.clone()
with connection.schema_editor() as editor:
create_author.state_forwards("test_author", new_state)
create_author.database_forwards(
"test_author", editor, project_state, new_state
)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
create_book.state_forwards("test_book", new_state)
create_book.database_forwards("test_book", editor, project_state, new_state)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
fill_data.state_forwards("fill_data", new_state)
fill_data.database_forwards("fill_data", editor, project_state, new_state)
def _test_autofield_foreignfield_growth(
self, source_field, target_field, target_value
):
"""
A field may be migrated in the following ways:
- AutoField to BigAutoField
- SmallAutoField to AutoField
- SmallAutoField to BigAutoField
"""
def create_initial_data(models, schema_editor):
Article = models.get_model("test_article", "Article")
Blog = models.get_model("test_blog", "Blog")
blog = Blog.objects.create(name="web development done right")
Article.objects.create(name="Frameworks", blog=blog)
Article.objects.create(name="Programming Languages", blog=blog)
def create_big_data(models, schema_editor):
Article = models.get_model("test_article", "Article")
Blog = models.get_model("test_blog", "Blog")
blog2 = Blog.objects.create(name="Frameworks", id=target_value)
Article.objects.create(name="Django", blog=blog2)
Article.objects.create(id=target_value, name="Django2", blog=blog2)
create_blog = migrations.CreateModel(
"Blog",
[
("id", source_field(primary_key=True)),
("name", models.CharField(max_length=100)),
],
options={},
)
create_article = migrations.CreateModel(
"Article",
[
("id", source_field(primary_key=True)),
(
"blog",
models.ForeignKey(to="test_blog.Blog", on_delete=models.CASCADE),
),
("name", models.CharField(max_length=100)),
("data", models.TextField(default="")),
],
options={},
)
fill_initial_data = migrations.RunPython(
create_initial_data, create_initial_data
)
fill_big_data = migrations.RunPython(create_big_data, create_big_data)
grow_article_id = migrations.AlterField(
"Article", "id", target_field(primary_key=True)
)
grow_blog_id = migrations.AlterField(
"Blog", "id", target_field(primary_key=True)
)
project_state = ProjectState()
new_state = project_state.clone()
with connection.schema_editor() as editor:
create_blog.state_forwards("test_blog", new_state)
create_blog.database_forwards("test_blog", editor, project_state, new_state)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
create_article.state_forwards("test_article", new_state)
create_article.database_forwards(
"test_article", editor, project_state, new_state
)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
fill_initial_data.state_forwards("fill_initial_data", new_state)
fill_initial_data.database_forwards(
"fill_initial_data", editor, project_state, new_state
)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
grow_article_id.state_forwards("test_article", new_state)
grow_article_id.database_forwards(
"test_article", editor, project_state, new_state
)
state = new_state.clone()
article = state.apps.get_model("test_article.Article")
self.assertIsInstance(article._meta.pk, target_field)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
grow_blog_id.state_forwards("test_blog", new_state)
grow_blog_id.database_forwards(
"test_blog", editor, project_state, new_state
)
state = new_state.clone()
blog = state.apps.get_model("test_blog.Blog")
self.assertIsInstance(blog._meta.pk, target_field)
project_state = new_state
new_state = new_state.clone()
with connection.schema_editor() as editor:
fill_big_data.state_forwards("fill_big_data", new_state)
fill_big_data.database_forwards(
"fill_big_data", editor, project_state, new_state
)
def test_autofield__bigautofield_foreignfield_growth(self):
"""A field may be migrated from AutoField to BigAutoField."""
self._test_autofield_foreignfield_growth(
models.AutoField,
models.BigAutoField,
2**33,
)
def test_smallfield_autofield_foreignfield_growth(self):
"""A field may be migrated from SmallAutoField to AutoField."""
self._test_autofield_foreignfield_growth(
models.SmallAutoField,
models.AutoField,
2**22,
)
def test_smallfield_bigautofield_foreignfield_growth(self):
"""A field may be migrated from SmallAutoField to BigAutoField."""
self._test_autofield_foreignfield_growth(
models.SmallAutoField,
models.BigAutoField,
2**33,
)
def test_run_python_noop(self):
"""
#24098 - Tests no-op RunPython operations.
"""
project_state = ProjectState()
new_state = project_state.clone()
operation = migrations.RunPython(
migrations.RunPython.noop, migrations.RunPython.noop
)
with connection.schema_editor() as editor:
operation.database_forwards(
"test_runpython", editor, project_state, new_state
)
operation.database_backwards(
"test_runpython", editor, new_state, project_state
)
def test_separate_database_and_state(self):
"""
Tests the SeparateDatabaseAndState operation.
"""
project_state = self.set_up_test_model("test_separatedatabaseandstate")
# Create the operation
database_operation = migrations.RunSQL(
"CREATE TABLE i_love_ponies (id int, special_thing int);",
"DROP TABLE i_love_ponies;",
)
state_operation = migrations.CreateModel(
"SomethingElse", [("id", models.AutoField(primary_key=True))]
)
operation = migrations.SeparateDatabaseAndState(
state_operations=[state_operation], database_operations=[database_operation]
)
self.assertEqual(
operation.describe(), "Custom state/database change combination"
)
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards("test_separatedatabaseandstate", new_state)
self.assertEqual(
len(
new_state.models[
"test_separatedatabaseandstate", "somethingelse"
].fields
),
1,
)
# Make sure there's no table
self.assertTableNotExists("i_love_ponies")
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards(
"test_separatedatabaseandstate", editor, project_state, new_state
)
self.assertTableExists("i_love_ponies")
# And test reversal
self.assertTrue(operation.reversible)
with connection.schema_editor() as editor:
operation.database_backwards(
"test_separatedatabaseandstate", editor, new_state, project_state
)
self.assertTableNotExists("i_love_ponies")
# And deconstruction
definition = operation.deconstruct()
self.assertEqual(definition[0], "SeparateDatabaseAndState")
self.assertEqual(definition[1], [])
self.assertEqual(
sorted(definition[2]), ["database_operations", "state_operations"]
)
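    # SeparateDatabaseAndState is the escape hatch for cases where project
    # state and the database must diverge, e.g. adopting a table that
    # already exists without emitting any DDL (a sketch, field list
    # elided):
    #
    #   migrations.SeparateDatabaseAndState(
    #       state_operations=[migrations.CreateModel("Legacy", fields=[])],
    #       database_operations=[],
    #   )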
def test_separate_database_and_state2(self):
"""
A complex SeparateDatabaseAndState operation: Multiple operations both
for state and database. Verify the state dependencies within each list
and that state ops don't affect the database.
"""
app_label = "test_separatedatabaseandstate2"
project_state = self.set_up_test_model(app_label)
# Create the operation
database_operations = [
migrations.CreateModel(
"ILovePonies",
[("id", models.AutoField(primary_key=True))],
options={"db_table": "iloveponies"},
),
migrations.CreateModel(
"ILoveMorePonies",
# We use IntegerField and not AutoField because
# the model is going to be deleted immediately
# and with an AutoField this fails on Oracle
[("id", models.IntegerField(primary_key=True))],
options={"db_table": "ilovemoreponies"},
),
migrations.DeleteModel("ILoveMorePonies"),
migrations.CreateModel(
"ILoveEvenMorePonies",
[("id", models.AutoField(primary_key=True))],
options={"db_table": "iloveevenmoreponies"},
),
]
state_operations = [
migrations.CreateModel(
"SomethingElse",
[("id", models.AutoField(primary_key=True))],
options={"db_table": "somethingelse"},
),
migrations.DeleteModel("SomethingElse"),
migrations.CreateModel(
"SomethingCompletelyDifferent",
[("id", models.AutoField(primary_key=True))],
options={"db_table": "somethingcompletelydifferent"},
),
]
operation = migrations.SeparateDatabaseAndState(
state_operations=state_operations,
database_operations=database_operations,
)
# Test the state alteration
new_state = project_state.clone()
operation.state_forwards(app_label, new_state)
def assertModelsAndTables(after_db):
# Tables and models exist, or don't, as they should:
self.assertNotIn((app_label, "somethingelse"), new_state.models)
self.assertEqual(
len(new_state.models[app_label, "somethingcompletelydifferent"].fields),
1,
)
self.assertNotIn((app_label, "iloveponiesonies"), new_state.models)
self.assertNotIn((app_label, "ilovemoreponies"), new_state.models)
self.assertNotIn((app_label, "iloveevenmoreponies"), new_state.models)
self.assertTableNotExists("somethingelse")
self.assertTableNotExists("somethingcompletelydifferent")
self.assertTableNotExists("ilovemoreponies")
if after_db:
self.assertTableExists("iloveponies")
self.assertTableExists("iloveevenmoreponies")
else:
self.assertTableNotExists("iloveponies")
self.assertTableNotExists("iloveevenmoreponies")
assertModelsAndTables(after_db=False)
# Test the database alteration
with connection.schema_editor() as editor:
operation.database_forwards(app_label, editor, project_state, new_state)
assertModelsAndTables(after_db=True)
# And test reversal
self.assertTrue(operation.reversible)
with connection.schema_editor() as editor:
operation.database_backwards(app_label, editor, new_state, project_state)
assertModelsAndTables(after_db=False)
class SwappableOperationTests(OperationTestBase):
"""
Key operations ignore swappable models
(we don't want to replicate all of them here, as the functionality
is in a common base class anyway)
"""
available_apps = ["migrations"]
@override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
def test_create_ignore_swapped(self):
"""
The CreateTable operation ignores swapped models.
"""
operation = migrations.CreateModel(
"Pony",
[
("id", models.AutoField(primary_key=True)),
("pink", models.IntegerField(default=1)),
],
options={
"swappable": "TEST_SWAP_MODEL",
},
)
# Test the state alteration (it should still be there!)
project_state = ProjectState()
new_state = project_state.clone()
operation.state_forwards("test_crigsw", new_state)
self.assertEqual(new_state.models["test_crigsw", "pony"].name, "Pony")
self.assertEqual(len(new_state.models["test_crigsw", "pony"].fields), 2)
# Test the database alteration
self.assertTableNotExists("test_crigsw_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_crigsw", editor, project_state, new_state)
self.assertTableNotExists("test_crigsw_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_crigsw", editor, new_state, project_state
)
self.assertTableNotExists("test_crigsw_pony")
@override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
def test_delete_ignore_swapped(self):
"""
Tests the DeleteModel operation ignores swapped models.
"""
operation = migrations.DeleteModel("Pony")
project_state, new_state = self.make_test_state("test_dligsw", operation)
# Test the database alteration
self.assertTableNotExists("test_dligsw_pony")
with connection.schema_editor() as editor:
operation.database_forwards("test_dligsw", editor, project_state, new_state)
self.assertTableNotExists("test_dligsw_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_dligsw", editor, new_state, project_state
)
self.assertTableNotExists("test_dligsw_pony")
@override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
def test_add_field_ignore_swapped(self):
"""
Tests the AddField operation.
"""
# Test the state alteration
operation = migrations.AddField(
"Pony",
"height",
models.FloatField(null=True, default=5),
)
project_state, new_state = self.make_test_state("test_adfligsw", operation)
# Test the database alteration
self.assertTableNotExists("test_adfligsw_pony")
with connection.schema_editor() as editor:
operation.database_forwards(
"test_adfligsw", editor, project_state, new_state
)
self.assertTableNotExists("test_adfligsw_pony")
# And test reversal
with connection.schema_editor() as editor:
operation.database_backwards(
"test_adfligsw", editor, new_state, project_state
)
self.assertTableNotExists("test_adfligsw_pony")
@override_settings(TEST_SWAP_MODEL="migrations.SomeFakeModel")
def test_indexes_ignore_swapped(self):
"""
Add/RemoveIndex operations ignore swapped models.
"""
operation = migrations.AddIndex(
"Pony", models.Index(fields=["pink"], name="my_name_idx")
)
project_state, new_state = self.make_test_state("test_adinigsw", operation)
with connection.schema_editor() as editor:
# No database queries should be run for swapped models
operation.database_forwards(
"test_adinigsw", editor, project_state, new_state
)
operation.database_backwards(
"test_adinigsw", editor, new_state, project_state
)
operation = migrations.RemoveIndex(
"Pony", models.Index(fields=["pink"], name="my_name_idx")
)
project_state, new_state = self.make_test_state("test_rminigsw", operation)
with connection.schema_editor() as editor:
operation.database_forwards(
"test_rminigsw", editor, project_state, new_state
)
operation.database_backwards(
"test_rminigsw", editor, new_state, project_state
)
class TestCreateModel(SimpleTestCase):
def test_references_model_mixin(self):
migrations.CreateModel(
"name",
fields=[],
bases=(Mixin, models.Model),
).references_model("other_model", "migrations")
class FieldOperationTests(SimpleTestCase):
def test_references_model(self):
operation = FieldOperation(
"MoDel", "field", models.ForeignKey("Other", models.CASCADE)
)
# Model name match.
self.assertIs(operation.references_model("mOdEl", "migrations"), True)
# Referenced field.
self.assertIs(operation.references_model("oTher", "migrations"), True)
# Doesn't reference.
self.assertIs(operation.references_model("Whatever", "migrations"), False)
def test_references_field_by_name(self):
operation = FieldOperation("MoDel", "field", models.BooleanField(default=False))
self.assertIs(operation.references_field("model", "field", "migrations"), True)
def test_references_field_by_remote_field_model(self):
operation = FieldOperation(
"Model", "field", models.ForeignKey("Other", models.CASCADE)
)
self.assertIs(
operation.references_field("Other", "whatever", "migrations"), True
)
self.assertIs(
operation.references_field("Missing", "whatever", "migrations"), False
)
def test_references_field_by_from_fields(self):
operation = FieldOperation(
"Model",
"field",
models.fields.related.ForeignObject(
"Other", models.CASCADE, ["from"], ["to"]
),
)
self.assertIs(operation.references_field("Model", "from", "migrations"), True)
self.assertIs(operation.references_field("Model", "to", "migrations"), False)
self.assertIs(operation.references_field("Other", "from", "migrations"), False)
self.assertIs(operation.references_field("Model", "to", "migrations"), False)
def test_references_field_by_to_fields(self):
operation = FieldOperation(
"Model",
"field",
models.ForeignKey("Other", models.CASCADE, to_field="field"),
)
self.assertIs(operation.references_field("Other", "field", "migrations"), True)
self.assertIs(
operation.references_field("Other", "whatever", "migrations"), False
)
self.assertIs(
operation.references_field("Missing", "whatever", "migrations"), False
)
def test_references_field_by_through(self):
operation = FieldOperation(
"Model", "field", models.ManyToManyField("Other", through="Through")
)
self.assertIs(
operation.references_field("Other", "whatever", "migrations"), True
)
self.assertIs(
operation.references_field("Through", "whatever", "migrations"), True
)
self.assertIs(
operation.references_field("Missing", "whatever", "migrations"), False
)
def test_reference_field_by_through_fields(self):
operation = FieldOperation(
"Model",
"field",
models.ManyToManyField(
"Other", through="Through", through_fields=("first", "second")
),
)
self.assertIs(
operation.references_field("Other", "whatever", "migrations"), True
)
self.assertIs(
operation.references_field("Through", "whatever", "migrations"), False
)
self.assertIs(
operation.references_field("Through", "first", "migrations"), True
)
self.assertIs(
operation.references_field("Through", "second", "migrations"), True
)
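    # through_fields=("first", "second") names the foreign keys on the
    # through model that point at the source and target of the M2M, so
    # only those two field names count as referenced. A sketch of the
    # matching through model:
    #
    #   class Through(models.Model):
    #       first = models.ForeignKey("Model", models.CASCADE)
    #       second = models.ForeignKey("Other", models.CASCADE)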

from unittest import mock
from django.conf.global_settings import PASSWORD_HASHERS
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.hashers import get_hasher
from django.contrib.auth.models import (
AnonymousUser,
Group,
Permission,
User,
UserManager,
)
from django.contrib.contenttypes.models import ContentType
from django.core import mail
from django.db import connection, migrations
from django.db.migrations.state import ModelState, ProjectState
from django.db.models.signals import post_save
from django.test import SimpleTestCase, TestCase, TransactionTestCase, override_settings
from django.test.utils import ignore_warnings
from django.utils.deprecation import RemovedInDjango51Warning
from .models import CustomEmailField, IntegerUsernameUser
class NaturalKeysTestCase(TestCase):
def test_user_natural_key(self):
staff_user = User.objects.create_user(username="staff")
self.assertEqual(User.objects.get_by_natural_key("staff"), staff_user)
self.assertEqual(staff_user.natural_key(), ("staff",))
def test_group_natural_key(self):
users_group = Group.objects.create(name="users")
self.assertEqual(Group.objects.get_by_natural_key("users"), users_group)
class LoadDataWithoutNaturalKeysTestCase(TestCase):
fixtures = ["regular.json"]
def test_user_is_created_and_added_to_group(self):
user = User.objects.get(username="my_username")
group = Group.objects.get(name="my_group")
self.assertEqual(group, user.groups.get())
class LoadDataWithNaturalKeysTestCase(TestCase):
fixtures = ["natural.json"]
def test_user_is_created_and_added_to_group(self):
user = User.objects.get(username="my_username")
group = Group.objects.get(name="my_group")
self.assertEqual(group, user.groups.get())
class LoadDataWithNaturalKeysAndMultipleDatabasesTestCase(TestCase):
databases = {"default", "other"}
def test_load_data_with_user_permissions(self):
# Create test contenttypes for both databases
default_objects = [
ContentType.objects.db_manager("default").create(
model="examplemodela",
app_label="app_a",
),
ContentType.objects.db_manager("default").create(
model="examplemodelb",
app_label="app_b",
),
]
other_objects = [
ContentType.objects.db_manager("other").create(
model="examplemodelb",
app_label="app_b",
),
ContentType.objects.db_manager("other").create(
model="examplemodela",
app_label="app_a",
),
]
# Now we create the test UserPermission
Permission.objects.db_manager("default").create(
name="Can delete example model b",
codename="delete_examplemodelb",
content_type=default_objects[1],
)
Permission.objects.db_manager("other").create(
name="Can delete example model b",
codename="delete_examplemodelb",
content_type=other_objects[0],
)
perm_default = Permission.objects.get_by_natural_key(
"delete_examplemodelb",
"app_b",
"examplemodelb",
)
perm_other = Permission.objects.db_manager("other").get_by_natural_key(
"delete_examplemodelb",
"app_b",
"examplemodelb",
)
self.assertEqual(perm_default.content_type_id, default_objects[1].id)
self.assertEqual(perm_other.content_type_id, other_objects[0].id)
class UserManagerTestCase(TransactionTestCase):
available_apps = [
"auth_tests",
"django.contrib.auth",
"django.contrib.contenttypes",
]
def test_create_user(self):
email_lowercase = "[email protected]"
user = User.objects.create_user("user", email_lowercase)
self.assertEqual(user.email, email_lowercase)
self.assertEqual(user.username, "user")
self.assertFalse(user.has_usable_password())
def test_create_user_email_domain_normalize_rfc3696(self):
# According to RFC 3696 Section 3 the "@" symbol can be part of the
# local part of an email address.
returned = UserManager.normalize_email(r"Abc\@[email protected]")
self.assertEqual(returned, r"Abc\@[email protected]")
def test_create_user_email_domain_normalize(self):
returned = UserManager.normalize_email("[email protected]")
self.assertEqual(returned, "[email protected]")
def test_create_user_email_domain_normalize_with_whitespace(self):
returned = UserManager.normalize_email(r"email\ [email protected]")
self.assertEqual(returned, r"email\ [email protected]")
def test_empty_username(self):
with self.assertRaisesMessage(ValueError, "The given username must be set"):
User.objects.create_user(username="")
def test_create_user_is_staff(self):
email = "[email protected]"
user = User.objects.create_user("user", email, is_staff=True)
self.assertEqual(user.email, email)
self.assertEqual(user.username, "user")
self.assertTrue(user.is_staff)
def test_create_super_user_raises_error_on_false_is_superuser(self):
with self.assertRaisesMessage(
ValueError, "Superuser must have is_superuser=True."
):
User.objects.create_superuser(
username="test",
email="[email protected]",
password="test",
is_superuser=False,
)
def test_create_superuser_raises_error_on_false_is_staff(self):
with self.assertRaisesMessage(ValueError, "Superuser must have is_staff=True."):
User.objects.create_superuser(
username="test",
email="[email protected]",
password="test",
is_staff=False,
)
@ignore_warnings(category=RemovedInDjango51Warning)
def test_make_random_password(self):
allowed_chars = "abcdefg"
password = UserManager().make_random_password(5, allowed_chars)
self.assertEqual(len(password), 5)
for char in password:
self.assertIn(char, allowed_chars)
def test_make_random_password_warning(self):
msg = "BaseUserManager.make_random_password() is deprecated."
with self.assertWarnsMessage(RemovedInDjango51Warning, msg):
UserManager().make_random_password()
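    # make_random_password() is a thin wrapper around
    # django.utils.crypto.get_random_string(), which remains available:
    #
    #   from django.utils.crypto import get_random_string
    #   password = get_random_string(12)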
def test_runpython_manager_methods(self):
def forwards(apps, schema_editor):
UserModel = apps.get_model("auth", "User")
user = UserModel.objects.create_user("user1", password="secure")
self.assertIsInstance(user, UserModel)
operation = migrations.RunPython(forwards, migrations.RunPython.noop)
project_state = ProjectState()
project_state.add_model(ModelState.from_model(User))
project_state.add_model(ModelState.from_model(Group))
project_state.add_model(ModelState.from_model(Permission))
project_state.add_model(ModelState.from_model(ContentType))
new_state = project_state.clone()
with connection.schema_editor() as editor:
operation.state_forwards("test_manager_methods", new_state)
operation.database_forwards(
"test_manager_methods",
editor,
project_state,
new_state,
)
user = User.objects.get(username="user1")
self.assertTrue(user.check_password("secure"))
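    # This works because BaseUserManager sets use_in_migrations = True, so
    # the historical model returned by apps.get_model() keeps create_user()
    # and friends; a custom manager without that flag would be missing from
    # migration state.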
class AbstractBaseUserTests(SimpleTestCase):
def test_has_usable_password(self):
"""
Passwords are usable even if they don't correspond to a hasher in
settings.PASSWORD_HASHERS.
"""
        self.assertIs(User(password="some-gibberish").has_usable_password(), True)
def test_normalize_username(self):
self.assertEqual(IntegerUsernameUser().normalize_username(123), 123)
def test_clean_normalize_username(self):
# The normalization happens in AbstractBaseUser.clean()
ohm_username = "iamtheΩ" # U+2126 OHM SIGN
for model in ("auth.User", "auth_tests.CustomUser"):
with self.subTest(model=model), self.settings(AUTH_USER_MODEL=model):
User = get_user_model()
user = User(**{User.USERNAME_FIELD: ohm_username, "password": "foo"})
user.clean()
username = user.get_username()
self.assertNotEqual(username, ohm_username)
self.assertEqual(
username, "iamtheΩ"
) # U+03A9 GREEK CAPITAL LETTER OMEGA
def test_default_email(self):
self.assertEqual(AbstractBaseUser.get_email_field_name(), "email")
def test_custom_email(self):
user = CustomEmailField()
self.assertEqual(user.get_email_field_name(), "email_address")
class AbstractUserTestCase(TestCase):
def test_email_user(self):
# valid send_mail parameters
kwargs = {
"fail_silently": False,
"auth_user": None,
"auth_password": None,
"connection": None,
"html_message": None,
}
user = User(email="[email protected]")
user.email_user(
subject="Subject here",
message="This is a message",
from_email="[email protected]",
**kwargs,
)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0]
self.assertEqual(message.subject, "Subject here")
self.assertEqual(message.body, "This is a message")
self.assertEqual(message.from_email, "[email protected]")
self.assertEqual(message.to, [user.email])
def test_last_login_default(self):
user1 = User.objects.create(username="user1")
self.assertIsNone(user1.last_login)
user2 = User.objects.create_user(username="user2")
self.assertIsNone(user2.last_login)
def test_user_clean_normalize_email(self):
user = User(username="user", password="foo", email="[email protected]")
user.clean()
self.assertEqual(user.email, "[email protected]")
def test_user_double_save(self):
"""
Calling user.save() twice should trigger password_changed() once.
"""
user = User.objects.create_user(username="user", password="foo")
user.set_password("bar")
with mock.patch(
"django.contrib.auth.password_validation.password_changed"
) as pw_changed:
user.save()
self.assertEqual(pw_changed.call_count, 1)
user.save()
self.assertEqual(pw_changed.call_count, 1)
@override_settings(PASSWORD_HASHERS=PASSWORD_HASHERS)
def test_check_password_upgrade(self):
"""
password_changed() shouldn't be called if User.check_password()
triggers a hash iteration upgrade.
"""
user = User.objects.create_user(username="user", password="foo")
initial_password = user.password
self.assertTrue(user.check_password("foo"))
hasher = get_hasher("default")
self.assertEqual("pbkdf2_sha256", hasher.algorithm)
old_iterations = hasher.iterations
try:
# Upgrade the password iterations
hasher.iterations = old_iterations + 1
with mock.patch(
"django.contrib.auth.password_validation.password_changed"
) as pw_changed:
user.check_password("foo")
self.assertEqual(pw_changed.call_count, 0)
self.assertNotEqual(initial_password, user.password)
finally:
hasher.iterations = old_iterations
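    # check_password() hands the hasher a setter callback; when the stored
    # hash is outdated (here, a lower iteration count) the password is
    # re-hashed and saved transparently, and password_changed() is not
    # signalled because the password itself did not change.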
class CustomModelBackend(ModelBackend):
def with_perm(
self, perm, is_active=True, include_superusers=True, backend=None, obj=None
):
if obj is not None and obj.username == "charliebrown":
return User.objects.filter(pk=obj.pk)
return User.objects.filter(username__startswith="charlie")
class UserWithPermTestCase(TestCase):
@classmethod
def setUpTestData(cls):
content_type = ContentType.objects.get_for_model(Group)
cls.permission = Permission.objects.create(
name="test",
content_type=content_type,
codename="test",
)
# User with permission.
cls.user1 = User.objects.create_user("user 1", "[email protected]")
cls.user1.user_permissions.add(cls.permission)
# User with group permission.
group1 = Group.objects.create(name="group 1")
group1.permissions.add(cls.permission)
group2 = Group.objects.create(name="group 2")
group2.permissions.add(cls.permission)
cls.user2 = User.objects.create_user("user 2", "[email protected]")
cls.user2.groups.add(group1, group2)
# Users without permissions.
cls.user_charlie = User.objects.create_user("charlie", "[email protected]")
cls.user_charlie_b = User.objects.create_user(
"charliebrown", "[email protected]"
)
# Superuser.
cls.superuser = User.objects.create_superuser(
"superuser",
"[email protected]",
"superpassword",
)
# Inactive user with permission.
cls.inactive_user = User.objects.create_user(
"inactive_user",
"[email protected]",
is_active=False,
)
cls.inactive_user.user_permissions.add(cls.permission)
def test_invalid_permission_name(self):
msg = "Permission name should be in the form app_label.permission_codename."
for perm in ("nodots", "too.many.dots", "...", ""):
with self.subTest(perm), self.assertRaisesMessage(ValueError, msg):
User.objects.with_perm(perm)
def test_invalid_permission_type(self):
msg = "The `perm` argument must be a string or a permission instance."
for perm in (b"auth.test", object(), None):
with self.subTest(perm), self.assertRaisesMessage(TypeError, msg):
User.objects.with_perm(perm)
def test_invalid_backend_type(self):
msg = "backend must be a dotted import path string (got %r)."
for backend in (b"auth_tests.CustomModelBackend", object()):
with self.subTest(backend):
with self.assertRaisesMessage(TypeError, msg % backend):
User.objects.with_perm("auth.test", backend=backend)
def test_basic(self):
active_users = [self.user1, self.user2]
tests = [
({}, [*active_users, self.superuser]),
({"obj": self.user1}, []),
# Only inactive users.
({"is_active": False}, [self.inactive_user]),
# All users.
({"is_active": None}, [*active_users, self.superuser, self.inactive_user]),
# Exclude superusers.
({"include_superusers": False}, active_users),
(
{"include_superusers": False, "is_active": False},
[self.inactive_user],
),
(
{"include_superusers": False, "is_active": None},
[*active_users, self.inactive_user],
),
]
for kwargs, expected_users in tests:
for perm in ("auth.test", self.permission):
with self.subTest(perm=perm, **kwargs):
self.assertCountEqual(
User.objects.with_perm(perm, **kwargs),
expected_users,
)
@override_settings(
AUTHENTICATION_BACKENDS=["django.contrib.auth.backends.BaseBackend"]
)
def test_backend_without_with_perm(self):
self.assertSequenceEqual(User.objects.with_perm("auth.test"), [])
def test_nonexistent_permission(self):
self.assertSequenceEqual(User.objects.with_perm("auth.perm"), [self.superuser])
def test_nonexistent_backend(self):
with self.assertRaises(ImportError):
User.objects.with_perm(
"auth.test",
backend="invalid.backend.CustomModelBackend",
)
@override_settings(
AUTHENTICATION_BACKENDS=["auth_tests.test_models.CustomModelBackend"]
)
def test_custom_backend(self):
for perm in ("auth.test", self.permission):
with self.subTest(perm):
self.assertCountEqual(
User.objects.with_perm(perm),
[self.user_charlie, self.user_charlie_b],
)
@override_settings(
AUTHENTICATION_BACKENDS=["auth_tests.test_models.CustomModelBackend"]
)
def test_custom_backend_pass_obj(self):
for perm in ("auth.test", self.permission):
with self.subTest(perm):
self.assertSequenceEqual(
User.objects.with_perm(perm, obj=self.user_charlie_b),
[self.user_charlie_b],
)
@override_settings(
AUTHENTICATION_BACKENDS=[
"auth_tests.test_models.CustomModelBackend",
"django.contrib.auth.backends.ModelBackend",
]
)
def test_multiple_backends(self):
msg = (
"You have multiple authentication backends configured and "
"therefore must provide the `backend` argument."
)
with self.assertRaisesMessage(ValueError, msg):
User.objects.with_perm("auth.test")
backend = "auth_tests.test_models.CustomModelBackend"
self.assertCountEqual(
User.objects.with_perm("auth.test", backend=backend),
[self.user_charlie, self.user_charlie_b],
)
class IsActiveTestCase(TestCase):
"""
Tests the behavior of the guaranteed is_active attribute
"""
def test_builtin_user_isactive(self):
user = User.objects.create(username="foo", email="[email protected]")
# is_active is true by default
self.assertIs(user.is_active, True)
user.is_active = False
user.save()
user_fetched = User.objects.get(pk=user.pk)
# the is_active flag is saved
self.assertFalse(user_fetched.is_active)
@override_settings(AUTH_USER_MODEL="auth_tests.IsActiveTestUser1")
def test_is_active_field_default(self):
"""
        Tests that the default value for is_active is provided.
"""
UserModel = get_user_model()
user = UserModel(username="foo")
self.assertIs(user.is_active, True)
# you can set the attribute - but it will not save
user.is_active = False
# there should be no problem saving - but the attribute is not saved
user.save()
user_fetched = UserModel._default_manager.get(pk=user.pk)
# the attribute is always true for newly retrieved instance
self.assertIs(user_fetched.is_active, True)
class TestCreateSuperUserSignals(TestCase):
"""
Simple test case for ticket #20541
"""
def post_save_listener(self, *args, **kwargs):
self.signals_count += 1
def setUp(self):
self.signals_count = 0
post_save.connect(self.post_save_listener, sender=User)
def tearDown(self):
post_save.disconnect(self.post_save_listener, sender=User)
def test_create_user(self):
User.objects.create_user("JohnDoe")
self.assertEqual(self.signals_count, 1)
def test_create_superuser(self):
User.objects.create_superuser("JohnDoe", "[email protected]", "1")
self.assertEqual(self.signals_count, 1)
class AnonymousUserTests(SimpleTestCase):
no_repr_msg = "Django doesn't provide a DB representation for AnonymousUser."
def setUp(self):
self.user = AnonymousUser()
def test_properties(self):
self.assertIsNone(self.user.pk)
self.assertEqual(self.user.username, "")
self.assertEqual(self.user.get_username(), "")
self.assertIs(self.user.is_anonymous, True)
self.assertIs(self.user.is_authenticated, False)
self.assertIs(self.user.is_staff, False)
self.assertIs(self.user.is_active, False)
self.assertIs(self.user.is_superuser, False)
self.assertEqual(self.user.groups.count(), 0)
self.assertEqual(self.user.user_permissions.count(), 0)
self.assertEqual(self.user.get_user_permissions(), set())
self.assertEqual(self.user.get_group_permissions(), set())
def test_str(self):
self.assertEqual(str(self.user), "AnonymousUser")
def test_eq(self):
self.assertEqual(self.user, AnonymousUser())
self.assertNotEqual(self.user, User("super", "[email protected]", "super"))
def test_hash(self):
self.assertEqual(hash(self.user), 1)
def test_int(self):
msg = (
"Cannot cast AnonymousUser to int. Are you trying to use it in "
"place of User?"
)
with self.assertRaisesMessage(TypeError, msg):
int(self.user)
def test_delete(self):
with self.assertRaisesMessage(NotImplementedError, self.no_repr_msg):
self.user.delete()
def test_save(self):
with self.assertRaisesMessage(NotImplementedError, self.no_repr_msg):
self.user.save()
def test_set_password(self):
with self.assertRaisesMessage(NotImplementedError, self.no_repr_msg):
self.user.set_password("password")
def test_check_password(self):
with self.assertRaisesMessage(NotImplementedError, self.no_repr_msg):
self.user.check_password("password")
class GroupTests(SimpleTestCase):
def test_str(self):
g = Group(name="Users")
self.assertEqual(str(g), "Users")
class PermissionTests(TestCase):
def test_str(self):
p = Permission.objects.get(codename="view_customemailfield")
self.assertEqual(
str(p), "auth_tests | custom email field | Can view custom email field"
)
|
afd1cbf3e97e73404ed7487e31c5d6b04c7f0d4e1f5b894421d6e58f188f5956 | import os
import shutil
import sys
import tempfile
import threading
import time
import unittest
from datetime import datetime, timedelta
from datetime import timezone as datetime_timezone
from io import StringIO
from pathlib import Path
from urllib.request import urlopen
from django.core.cache import cache
from django.core.exceptions import SuspiciousFileOperation
from django.core.files.base import ContentFile, File
from django.core.files.storage import FileSystemStorage
from django.core.files.storage import Storage as BaseStorage
from django.core.files.storage import default_storage, get_storage_class
from django.core.files.uploadedfile import (
InMemoryUploadedFile,
SimpleUploadedFile,
TemporaryUploadedFile,
)
from django.db.models import FileField
from django.db.models.fields.files import FileDescriptor
from django.test import LiveServerTestCase, SimpleTestCase, TestCase, override_settings
from django.test.utils import requires_tz_support
from django.urls import NoReverseMatch, reverse_lazy
from django.utils import timezone
from django.utils._os import symlinks_supported
from .models import Storage, callable_storage, temp_storage, temp_storage_location
FILE_SUFFIX_REGEX = "[A-Za-z0-9]{7}"
class GetStorageClassTests(SimpleTestCase):
def test_get_filesystem_storage(self):
"""
get_storage_class returns the class for a storage backend name/path.
"""
self.assertEqual(
get_storage_class("django.core.files.storage.FileSystemStorage"),
FileSystemStorage,
)
def test_get_invalid_storage_module(self):
"""
        get_storage_class raises an error if the requested import doesn't exist.
"""
with self.assertRaisesMessage(ImportError, "No module named 'storage'"):
get_storage_class("storage.NonexistentStorage")
def test_get_nonexistent_storage_class(self):
"""
        get_storage_class raises an error if the requested class doesn't exist.
"""
with self.assertRaises(ImportError):
get_storage_class("django.core.files.storage.NonexistentStorage")
def test_get_nonexistent_storage_module(self):
"""
        get_storage_class raises an error if the requested module doesn't exist.
"""
with self.assertRaisesMessage(
ImportError, "No module named 'django.core.files.nonexistent_storage'"
):
get_storage_class(
"django.core.files.nonexistent_storage.NonexistentStorage"
)
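# get_storage_class() behaves like a thin wrapper around
# django.utils.module_loading.import_string, which is why the failure tests
# above surface as plain ImportError. A minimal equivalent, assuming the
# DEFAULT_FILE_STORAGE setting as the fallback (a sketch, not the exact
# source):
#
#   from django.utils.module_loading import import_string
#
#   def get_storage_class(import_path=None):
#       return import_string(import_path or settings.DEFAULT_FILE_STORAGE)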
class FileSystemStorageTests(unittest.TestCase):
def test_deconstruction(self):
path, args, kwargs = temp_storage.deconstruct()
self.assertEqual(path, "django.core.files.storage.FileSystemStorage")
self.assertEqual(args, ())
self.assertEqual(kwargs, {"location": temp_storage_location})
kwargs_orig = {
"location": temp_storage_location,
"base_url": "http://myfiles.example.com/",
}
storage = FileSystemStorage(**kwargs_orig)
path, args, kwargs = storage.deconstruct()
self.assertEqual(kwargs, kwargs_orig)
def test_lazy_base_url_init(self):
"""
FileSystemStorage.__init__() shouldn't evaluate base_url.
"""
storage = FileSystemStorage(base_url=reverse_lazy("app:url"))
with self.assertRaises(NoReverseMatch):
storage.url(storage.base_url)
class FileStorageTests(SimpleTestCase):
storage_class = FileSystemStorage
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = self.storage_class(
location=self.temp_dir, base_url="/test_media_url/"
)
# Set up a second temporary directory which is ensured to have a mixed
# case name.
self.temp_dir2 = tempfile.mkdtemp(suffix="aBc")
def tearDown(self):
shutil.rmtree(self.temp_dir)
shutil.rmtree(self.temp_dir2)
def test_empty_location(self):
"""
        An empty location falls back to the current working directory.
"""
storage = self.storage_class(location="")
self.assertEqual(storage.base_location, "")
self.assertEqual(storage.location, os.getcwd())
def test_file_access_options(self):
"""
Standard file access options are available, and work as expected.
"""
self.assertFalse(self.storage.exists("storage_test"))
f = self.storage.open("storage_test", "w")
f.write("storage contents")
f.close()
self.assertTrue(self.storage.exists("storage_test"))
f = self.storage.open("storage_test", "r")
self.assertEqual(f.read(), "storage contents")
f.close()
self.storage.delete("storage_test")
self.assertFalse(self.storage.exists("storage_test"))
def _test_file_time_getter(self, getter):
# Check for correct behavior under both USE_TZ=True and USE_TZ=False.
# The tests are similar since they both set up a situation where the
# system time zone, Django's TIME_ZONE, and UTC are distinct.
self._test_file_time_getter_tz_handling_on(getter)
self._test_file_time_getter_tz_handling_off(getter)
@override_settings(USE_TZ=True, TIME_ZONE="Africa/Algiers")
def _test_file_time_getter_tz_handling_on(self, getter):
# Django's TZ (and hence the system TZ) is set to Africa/Algiers which
# is UTC+1 and has no DST change. We can set the Django TZ to something
# else so that UTC, Django's TIME_ZONE, and the system timezone are all
# different.
now_in_algiers = timezone.make_aware(datetime.now())
with timezone.override(timezone.get_fixed_timezone(-300)):
# At this point the system TZ is +1 and the Django TZ
# is -5. The following will be aware in UTC.
now = timezone.now()
self.assertFalse(self.storage.exists("test.file.tz.on"))
f = ContentFile("custom contents")
f_name = self.storage.save("test.file.tz.on", f)
self.addCleanup(self.storage.delete, f_name)
dt = getter(f_name)
# dt should be aware, in UTC
self.assertTrue(timezone.is_aware(dt))
self.assertEqual(now.tzname(), dt.tzname())
# The three timezones are indeed distinct.
naive_now = datetime.now()
algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
django_offset = timezone.get_current_timezone().utcoffset(naive_now)
utc_offset = datetime_timezone.utc.utcoffset(naive_now)
self.assertGreater(algiers_offset, utc_offset)
self.assertLess(django_offset, utc_offset)
# dt and now should be the same effective time.
self.assertLess(abs(dt - now), timedelta(seconds=2))
@override_settings(USE_TZ=False, TIME_ZONE="Africa/Algiers")
def _test_file_time_getter_tz_handling_off(self, getter):
# Django's TZ (and hence the system TZ) is set to Africa/Algiers which
# is UTC+1 and has no DST change. We can set the Django TZ to something
# else so that UTC, Django's TIME_ZONE, and the system timezone are all
# different.
now_in_algiers = timezone.make_aware(datetime.now())
with timezone.override(timezone.get_fixed_timezone(-300)):
# At this point the system TZ is +1 and the Django TZ
# is -5.
self.assertFalse(self.storage.exists("test.file.tz.off"))
f = ContentFile("custom contents")
f_name = self.storage.save("test.file.tz.off", f)
self.addCleanup(self.storage.delete, f_name)
dt = getter(f_name)
# dt should be naive, in system (+1) TZ
self.assertTrue(timezone.is_naive(dt))
# The three timezones are indeed distinct.
naive_now = datetime.now()
algiers_offset = now_in_algiers.tzinfo.utcoffset(naive_now)
django_offset = timezone.get_current_timezone().utcoffset(naive_now)
utc_offset = datetime_timezone.utc.utcoffset(naive_now)
self.assertGreater(algiers_offset, utc_offset)
self.assertLess(django_offset, utc_offset)
# dt and naive_now should be the same effective time.
self.assertLess(abs(dt - naive_now), timedelta(seconds=2))
# If we convert dt to an aware object using the Algiers
# timezone then it should be the same effective time to
# now_in_algiers.
_dt = timezone.make_aware(dt, now_in_algiers.tzinfo)
self.assertLess(abs(_dt - now_in_algiers), timedelta(seconds=2))
def test_file_get_accessed_time(self):
"""
        File storage returns a datetime object for the last accessed time of
a file.
"""
self.assertFalse(self.storage.exists("test.file"))
f = ContentFile("custom contents")
f_name = self.storage.save("test.file", f)
self.addCleanup(self.storage.delete, f_name)
atime = self.storage.get_accessed_time(f_name)
self.assertEqual(
atime, datetime.fromtimestamp(os.path.getatime(self.storage.path(f_name)))
)
self.assertLess(
timezone.now() - self.storage.get_accessed_time(f_name),
timedelta(seconds=2),
)
@requires_tz_support
def test_file_get_accessed_time_timezone(self):
self._test_file_time_getter(self.storage.get_accessed_time)
def test_file_get_created_time(self):
"""
File storage returns a datetime for the creation time of a file.
"""
self.assertFalse(self.storage.exists("test.file"))
f = ContentFile("custom contents")
f_name = self.storage.save("test.file", f)
self.addCleanup(self.storage.delete, f_name)
ctime = self.storage.get_created_time(f_name)
self.assertEqual(
ctime, datetime.fromtimestamp(os.path.getctime(self.storage.path(f_name)))
)
self.assertLess(
timezone.now() - self.storage.get_created_time(f_name), timedelta(seconds=2)
)
@requires_tz_support
def test_file_get_created_time_timezone(self):
self._test_file_time_getter(self.storage.get_created_time)
def test_file_get_modified_time(self):
"""
File storage returns a datetime for the last modified time of a file.
"""
self.assertFalse(self.storage.exists("test.file"))
f = ContentFile("custom contents")
f_name = self.storage.save("test.file", f)
self.addCleanup(self.storage.delete, f_name)
mtime = self.storage.get_modified_time(f_name)
self.assertEqual(
mtime, datetime.fromtimestamp(os.path.getmtime(self.storage.path(f_name)))
)
self.assertLess(
timezone.now() - self.storage.get_modified_time(f_name),
timedelta(seconds=2),
)
@requires_tz_support
def test_file_get_modified_time_timezone(self):
self._test_file_time_getter(self.storage.get_modified_time)
def test_file_save_without_name(self):
"""
File storage extracts the filename from the content object if no
name is given explicitly.
"""
self.assertFalse(self.storage.exists("test.file"))
f = ContentFile("custom contents")
f.name = "test.file"
storage_f_name = self.storage.save(None, f)
self.assertEqual(storage_f_name, f.name)
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, f.name)))
self.storage.delete(storage_f_name)
def test_file_save_with_path(self):
"""
Saving a pathname should create intermediate directories as necessary.
"""
self.assertFalse(self.storage.exists("path/to"))
self.storage.save("path/to/test.file", ContentFile("file saved with path"))
self.assertTrue(self.storage.exists("path/to"))
with self.storage.open("path/to/test.file") as f:
self.assertEqual(f.read(), b"file saved with path")
self.assertTrue(
os.path.exists(os.path.join(self.temp_dir, "path", "to", "test.file"))
)
self.storage.delete("path/to/test.file")
def test_file_save_abs_path(self):
test_name = "path/to/test.file"
f = ContentFile("file saved with path")
f_name = self.storage.save(os.path.join(self.temp_dir, test_name), f)
self.assertEqual(f_name, test_name)
@unittest.skipUnless(
symlinks_supported(), "Must be able to symlink to run this test."
)
def test_file_save_broken_symlink(self):
"""A new path is created on save when a broken symlink is supplied."""
nonexistent_file_path = os.path.join(self.temp_dir, "nonexistent.txt")
broken_symlink_path = os.path.join(self.temp_dir, "symlink.txt")
os.symlink(nonexistent_file_path, broken_symlink_path)
f = ContentFile("some content")
f_name = self.storage.save(broken_symlink_path, f)
self.assertIs(os.path.exists(os.path.join(self.temp_dir, f_name)), True)
def test_save_doesnt_close(self):
with TemporaryUploadedFile("test", "text/plain", 1, "utf8") as file:
file.write(b"1")
file.seek(0)
self.assertFalse(file.closed)
self.storage.save("path/to/test.file", file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
file = InMemoryUploadedFile(StringIO("1"), "", "test", "text/plain", 1, "utf8")
with file:
self.assertFalse(file.closed)
self.storage.save("path/to/test.file", file)
self.assertFalse(file.closed)
self.assertFalse(file.file.closed)
def test_file_path(self):
"""
File storage returns the full path of a file
"""
self.assertFalse(self.storage.exists("test.file"))
f = ContentFile("custom contents")
f_name = self.storage.save("test.file", f)
self.assertEqual(self.storage.path(f_name), os.path.join(self.temp_dir, f_name))
self.storage.delete(f_name)
def test_file_url(self):
"""
File storage returns a url to access a given file from the web.
"""
self.assertEqual(
self.storage.url("test.file"), self.storage.base_url + "test.file"
)
# should encode special chars except ~!*()'
        # as the encodeURIComponent() JavaScript function does
self.assertEqual(
self.storage.url(r"~!*()'@#$%^&*abc`+ =.file"),
"/test_media_url/~!*()'%40%23%24%25%5E%26*abc%60%2B%20%3D.file",
)
self.assertEqual(self.storage.url("ab\0c"), "/test_media_url/ab%00c")
# should translate os path separator(s) to the url path separator
self.assertEqual(
self.storage.url("""a/b\\c.file"""), "/test_media_url/a/b/c.file"
)
# #25905: remove leading slashes from file names to prevent unsafe url output
self.assertEqual(self.storage.url("/evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url(r"\evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url("///evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url(r"\\\evil.com"), "/test_media_url/evil.com")
self.assertEqual(self.storage.url(None), "/test_media_url/")
def test_base_url(self):
"""
File storage returns a url even when its base_url is unset or modified.
"""
self.storage.base_url = None
with self.assertRaises(ValueError):
self.storage.url("test.file")
# #22717: missing ending slash in base_url should be auto-corrected
storage = self.storage_class(
location=self.temp_dir, base_url="/no_ending_slash"
)
self.assertEqual(
storage.url("test.file"), "%s%s" % (storage.base_url, "test.file")
)
def test_listdir(self):
"""
File storage returns a tuple containing directories and files.
"""
self.assertFalse(self.storage.exists("storage_test_1"))
self.assertFalse(self.storage.exists("storage_test_2"))
self.assertFalse(self.storage.exists("storage_dir_1"))
self.storage.save("storage_test_1", ContentFile("custom content"))
self.storage.save("storage_test_2", ContentFile("custom content"))
os.mkdir(os.path.join(self.temp_dir, "storage_dir_1"))
self.addCleanup(self.storage.delete, "storage_test_1")
self.addCleanup(self.storage.delete, "storage_test_2")
for directory in ("", Path("")):
with self.subTest(directory=directory):
dirs, files = self.storage.listdir(directory)
self.assertEqual(set(dirs), {"storage_dir_1"})
self.assertEqual(set(files), {"storage_test_1", "storage_test_2"})
def test_file_storage_prevents_directory_traversal(self):
"""
File storage prevents directory traversal (files can only be accessed if
they're below the storage location).
"""
with self.assertRaises(SuspiciousFileOperation):
self.storage.exists("..")
with self.assertRaises(SuspiciousFileOperation):
self.storage.exists("/etc/passwd")
def test_file_storage_preserves_filename_case(self):
"""The storage backend should preserve case of filenames."""
# Create a storage backend associated with the mixed case name
# directory.
other_temp_storage = self.storage_class(location=self.temp_dir2)
# Ask that storage backend to store a file with a mixed case filename.
mixed_case = "CaSe_SeNsItIvE"
file = other_temp_storage.open(mixed_case, "w")
file.write("storage contents")
file.close()
self.assertEqual(
os.path.join(self.temp_dir2, mixed_case),
other_temp_storage.path(mixed_case),
)
other_temp_storage.delete(mixed_case)
def test_makedirs_race_handling(self):
"""
File storage should be robust against directory creation race conditions.
"""
real_makedirs = os.makedirs
# Monkey-patch os.makedirs, to simulate a normal call, a raced call,
# and an error.
def fake_makedirs(path, mode=0o777, exist_ok=False):
if path == os.path.join(self.temp_dir, "normal"):
real_makedirs(path, mode, exist_ok)
elif path == os.path.join(self.temp_dir, "raced"):
real_makedirs(path, mode, exist_ok)
if not exist_ok:
raise FileExistsError()
elif path == os.path.join(self.temp_dir, "error"):
raise PermissionError()
else:
self.fail("unexpected argument %r" % path)
try:
os.makedirs = fake_makedirs
self.storage.save("normal/test.file", ContentFile("saved normally"))
with self.storage.open("normal/test.file") as f:
self.assertEqual(f.read(), b"saved normally")
self.storage.save("raced/test.file", ContentFile("saved with race"))
with self.storage.open("raced/test.file") as f:
self.assertEqual(f.read(), b"saved with race")
# Exceptions aside from FileExistsError are raised.
with self.assertRaises(PermissionError):
self.storage.save("error/test.file", ContentFile("not saved"))
finally:
os.makedirs = real_makedirs
def test_remove_race_handling(self):
"""
File storage should be robust against file removal race conditions.
"""
real_remove = os.remove
# Monkey-patch os.remove, to simulate a normal call, a raced call,
# and an error.
def fake_remove(path):
if path == os.path.join(self.temp_dir, "normal.file"):
real_remove(path)
elif path == os.path.join(self.temp_dir, "raced.file"):
real_remove(path)
raise FileNotFoundError()
elif path == os.path.join(self.temp_dir, "error.file"):
raise PermissionError()
else:
self.fail("unexpected argument %r" % path)
try:
os.remove = fake_remove
self.storage.save("normal.file", ContentFile("delete normally"))
self.storage.delete("normal.file")
self.assertFalse(self.storage.exists("normal.file"))
self.storage.save("raced.file", ContentFile("delete with race"))
self.storage.delete("raced.file")
            self.assertFalse(self.storage.exists("raced.file"))
# Exceptions aside from FileNotFoundError are raised.
self.storage.save("error.file", ContentFile("delete with error"))
with self.assertRaises(PermissionError):
self.storage.delete("error.file")
finally:
os.remove = real_remove
def test_file_chunks_error(self):
"""
Test behavior when file.chunks() is raising an error
"""
f1 = ContentFile("chunks fails")
def failing_chunks():
raise OSError
f1.chunks = failing_chunks
with self.assertRaises(OSError):
self.storage.save("error.file", f1)
def test_delete_no_name(self):
"""
Calling delete with an empty name should not try to remove the base
storage directory, but fail loudly (#20660).
"""
msg = "The name must be given to delete()."
with self.assertRaisesMessage(ValueError, msg):
self.storage.delete(None)
with self.assertRaisesMessage(ValueError, msg):
self.storage.delete("")
def test_delete_deletes_directories(self):
tmp_dir = tempfile.mkdtemp(dir=self.storage.location)
self.storage.delete(tmp_dir)
self.assertFalse(os.path.exists(tmp_dir))
@override_settings(
MEDIA_ROOT="media_root",
MEDIA_URL="media_url/",
FILE_UPLOAD_PERMISSIONS=0o777,
FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o777,
)
def test_setting_changed(self):
"""
        Properties that use settings values as defaults should be updated
        when the referenced settings change, while explicitly specified
        values should remain unchanged.
"""
storage = self.storage_class(
location="explicit_location",
base_url="explicit_base_url/",
file_permissions_mode=0o666,
directory_permissions_mode=0o666,
)
defaults_storage = self.storage_class()
settings = {
"MEDIA_ROOT": "overridden_media_root",
"MEDIA_URL": "/overridden_media_url/",
"FILE_UPLOAD_PERMISSIONS": 0o333,
"FILE_UPLOAD_DIRECTORY_PERMISSIONS": 0o333,
}
with self.settings(**settings):
self.assertEqual(storage.base_location, "explicit_location")
self.assertIn("explicit_location", storage.location)
self.assertEqual(storage.base_url, "explicit_base_url/")
self.assertEqual(storage.file_permissions_mode, 0o666)
self.assertEqual(storage.directory_permissions_mode, 0o666)
self.assertEqual(defaults_storage.base_location, settings["MEDIA_ROOT"])
self.assertIn(settings["MEDIA_ROOT"], defaults_storage.location)
self.assertEqual(defaults_storage.base_url, settings["MEDIA_URL"])
self.assertEqual(
defaults_storage.file_permissions_mode,
settings["FILE_UPLOAD_PERMISSIONS"],
)
self.assertEqual(
defaults_storage.directory_permissions_mode,
settings["FILE_UPLOAD_DIRECTORY_PERMISSIONS"],
)
def test_file_methods_pathlib_path(self):
p = Path("test.file")
self.assertFalse(self.storage.exists(p))
f = ContentFile("custom contents")
f_name = self.storage.save(p, f)
# Storage basic methods.
self.assertEqual(self.storage.path(p), os.path.join(self.temp_dir, p))
self.assertEqual(self.storage.size(p), 15)
self.assertEqual(self.storage.url(p), self.storage.base_url + f_name)
with self.storage.open(p) as f:
self.assertEqual(f.read(), b"custom contents")
self.addCleanup(self.storage.delete, p)
class CustomStorage(FileSystemStorage):
def get_available_name(self, name, max_length=None):
"""
Append numbers to duplicate files rather than underscores, like Trac.
"""
basename, *ext = os.path.splitext(name)
number = 2
while self.exists(name):
name = "".join([basename, ".", str(number)] + ext)
number += 1
return name
class CustomStorageTests(FileStorageTests):
storage_class = CustomStorage
def test_custom_get_available_name(self):
first = self.storage.save("custom_storage", ContentFile("custom contents"))
self.assertEqual(first, "custom_storage")
second = self.storage.save("custom_storage", ContentFile("more contents"))
self.assertEqual(second, "custom_storage.2")
self.storage.delete(first)
self.storage.delete(second)
class OverwritingStorage(FileSystemStorage):
"""
Overwrite existing files instead of appending a suffix to generate an
unused name.
"""
# Mask out O_EXCL so os.open() doesn't raise OSError if the file exists.
OS_OPEN_FLAGS = FileSystemStorage.OS_OPEN_FLAGS & ~os.O_EXCL
def get_available_name(self, name, max_length=None):
"""Override the effort to find an used name."""
return name
class OverwritingStorageTests(FileStorageTests):
storage_class = OverwritingStorage
def test_save_overwrite_behavior(self):
"""Saving to same file name twice overwrites the first file."""
name = "test.file"
self.assertFalse(self.storage.exists(name))
content_1 = b"content one"
content_2 = b"second content"
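        # Note that content_2 is longer than content_1: with O_EXCL masked
        # out the existing file is reused, and the open flags presumably
        # don't include O_TRUNC, so a *shorter* second write could leave
        # trailing bytes from the first one behind.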
f_1 = ContentFile(content_1)
f_2 = ContentFile(content_2)
stored_name_1 = self.storage.save(name, f_1)
try:
self.assertEqual(stored_name_1, name)
self.assertTrue(self.storage.exists(name))
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, name)))
with self.storage.open(name) as fp:
self.assertEqual(fp.read(), content_1)
stored_name_2 = self.storage.save(name, f_2)
self.assertEqual(stored_name_2, name)
self.assertTrue(self.storage.exists(name))
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, name)))
with self.storage.open(name) as fp:
self.assertEqual(fp.read(), content_2)
finally:
self.storage.delete(name)
class DiscardingFalseContentStorage(FileSystemStorage):
def _save(self, name, content):
if content:
return super()._save(name, content)
return ""
class DiscardingFalseContentStorageTests(FileStorageTests):
storage_class = DiscardingFalseContentStorage
def test_custom_storage_discarding_empty_content(self):
"""
When Storage.save() wraps a file-like object in File, it should include
the name argument so that bool(file) evaluates to True (#26495).
"""
output = StringIO("content")
self.storage.save("tests/stringio", output)
self.assertTrue(self.storage.exists("tests/stringio"))
with self.storage.open("tests/stringio") as f:
self.assertEqual(f.read(), b"content")
class FileFieldStorageTests(TestCase):
def tearDown(self):
shutil.rmtree(temp_storage_location)
def _storage_max_filename_length(self, storage):
"""
Query filesystem for maximum filename length (e.g. AUFS has 242).
"""
dir_to_test = storage.location
while not os.path.exists(dir_to_test):
dir_to_test = os.path.dirname(dir_to_test)
try:
return os.pathconf(dir_to_test, "PC_NAME_MAX")
except Exception:
return 255 # Should be safe on most backends
def test_files(self):
self.assertIsInstance(Storage.normal, FileDescriptor)
# An object without a file has limited functionality.
obj1 = Storage()
self.assertEqual(obj1.normal.name, "")
with self.assertRaises(ValueError):
obj1.normal.size
# Saving a file enables full functionality.
obj1.normal.save("django_test.txt", ContentFile("content"))
self.assertEqual(obj1.normal.name, "tests/django_test.txt")
self.assertEqual(obj1.normal.size, 7)
self.assertEqual(obj1.normal.read(), b"content")
obj1.normal.close()
        # File objects can be assigned to FileField attributes, but shouldn't
        # get committed until the model they're attached to is saved.
obj1.normal = SimpleUploadedFile("assignment.txt", b"content")
dirs, files = temp_storage.listdir("tests")
self.assertEqual(dirs, [])
self.assertNotIn("assignment.txt", files)
obj1.save()
dirs, files = temp_storage.listdir("tests")
self.assertEqual(sorted(files), ["assignment.txt", "django_test.txt"])
# Save another file with the same name.
obj2 = Storage()
obj2.normal.save("django_test.txt", ContentFile("more content"))
obj2_name = obj2.normal.name
self.assertRegex(obj2_name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX)
self.assertEqual(obj2.normal.size, 12)
obj2.normal.close()
# Deleting an object does not delete the file it uses.
obj2.delete()
obj2.normal.save("django_test.txt", ContentFile("more content"))
self.assertNotEqual(obj2_name, obj2.normal.name)
self.assertRegex(
obj2.normal.name, "tests/django_test_%s.txt" % FILE_SUFFIX_REGEX
)
obj2.normal.close()
def test_filefield_read(self):
# Files can be read in a little at a time, if necessary.
obj = Storage.objects.create(
normal=SimpleUploadedFile("assignment.txt", b"content")
)
obj.normal.open()
self.assertEqual(obj.normal.read(3), b"con")
self.assertEqual(obj.normal.read(), b"tent")
self.assertEqual(
list(obj.normal.chunks(chunk_size=2)), [b"co", b"nt", b"en", b"t"]
)
obj.normal.close()
def test_filefield_write(self):
# Files can be written to.
obj = Storage.objects.create(
normal=SimpleUploadedFile("rewritten.txt", b"content")
)
with obj.normal as normal:
normal.open("wb")
normal.write(b"updated")
obj.refresh_from_db()
self.assertEqual(obj.normal.read(), b"updated")
obj.normal.close()
def test_filefield_reopen(self):
obj = Storage.objects.create(
normal=SimpleUploadedFile("reopen.txt", b"content")
)
with obj.normal as normal:
normal.open()
obj.normal.open()
obj.normal.file.seek(0)
obj.normal.close()
def test_duplicate_filename(self):
# Multiple files with the same name get _(7 random chars) appended to them.
objs = [Storage() for i in range(2)]
for o in objs:
o.normal.save("multiple_files.txt", ContentFile("Same Content"))
try:
names = [o.normal.name for o in objs]
self.assertEqual(names[0], "tests/multiple_files.txt")
self.assertRegex(
names[1], "tests/multiple_files_%s.txt" % FILE_SUFFIX_REGEX
)
finally:
for o in objs:
o.delete()
def test_file_truncation(self):
        # Given that max_length is limited, when multiple files are uploaded
        # under the same name, the filename gets truncated in order to fit
        # in _(7 random chars). When most of the max_length is taken by
        # dirname + extension and there are not enough characters in the
        # filename to truncate, an exception should be raised.
objs = [Storage() for i in range(2)]
filename = "filename.ext"
for o in objs:
o.limited_length.save(filename, ContentFile("Same Content"))
try:
# Testing truncation.
names = [o.limited_length.name for o in objs]
self.assertEqual(names[0], "tests/%s" % filename)
self.assertRegex(names[1], "tests/fi_%s.ext" % FILE_SUFFIX_REGEX)
            # An exception is raised when the filename is too short to truncate.
filename = "short.longext"
objs[0].limited_length.save(filename, ContentFile("Same Content"))
with self.assertRaisesMessage(
SuspiciousFileOperation, "Storage can not find an available filename"
):
                objs[1].limited_length.save(filename, ContentFile("Same Content"))
finally:
for o in objs:
o.delete()
@unittest.skipIf(
sys.platform == "win32",
"Windows supports at most 260 characters in a path.",
)
def test_extended_length_storage(self):
        # Test FileField with max_length > 255. Most filesystems limit
        # filename length to 255 characters. The path takes extra chars.
filename = (
self._storage_max_filename_length(temp_storage) - 4
) * "a" # 4 chars for extension.
obj = Storage()
obj.extended_length.save("%s.txt" % filename, ContentFile("Same Content"))
self.assertEqual(obj.extended_length.name, "tests/%s.txt" % filename)
self.assertEqual(obj.extended_length.read(), b"Same Content")
obj.extended_length.close()
def test_filefield_default(self):
# Default values allow an object to access a single file.
temp_storage.save("tests/default.txt", ContentFile("default content"))
obj = Storage.objects.create()
self.assertEqual(obj.default.name, "tests/default.txt")
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
# But it shouldn't be deleted, even if there are no more objects using
# it.
obj.delete()
obj = Storage()
self.assertEqual(obj.default.read(), b"default content")
obj.default.close()
def test_empty_upload_to(self):
# upload_to can be empty, meaning it does not use subdirectory.
obj = Storage()
obj.empty.save("django_test.txt", ContentFile("more content"))
self.assertEqual(obj.empty.name, "django_test.txt")
self.assertEqual(obj.empty.read(), b"more content")
obj.empty.close()
def test_pathlib_upload_to(self):
obj = Storage()
obj.pathlib_callable.save("some_file1.txt", ContentFile("some content"))
self.assertEqual(obj.pathlib_callable.name, "bar/some_file1.txt")
obj.pathlib_direct.save("some_file2.txt", ContentFile("some content"))
self.assertEqual(obj.pathlib_direct.name, "bar/some_file2.txt")
        obj.pathlib_callable.close()
        obj.pathlib_direct.close()
def test_random_upload_to(self):
# Verify the fix for #5655, making sure the directory is only
# determined once.
obj = Storage()
obj.random.save("random_file", ContentFile("random content"))
self.assertTrue(obj.random.name.endswith("/random_file"))
obj.random.close()
def test_custom_valid_name_callable_upload_to(self):
"""
Storage.get_valid_name() should be called when upload_to is a callable.
"""
obj = Storage()
obj.custom_valid_name.save("random_file", ContentFile("random content"))
# CustomValidNameStorage.get_valid_name() appends '_valid' to the name
self.assertTrue(obj.custom_valid_name.name.endswith("/random_file_valid"))
obj.custom_valid_name.close()
def test_filefield_pickling(self):
# Push an object into the cache to make sure it pickles properly
obj = Storage()
obj.normal.save("django_test.txt", ContentFile("more content"))
obj.normal.close()
cache.set("obj", obj)
self.assertEqual(cache.get("obj").normal.name, "tests/django_test.txt")
def test_file_object(self):
# Create sample file
temp_storage.save("tests/example.txt", ContentFile("some content"))
# Load it as Python file object
with open(temp_storage.path("tests/example.txt")) as file_obj:
# Save it using storage and read its content
temp_storage.save("tests/file_obj", file_obj)
self.assertTrue(temp_storage.exists("tests/file_obj"))
with temp_storage.open("tests/file_obj") as f:
self.assertEqual(f.read(), b"some content")
def test_stringio(self):
# Test passing StringIO instance as content argument to save
output = StringIO()
output.write("content")
output.seek(0)
# Save it and read written file
temp_storage.save("tests/stringio", output)
self.assertTrue(temp_storage.exists("tests/stringio"))
with temp_storage.open("tests/stringio") as f:
self.assertEqual(f.read(), b"content")
class FieldCallableFileStorageTests(SimpleTestCase):
def setUp(self):
self.temp_storage_location = tempfile.mkdtemp(
suffix="filefield_callable_storage"
)
def tearDown(self):
shutil.rmtree(self.temp_storage_location)
def test_callable_base_class_error_raises(self):
class NotStorage:
pass
msg = (
"FileField.storage must be a subclass/instance of "
"django.core.files.storage.base.Storage"
)
for invalid_type in (NotStorage, str, list, set, tuple):
with self.subTest(invalid_type=invalid_type):
with self.assertRaisesMessage(TypeError, msg):
FileField(storage=invalid_type)
def test_file_field_storage_none_uses_default_storage(self):
self.assertEqual(FileField().storage, default_storage)
def test_callable_function_storage_file_field(self):
storage = FileSystemStorage(location=self.temp_storage_location)
def get_storage():
return storage
obj = FileField(storage=get_storage)
self.assertEqual(obj.storage, storage)
self.assertEqual(obj.storage.location, storage.location)
def test_callable_class_storage_file_field(self):
class GetStorage(FileSystemStorage):
pass
obj = FileField(storage=GetStorage)
self.assertIsInstance(obj.storage, BaseStorage)
def test_callable_storage_file_field_in_model(self):
obj = Storage()
self.assertEqual(obj.storage_callable.storage, temp_storage)
self.assertEqual(obj.storage_callable.storage.location, temp_storage_location)
self.assertIsInstance(obj.storage_callable_class.storage, BaseStorage)
def test_deconstruction(self):
"""
Deconstructing gives the original callable, not the evaluated value.
"""
obj = Storage()
*_, kwargs = obj._meta.get_field("storage_callable").deconstruct()
storage = kwargs["storage"]
self.assertIs(storage, callable_storage)
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.
class SlowFile(ContentFile):
def chunks(self):
time.sleep(1)
return super().chunks()
class FileSaveRaceConditionTest(SimpleTestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
self.thread = threading.Thread(target=self.save_file, args=["conflict"])
def tearDown(self):
shutil.rmtree(self.storage_dir)
def save_file(self, name):
name = self.storage.save(name, SlowFile(b"Data"))
def test_race_condition(self):
self.thread.start()
self.save_file("conflict")
self.thread.join()
files = sorted(os.listdir(self.storage_dir))
self.assertEqual(files[0], "conflict")
self.assertRegex(files[1], "conflict_%s" % FILE_SUFFIX_REGEX)
@unittest.skipIf(
sys.platform == "win32", "Windows only partially supports umasks and chmod."
)
class FileStoragePermissions(unittest.TestCase):
def setUp(self):
self.umask = 0o027
self.old_umask = os.umask(self.umask)
self.storage_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.storage_dir)
os.umask(self.old_umask)
@override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
def test_file_upload_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_file", ContentFile("data"))
actual_mode = os.stat(self.storage.path(name))[0] & 0o777
self.assertEqual(actual_mode, 0o654)
@override_settings(FILE_UPLOAD_PERMISSIONS=None)
def test_file_upload_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
fname = self.storage.save("some_file", ContentFile("data"))
mode = os.stat(self.storage.path(fname))[0] & 0o777
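        # With the test umask of 0o027: 0o666 & ~0o027 == 0o640.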
self.assertEqual(mode, 0o666 & ~self.umask)
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765)
def test_file_upload_directory_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_directory/subdir/the_file", ContentFile("data"))
file_path = Path(self.storage.path(name))
self.assertEqual(file_path.parent.stat().st_mode & 0o777, 0o765)
self.assertEqual(file_path.parent.parent.stat().st_mode & 0o777, 0o765)
@override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
def test_file_upload_directory_default_permissions(self):
self.storage = FileSystemStorage(self.storage_dir)
name = self.storage.save("the_directory/subdir/the_file", ContentFile("data"))
file_path = Path(self.storage.path(name))
expected_mode = 0o777 & ~self.umask
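        # With the test umask of 0o027: 0o777 & ~0o027 == 0o750.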
self.assertEqual(file_path.parent.stat().st_mode & 0o777, expected_mode)
self.assertEqual(file_path.parent.parent.stat().st_mode & 0o777, expected_mode)
class FileStoragePathParsing(SimpleTestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_directory_with_dot(self):
"""Regression test for #9610.
If the directory name contains a dot and the file name doesn't, make
sure we still mangle the file name instead of the directory name.
"""
self.storage.save("dotted.path/test", ContentFile("1"))
self.storage.save("dotted.path/test", ContentFile("2"))
files = sorted(os.listdir(os.path.join(self.storage_dir, "dotted.path")))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, "dotted_.path")))
self.assertEqual(files[0], "test")
self.assertRegex(files[1], "test_%s" % FILE_SUFFIX_REGEX)
def test_first_character_dot(self):
"""
File names with a dot as their first character don't have an extension,
and the underscore should get added to the end.
"""
self.storage.save("dotted.path/.test", ContentFile("1"))
self.storage.save("dotted.path/.test", ContentFile("2"))
files = sorted(os.listdir(os.path.join(self.storage_dir, "dotted.path")))
self.assertFalse(os.path.exists(os.path.join(self.storage_dir, "dotted_.path")))
self.assertEqual(files[0], ".test")
self.assertRegex(files[1], ".test_%s" % FILE_SUFFIX_REGEX)
class ContentFileStorageTestCase(unittest.TestCase):
def setUp(self):
self.storage_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(self.storage_dir)
def tearDown(self):
shutil.rmtree(self.storage_dir)
def test_content_saving(self):
"""
ContentFile can be saved correctly with the filesystem storage,
if it was initialized with either bytes or unicode content.
"""
self.storage.save("bytes.txt", ContentFile(b"content"))
self.storage.save("unicode.txt", ContentFile("español"))
@override_settings(ROOT_URLCONF="file_storage.urls")
class FileLikeObjectTestCase(LiveServerTestCase):
"""
Test file-like objects (#15644).
"""
available_apps = []
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.storage = FileSystemStorage(location=self.temp_dir)
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_urllib_request_urlopen(self):
"""
Test the File storage API with a file-like object coming from
urllib.request.urlopen().
"""
file_like_object = urlopen(self.live_server_url + "/")
f = File(file_like_object)
stored_filename = self.storage.save("remote_file.html", f)
remote_file = urlopen(self.live_server_url + "/")
with self.storage.open(stored_filename) as stored_file:
self.assertEqual(stored_file.read(), remote_file.read())
|
bc13aae130a63dd3abb01d90e9b9d7dd11dead788cdf50300e6a609225f05d19 | import datetime
import math
import re
from decimal import Decimal
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
Avg,
Case,
Count,
DateField,
DateTimeField,
DecimalField,
DurationField,
Exists,
F,
FloatField,
IntegerField,
Max,
Min,
OuterRef,
Q,
StdDev,
Subquery,
Sum,
TimeField,
Value,
Variance,
When,
)
from django.db.models.expressions import Func, RawSQL
from django.db.models.functions import (
Cast,
Coalesce,
Greatest,
Lower,
Now,
Pi,
TruncDate,
TruncHour,
)
from django.test import TestCase
from django.test.testcases import skipUnlessDBFeature
from django.test.utils import Approximate, CaptureQueriesContext
from django.utils import timezone
from .models import Author, Book, Publisher, Store
class NowUTC(Now):
template = "CURRENT_TIMESTAMP"
output_field = DateTimeField()
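    # CURRENT_TIMESTAMP follows the session time zone on some backends, so
    # such a backend can advertise, via this test-only feature flag, a
    # template that always yields UTC instead.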
def as_sql(self, compiler, connection, **extra_context):
if connection.features.test_now_utc_template:
extra_context["template"] = connection.features.test_now_utc_template
return super().as_sql(compiler, connection, **extra_context)
class AggregateTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name="Adrian Holovaty", age=34)
cls.a2 = Author.objects.create(name="Jacob Kaplan-Moss", age=35)
cls.a3 = Author.objects.create(name="Brad Dayley", age=45)
cls.a4 = Author.objects.create(name="James Bennett", age=29)
cls.a5 = Author.objects.create(name="Jeffrey Forcier", age=37)
cls.a6 = Author.objects.create(name="Paul Bissex", age=29)
cls.a7 = Author.objects.create(name="Wesley J. Chun", age=25)
cls.a8 = Author.objects.create(name="Peter Norvig", age=57)
cls.a9 = Author.objects.create(name="Stuart Russell", age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(
name="Apress", num_awards=3, duration=datetime.timedelta(days=1)
)
cls.p2 = Publisher.objects.create(
name="Sams", num_awards=1, duration=datetime.timedelta(days=2)
)
cls.p3 = Publisher.objects.create(name="Prentice Hall", num_awards=7)
cls.p4 = Publisher.objects.create(name="Morgan Kaufmann", num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn="159059725",
name="The Definitive Guide to Django: Web Development Done Right",
pages=447,
rating=4.5,
price=Decimal("30.00"),
contact=cls.a1,
publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6),
)
cls.b2 = Book.objects.create(
isbn="067232959",
name="Sams Teach Yourself Django in 24 Hours",
pages=528,
rating=3.0,
price=Decimal("23.09"),
contact=cls.a3,
publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3),
)
cls.b3 = Book.objects.create(
isbn="159059996",
name="Practical Django Projects",
pages=300,
rating=4.0,
price=Decimal("29.69"),
contact=cls.a4,
publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23),
)
cls.b4 = Book.objects.create(
isbn="013235613",
name="Python Web Development with Django",
pages=350,
rating=4.0,
price=Decimal("29.69"),
contact=cls.a5,
publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3),
)
cls.b5 = Book.objects.create(
isbn="013790395",
name="Artificial Intelligence: A Modern Approach",
pages=1132,
rating=4.0,
price=Decimal("82.80"),
contact=cls.a8,
publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15),
)
cls.b6 = Book.objects.create(
isbn="155860191",
name=(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp"
),
pages=946,
rating=5.0,
price=Decimal("75.00"),
contact=cls.a8,
publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15),
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name="Amazon.com",
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59),
)
s2 = Store.objects.create(
name="Books.com",
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59),
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30),
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def test_empty_aggregate(self):
self.assertEqual(Author.objects.aggregate(), {})
def test_aggregate_in_order_by(self):
msg = (
"Using an aggregate in order_by() without also including it in "
"annotate() is not allowed: Avg(F(book__rating)"
)
with self.assertRaisesMessage(FieldError, msg):
Author.objects.values("age").order_by(Avg("book__rating"))
def test_single_aggregate(self):
vals = Author.objects.aggregate(Avg("age"))
self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})
def test_multiple_aggregates(self):
vals = Author.objects.aggregate(Sum("age"), Avg("age"))
self.assertEqual(
vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)}
)
def test_filter_aggregate(self):
vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
self.assertEqual(vals, {"age__sum": 254})
def test_related_aggregate(self):
vals = Author.objects.aggregate(Avg("friends__age"))
self.assertEqual(vals, {"friends__age__avg": Approximate(34.07, places=2)})
vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
self.assertEqual(vals, {"authors__age__avg": Approximate(38.2857, places=2)})
vals = Author.objects.filter(name__contains="a").aggregate(Avg("book__rating"))
self.assertEqual(vals, {"book__rating__avg": 4.0})
vals = Book.objects.aggregate(Sum("publisher__num_awards"))
self.assertEqual(vals, {"publisher__num_awards__sum": 30})
vals = Publisher.objects.aggregate(Sum("book__price"))
self.assertEqual(vals, {"book__price__sum": Decimal("270.27")})
def test_aggregate_multi_join(self):
vals = Store.objects.aggregate(Max("books__authors__age"))
self.assertEqual(vals, {"books__authors__age__max": 57})
vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
self.assertEqual(vals, {"book__publisher__num_awards__min": 1})
def test_aggregate_alias(self):
vals = Store.objects.filter(name="Amazon.com").aggregate(
amazon_mean=Avg("books__rating")
)
self.assertEqual(vals, {"amazon_mean": Approximate(4.08, places=2)})
def test_aggregate_transform(self):
vals = Store.objects.aggregate(min_month=Min("original_opening__month"))
self.assertEqual(vals, {"min_month": 3})
def test_aggregate_join_transform(self):
vals = Publisher.objects.aggregate(min_year=Min("book__pubdate__year"))
self.assertEqual(vals, {"min_year": 1991})
def test_annotate_basic(self):
self.assertQuerySetEqual(
Book.objects.annotate().order_by("pk"),
[
"The Definitive Guide to Django: Web Development Done Right",
"Sams Teach Yourself Django in 24 Hours",
"Practical Django Projects",
"Python Web Development with Django",
"Artificial Intelligence: A Modern Approach",
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
],
lambda b: b.name,
)
books = Book.objects.annotate(mean_age=Avg("authors__age"))
b = books.get(pk=self.b1.pk)
self.assertEqual(
b.name, "The Definitive Guide to Django: Web Development Done Right"
)
self.assertEqual(b.mean_age, 34.5)
def test_annotate_defer(self):
qs = (
Book.objects.annotate(page_sum=Sum("pages"))
.defer("name")
.filter(pk=self.b1.pk)
)
rows = [
(
self.b1.id,
"159059725",
447,
"The Definitive Guide to Django: Web Development Done Right",
)
]
self.assertQuerySetEqual(
qs.order_by("pk"), rows, lambda r: (r.id, r.isbn, r.page_sum, r.name)
)
def test_annotate_defer_select_related(self):
qs = (
Book.objects.select_related("contact")
.annotate(page_sum=Sum("pages"))
.defer("name")
.filter(pk=self.b1.pk)
)
rows = [
(
self.b1.id,
"159059725",
447,
"Adrian Holovaty",
"The Definitive Guide to Django: Web Development Done Right",
)
]
self.assertQuerySetEqual(
qs.order_by("pk"),
rows,
lambda r: (r.id, r.isbn, r.page_sum, r.contact.name, r.name),
)
def test_annotate_m2m(self):
books = (
Book.objects.filter(rating__lt=4.5)
.annotate(Avg("authors__age"))
.order_by("name")
)
self.assertQuerySetEqual(
books,
[
("Artificial Intelligence: A Modern Approach", 51.5),
("Practical Django Projects", 29.0),
("Python Web Development with Django", Approximate(30.3, places=1)),
("Sams Teach Yourself Django in 24 Hours", 45.0),
],
lambda b: (b.name, b.authors__age__avg),
)
books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
self.assertQuerySetEqual(
books,
[
("Artificial Intelligence: A Modern Approach", 2),
(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
1,
),
("Practical Django Projects", 1),
("Python Web Development with Django", 3),
("Sams Teach Yourself Django in 24 Hours", 1),
("The Definitive Guide to Django: Web Development Done Right", 2),
],
lambda b: (b.name, b.num_authors),
)
def test_backwards_m2m_annotate(self):
authors = (
Author.objects.filter(name__contains="a")
.annotate(Avg("book__rating"))
.order_by("name")
)
self.assertQuerySetEqual(
authors,
[
("Adrian Holovaty", 4.5),
("Brad Dayley", 3.0),
("Jacob Kaplan-Moss", 4.5),
("James Bennett", 4.0),
("Paul Bissex", 4.0),
("Stuart Russell", 4.0),
],
lambda a: (a.name, a.book__rating__avg),
)
authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
self.assertQuerySetEqual(
authors,
[
("Adrian Holovaty", 1),
("Brad Dayley", 1),
("Jacob Kaplan-Moss", 1),
("James Bennett", 1),
("Jeffrey Forcier", 1),
("Paul Bissex", 1),
("Peter Norvig", 2),
("Stuart Russell", 1),
("Wesley J. Chun", 1),
],
lambda a: (a.name, a.num_books),
)
def test_reverse_fkey_annotate(self):
books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
self.assertQuerySetEqual(
books,
[
("Artificial Intelligence: A Modern Approach", 7),
(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
9,
),
("Practical Django Projects", 3),
("Python Web Development with Django", 7),
("Sams Teach Yourself Django in 24 Hours", 1),
("The Definitive Guide to Django: Web Development Done Right", 3),
],
lambda b: (b.name, b.publisher__num_awards__sum),
)
publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
self.assertQuerySetEqual(
publishers,
[
("Apress", Decimal("59.69")),
("Jonno's House of Books", None),
("Morgan Kaufmann", Decimal("75.00")),
("Prentice Hall", Decimal("112.49")),
("Sams", Decimal("23.09")),
],
lambda p: (p.name, p.book__price__sum),
)
def test_annotate_values(self):
books = list(
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values()
)
self.assertEqual(
books,
[
{
"contact_id": self.a1.id,
"id": self.b1.id,
"isbn": "159059725",
"mean_age": 34.5,
"name": (
"The Definitive Guide to Django: Web Development Done Right"
),
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": self.p1.id,
"rating": 4.5,
}
],
)
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values("pk", "isbn", "mean_age")
)
self.assertEqual(
list(books),
[
{
"pk": self.b1.pk,
"isbn": "159059725",
"mean_age": 34.5,
}
],
)
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values("name")
)
self.assertEqual(
list(books),
[{"name": "The Definitive Guide to Django: Web Development Done Right"}],
)
books = (
Book.objects.filter(pk=self.b1.pk)
.values()
.annotate(mean_age=Avg("authors__age"))
)
self.assertEqual(
list(books),
[
{
"contact_id": self.a1.id,
"id": self.b1.id,
"isbn": "159059725",
"mean_age": 34.5,
"name": (
"The Definitive Guide to Django: Web Development Done Right"
),
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": self.p1.id,
"rating": 4.5,
}
],
)
books = (
Book.objects.values("rating")
.annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age"))
.order_by("rating")
)
self.assertEqual(
list(books),
[
{
"rating": 3.0,
"n_authors": 1,
"mean_age": 45.0,
},
{
"rating": 4.0,
"n_authors": 6,
"mean_age": Approximate(37.16, places=1),
},
{
"rating": 4.5,
"n_authors": 2,
"mean_age": 34.5,
},
{
"rating": 5.0,
"n_authors": 1,
"mean_age": 57.0,
},
],
)
authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
self.assertQuerySetEqual(
authors,
[
("Adrian Holovaty", 32.0),
("Brad Dayley", None),
("Jacob Kaplan-Moss", 29.5),
("James Bennett", 34.0),
("Jeffrey Forcier", 27.0),
("Paul Bissex", 31.0),
("Peter Norvig", 46.0),
("Stuart Russell", 57.0),
("Wesley J. Chun", Approximate(33.66, places=1)),
],
lambda a: (a.name, a.friends__age__avg),
)
def test_count(self):
vals = Book.objects.aggregate(Count("rating"))
self.assertEqual(vals, {"rating__count": 6})
def test_count_star(self):
with self.assertNumQueries(1) as ctx:
Book.objects.aggregate(n=Count("*"))
sql = ctx.captured_queries[0]["sql"]
self.assertIn("SELECT COUNT(*) ", sql)
def test_count_distinct_expression(self):
aggs = Book.objects.aggregate(
distinct_ratings=Count(
Case(When(pages__gt=300, then="rating")), distinct=True
),
)
self.assertEqual(aggs["distinct_ratings"], 4)
def test_distinct_on_aggregate(self):
for aggregate, expected_result in (
(Avg, 4.125),
(Count, 4),
(Sum, 16.5),
):
with self.subTest(aggregate=aggregate.__name__):
books = Book.objects.aggregate(
ratings=aggregate("rating", distinct=True)
)
self.assertEqual(books["ratings"], expected_result)
def test_non_grouped_annotation_not_in_group_by(self):
"""
An annotation not included in values() before an aggregate should be
        excluded from the GROUP BY clause.
"""
qs = (
Book.objects.annotate(xprice=F("price"))
.filter(rating=4.0)
.values("rating")
.annotate(count=Count("publisher_id", distinct=True))
.values("count", "rating")
.order_by("count")
)
self.assertEqual(list(qs), [{"rating": 4.0, "count": 2}])
def test_grouped_annotation_in_group_by(self):
"""
An annotation included in values() before an aggregate should be
        included in the GROUP BY clause.
"""
qs = (
Book.objects.annotate(xprice=F("price"))
.filter(rating=4.0)
.values("rating", "xprice")
.annotate(count=Count("publisher_id", distinct=True))
.values("count", "rating")
.order_by("count")
)
self.assertEqual(
list(qs),
[
{"rating": 4.0, "count": 1},
{"rating": 4.0, "count": 2},
],
)
def test_fkey_aggregate(self):
explicit = list(Author.objects.annotate(Count("book__id")))
implicit = list(Author.objects.annotate(Count("book")))
self.assertCountEqual(explicit, implicit)
def test_annotate_ordering(self):
books = (
Book.objects.values("rating")
.annotate(oldest=Max("authors__age"))
.order_by("oldest", "rating")
)
self.assertEqual(
list(books),
[
{"rating": 4.5, "oldest": 35},
{"rating": 3.0, "oldest": 45},
{"rating": 4.0, "oldest": 57},
{"rating": 5.0, "oldest": 57},
],
)
books = (
Book.objects.values("rating")
.annotate(oldest=Max("authors__age"))
.order_by("-oldest", "-rating")
)
self.assertEqual(
list(books),
[
{"rating": 5.0, "oldest": 57},
{"rating": 4.0, "oldest": 57},
{"rating": 3.0, "oldest": 45},
{"rating": 4.5, "oldest": 35},
],
)
def test_aggregate_annotation(self):
vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(
Avg("num_authors")
)
self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)})
def test_avg_duration_field(self):
# Explicit `output_field`.
self.assertEqual(
Publisher.objects.aggregate(Avg("duration", output_field=DurationField())),
{"duration__avg": datetime.timedelta(days=1, hours=12)},
)
# Implicit `output_field`.
self.assertEqual(
Publisher.objects.aggregate(Avg("duration")),
{"duration__avg": datetime.timedelta(days=1, hours=12)},
)
def test_sum_duration_field(self):
self.assertEqual(
Publisher.objects.aggregate(Sum("duration", output_field=DurationField())),
{"duration__sum": datetime.timedelta(days=3)},
)
def test_sum_distinct_aggregate(self):
"""
Sum on a distinct() QuerySet should aggregate only the distinct items.
"""
authors = Author.objects.filter(book__in=[self.b5, self.b6])
self.assertEqual(authors.count(), 3)
distinct_authors = authors.distinct()
self.assertEqual(distinct_authors.count(), 2)
# Selected author ages are 57 and 46
age_sum = distinct_authors.aggregate(Sum("age"))
self.assertEqual(age_sum["age__sum"], 103)
def test_filtering(self):
p = Publisher.objects.create(name="Expensive Publisher", num_awards=0)
Book.objects.create(
name="ExpensiveBook1",
pages=1,
isbn="111",
rating=3.5,
price=Decimal("1000"),
publisher=p,
contact_id=self.a1.id,
pubdate=datetime.date(2008, 12, 1),
)
Book.objects.create(
name="ExpensiveBook2",
pages=1,
isbn="222",
rating=4.0,
price=Decimal("1000"),
publisher=p,
contact_id=self.a1.id,
pubdate=datetime.date(2008, 12, 2),
)
Book.objects.create(
name="ExpensiveBook3",
pages=1,
isbn="333",
rating=4.5,
price=Decimal("35"),
publisher=p,
contact_id=self.a1.id,
pubdate=datetime.date(2008, 12, 3),
)
publishers = (
Publisher.objects.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
.order_by("pk")
)
self.assertQuerySetEqual(
publishers,
["Apress", "Prentice Hall", "Expensive Publisher"],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by(
"pk"
)
self.assertQuerySetEqual(
publishers,
[
"Apress",
"Apress",
"Sams",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = (
Publisher.objects.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1, book__price__lt=Decimal("40.0"))
.order_by("pk")
)
self.assertQuerySetEqual(
publishers,
["Apress", "Prentice Hall", "Expensive Publisher"],
lambda p: p.name,
)
publishers = (
Publisher.objects.filter(book__price__lt=Decimal("40.0"))
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
.order_by("pk")
)
self.assertQuerySetEqual(publishers, ["Apress"], lambda p: p.name)
publishers = (
Publisher.objects.annotate(num_books=Count("book"))
.filter(num_books__range=[1, 3])
.order_by("pk")
)
self.assertQuerySetEqual(
publishers,
[
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = (
Publisher.objects.annotate(num_books=Count("book"))
.filter(num_books__range=[1, 2])
.order_by("pk")
)
self.assertQuerySetEqual(
publishers,
["Apress", "Sams", "Prentice Hall", "Morgan Kaufmann"],
lambda p: p.name,
)
publishers = (
Publisher.objects.annotate(num_books=Count("book"))
.filter(num_books__in=[1, 3])
.order_by("pk")
)
self.assertQuerySetEqual(
publishers,
["Sams", "Morgan Kaufmann", "Expensive Publisher"],
lambda p: p.name,
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(
num_books__isnull=True
)
self.assertEqual(len(publishers), 0)
def test_annotation(self):
vals = Author.objects.filter(pk=self.a1.pk).aggregate(Count("friends__id"))
self.assertEqual(vals, {"friends__id__count": 2})
books = (
Book.objects.annotate(num_authors=Count("authors__name"))
.filter(num_authors__exact=2)
.order_by("pk")
)
self.assertQuerySetEqual(
books,
[
"The Definitive Guide to Django: Web Development Done Right",
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name,
)
authors = (
Author.objects.annotate(num_friends=Count("friends__id", distinct=True))
.filter(num_friends=0)
.order_by("pk")
)
self.assertQuerySetEqual(authors, ["Brad Dayley"], lambda a: a.name)
publishers = (
Publisher.objects.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
.order_by("pk")
)
self.assertQuerySetEqual(
publishers, ["Apress", "Prentice Hall"], lambda p: p.name
)
publishers = (
Publisher.objects.filter(book__price__lt=Decimal("40.0"))
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
)
self.assertQuerySetEqual(publishers, ["Apress"], lambda p: p.name)
books = Book.objects.annotate(num_authors=Count("authors__id")).filter(
authors__name__contains="Norvig", num_authors__gt=1
)
self.assertQuerySetEqual(
books, ["Artificial Intelligence: A Modern Approach"], lambda b: b.name
)
def test_more_aggregation(self):
a = Author.objects.get(name__contains="Norvig")
b = Book.objects.get(name__contains="Done Right")
b.authors.add(a)
b.save()
vals = (
Book.objects.annotate(num_authors=Count("authors__id"))
.filter(authors__name__contains="Norvig", num_authors__gt=1)
.aggregate(Avg("rating"))
)
self.assertEqual(vals, {"rating__avg": 4.25})
def test_even_more_aggregate(self):
publishers = (
Publisher.objects.annotate(
earliest_book=Min("book__pubdate"),
)
.exclude(earliest_book=None)
.order_by("earliest_book")
.values(
"earliest_book",
"num_awards",
"id",
"name",
)
)
self.assertEqual(
list(publishers),
[
{
"earliest_book": datetime.date(1991, 10, 15),
"num_awards": 9,
"id": self.p4.id,
"name": "Morgan Kaufmann",
},
{
"earliest_book": datetime.date(1995, 1, 15),
"num_awards": 7,
"id": self.p3.id,
"name": "Prentice Hall",
},
{
"earliest_book": datetime.date(2007, 12, 6),
"num_awards": 3,
"id": self.p1.id,
"name": "Apress",
},
{
"earliest_book": datetime.date(2008, 3, 3),
"num_awards": 1,
"id": self.p2.id,
"name": "Sams",
},
],
)
vals = Store.objects.aggregate(
Max("friday_night_closing"), Min("original_opening")
)
self.assertEqual(
vals,
{
"friday_night_closing__max": datetime.time(23, 59, 59),
"original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
)
def test_annotate_values_list(self):
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("pk", "isbn", "mean_age")
)
self.assertEqual(list(books), [(self.b1.id, "159059725", 34.5)])
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("isbn")
)
self.assertEqual(list(books), [("159059725",)])
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("mean_age")
)
self.assertEqual(list(books), [(34.5,)])
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("mean_age", flat=True)
)
self.assertEqual(list(books), [34.5])
books = (
Book.objects.values_list("price")
.annotate(count=Count("price"))
.order_by("-count", "price")
)
self.assertEqual(
list(books),
[
(Decimal("29.69"), 2),
(Decimal("23.09"), 1),
(Decimal("30"), 1),
(Decimal("75"), 1),
(Decimal("82.8"), 1),
],
)
def test_dates_with_aggregation(self):
"""
.dates() returns a distinct set of dates when applied to a
QuerySet with aggregation.
Refs #18056. Previously, .dates() would return distinct (date_kind,
aggregation) sets, in this case (year, num_authors), so 2008 would be
returned twice because there are books from 2008 with a different
number of authors.
"""
dates = Book.objects.annotate(num_authors=Count("authors")).dates(
"pubdate", "year"
)
self.assertSequenceEqual(
dates,
[
datetime.date(1991, 1, 1),
datetime.date(1995, 1, 1),
datetime.date(2007, 1, 1),
datetime.date(2008, 1, 1),
],
)
def test_values_aggregation(self):
# Refs #20782
max_rating = Book.objects.values("rating").aggregate(max_rating=Max("rating"))
self.assertEqual(max_rating["max_rating"], 5)
max_books_per_rating = (
Book.objects.values("rating")
.annotate(books_per_rating=Count("id"))
.aggregate(Max("books_per_rating"))
)
self.assertEqual(max_books_per_rating, {"books_per_rating__max": 3})
def test_ticket17424(self):
"""
Doing exclude() on a foreign model after annotate() doesn't crash.
"""
all_books = list(Book.objects.values_list("pk", flat=True).order_by("pk"))
annotated_books = Book.objects.order_by("pk").annotate(one=Count("id"))
        # The value doesn't matter; we just need any negative
        # constraint on a related model that's a no-op.
excluded_books = annotated_books.exclude(publisher__name="__UNLIKELY_VALUE__")
        # Force the query tree to be generated.
str(excluded_books.query)
self.assertQuerySetEqual(excluded_books, all_books, lambda x: x.pk)
# Check internal state
self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type)
self.assertIsNone(excluded_books.query.alias_map["aggregation_book"].join_type)
def test_ticket12886(self):
"""
Aggregation over sliced queryset works correctly.
"""
qs = Book.objects.order_by("-rating")[0:3]
vals = qs.aggregate(average_top3_rating=Avg("rating"))["average_top3_rating"]
self.assertAlmostEqual(vals, 4.5, places=2)
def test_ticket11881(self):
"""
Subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE or
select_related() stuff.
"""
qs = (
Book.objects.select_for_update()
.order_by("pk")
.select_related("publisher")
.annotate(max_pk=Max("pk"))
)
with CaptureQueriesContext(connection) as captured_queries:
qs.aggregate(avg_pk=Avg("max_pk"))
self.assertEqual(len(captured_queries), 1)
qstr = captured_queries[0]["sql"].lower()
self.assertNotIn("for update", qstr)
forced_ordering = connection.ops.force_no_ordering()
if forced_ordering:
            # If the backend needs to force an ordering, make sure it's
            # the only "ORDER BY" clause present in the query.
self.assertEqual(
re.findall(r"order by (\w+)", qstr),
[", ".join(f[1][0] for f in forced_ordering).lower()],
)
else:
self.assertNotIn("order by", qstr)
self.assertEqual(qstr.count(" join "), 0)
def test_decimal_max_digits_has_no_effect(self):
Book.objects.all().delete()
a1 = Author.objects.first()
p1 = Publisher.objects.first()
thedate = timezone.now()
for i in range(10):
Book.objects.create(
isbn="abcde{}".format(i),
name="none",
pages=10,
rating=4.0,
price=9999.98,
contact=a1,
publisher=p1,
pubdate=thedate,
)
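        # 10 books at 9999.98 sum to 99999.80, which needs more digits than a
        # single price value is allowed; the aggregate must not be clipped to
        # the field's max_digits.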
book = Book.objects.aggregate(price_sum=Sum("price"))
self.assertEqual(book["price_sum"], Decimal("99999.80"))
def test_nonaggregate_aggregation_throws(self):
with self.assertRaisesMessage(TypeError, "fail is not an aggregate expression"):
Book.objects.aggregate(fail=F("price"))
def test_nonfield_annotation(self):
book = Book.objects.annotate(val=Max(Value(2))).first()
self.assertEqual(book.val, 2)
book = Book.objects.annotate(
val=Max(Value(2), output_field=IntegerField())
).first()
self.assertEqual(book.val, 2)
book = Book.objects.annotate(val=Max(2, output_field=IntegerField())).first()
self.assertEqual(book.val, 2)
def test_annotation_expressions(self):
authors = Author.objects.annotate(
combined_ages=Sum(F("age") + F("friends__age"))
).order_by("name")
authors2 = Author.objects.annotate(
combined_ages=Sum("age") + Sum("friends__age")
).order_by("name")
for qs in (authors, authors2):
self.assertQuerySetEqual(
qs,
[
("Adrian Holovaty", 132),
("Brad Dayley", None),
("Jacob Kaplan-Moss", 129),
("James Bennett", 63),
("Jeffrey Forcier", 128),
("Paul Bissex", 120),
("Peter Norvig", 103),
("Stuart Russell", 103),
("Wesley J. Chun", 176),
],
lambda a: (a.name, a.combined_ages),
)
def test_aggregation_expressions(self):
a1 = Author.objects.aggregate(av_age=Sum("age") / Count("*"))
a2 = Author.objects.aggregate(av_age=Sum("age") / Count("age"))
a3 = Author.objects.aggregate(av_age=Avg("age"))
self.assertEqual(a1, {"av_age": 37})
self.assertEqual(a2, {"av_age": 37})
self.assertEqual(a3, {"av_age": Approximate(37.4, places=1)})
def test_avg_decimal_field(self):
        v = Book.objects.filter(rating=4).aggregate(avg_price=Avg("price"))[
"avg_price"
]
self.assertIsInstance(v, Decimal)
self.assertEqual(v, Approximate(Decimal("47.39"), places=2))
def test_order_of_precedence(self):
p1 = Book.objects.filter(rating=4).aggregate(avg_price=(Avg("price") + 2) * 3)
self.assertEqual(p1, {"avg_price": Approximate(Decimal("148.18"), places=2)})
p2 = Book.objects.filter(rating=4).aggregate(avg_price=Avg("price") + 2 * 3)
self.assertEqual(p2, {"avg_price": Approximate(Decimal("53.39"), places=2)})
def test_combine_different_types(self):
msg = (
"Cannot infer type of '+' expression involving these types: FloatField, "
"DecimalField. You must set output_field."
)
qs = Book.objects.annotate(sums=Sum("rating") + Sum("pages") + Sum("price"))
with self.assertRaisesMessage(FieldError, msg):
qs.first()
with self.assertRaisesMessage(FieldError, msg):
qs.first()
b1 = Book.objects.annotate(
sums=Sum(F("rating") + F("pages") + F("price"), output_field=IntegerField())
).get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
b2 = Book.objects.annotate(
sums=Sum(F("rating") + F("pages") + F("price"), output_field=FloatField())
).get(pk=self.b4.pk)
self.assertEqual(b2.sums, 383.69)
b3 = Book.objects.annotate(
sums=Sum(F("rating") + F("pages") + F("price"), output_field=DecimalField())
).get(pk=self.b4.pk)
self.assertEqual(b3.sums, Approximate(Decimal("383.69"), places=2))
def test_complex_aggregations_require_kwarg(self):
with self.assertRaisesMessage(
TypeError, "Complex annotations require an alias"
):
Author.objects.annotate(Sum(F("age") + F("friends__age")))
with self.assertRaisesMessage(TypeError, "Complex aggregates require an alias"):
Author.objects.aggregate(Sum("age") / Count("age"))
with self.assertRaisesMessage(TypeError, "Complex aggregates require an alias"):
Author.objects.aggregate(Sum(1))
def test_aggregate_over_complex_annotation(self):
qs = Author.objects.annotate(combined_ages=Sum(F("age") + F("friends__age")))
age = qs.aggregate(max_combined_age=Max("combined_ages"))
self.assertEqual(age["max_combined_age"], 176)
age = qs.aggregate(max_combined_age_doubled=Max("combined_ages") * 2)
self.assertEqual(age["max_combined_age_doubled"], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max("combined_ages") + Max("combined_ages")
)
self.assertEqual(age["max_combined_age_doubled"], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max("combined_ages") + Max("combined_ages"),
sum_combined_age=Sum("combined_ages"),
)
self.assertEqual(age["max_combined_age_doubled"], 176 * 2)
self.assertEqual(age["sum_combined_age"], 954)
age = qs.aggregate(
max_combined_age_doubled=Max("combined_ages") + Max("combined_ages"),
sum_combined_age_doubled=Sum("combined_ages") + Sum("combined_ages"),
)
self.assertEqual(age["max_combined_age_doubled"], 176 * 2)
self.assertEqual(age["sum_combined_age_doubled"], 954 * 2)
def test_values_annotation_with_expression(self):
        # Ensure the F() expression is promoted to the GROUP BY clause.
qs = Author.objects.values("name").annotate(another_age=Sum("age") + F("age"))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a["another_age"], 68)
qs = qs.annotate(friend_count=Count("friends"))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a["friend_count"], 2)
qs = (
qs.annotate(combined_age=Sum("age") + F("friends__age"))
.filter(name="Adrian Holovaty")
.order_by("-combined_age")
)
self.assertEqual(
list(qs),
[
{
"name": "Adrian Holovaty",
"another_age": 68,
"friend_count": 1,
"combined_age": 69,
},
{
"name": "Adrian Holovaty",
"another_age": 68,
"friend_count": 1,
"combined_age": 63,
},
],
)
vals = qs.values("name", "combined_age")
self.assertEqual(
list(vals),
[
{"name": "Adrian Holovaty", "combined_age": 69},
{"name": "Adrian Holovaty", "combined_age": 63},
],
)
def test_annotate_values_aggregate(self):
alias_age = (
Author.objects.annotate(age_alias=F("age"))
.values(
"age_alias",
)
.aggregate(sum_age=Sum("age_alias"))
)
age = Author.objects.values("age").aggregate(sum_age=Sum("age"))
self.assertEqual(alias_age["sum_age"], age["sum_age"])
def test_annotate_over_annotate(self):
author = (
Author.objects.annotate(age_alias=F("age"))
.annotate(sum_age=Sum("age_alias"))
.get(name="Adrian Holovaty")
)
other_author = Author.objects.annotate(sum_age=Sum("age")).get(
name="Adrian Holovaty"
)
self.assertEqual(author.sum_age, other_author.sum_age)
def test_aggregate_over_aggregate(self):
msg = "Cannot compute Avg('age'): 'age' is an aggregate"
with self.assertRaisesMessage(FieldError, msg):
            Author.objects.annotate(age_alias=F("age")).aggregate(
age=Sum(F("age")),
avg_age=Avg(F("age")),
)
def test_annotated_aggregate_over_annotated_aggregate(self):
with self.assertRaisesMessage(
FieldError, "Cannot compute Sum('id__max'): 'id__max' is an aggregate"
):
Book.objects.annotate(Max("id")).annotate(Sum("id__max"))
class MyMax(Max):
def as_sql(self, compiler, connection):
self.set_source_expressions(self.get_source_expressions()[0:1])
return super().as_sql(compiler, connection)
with self.assertRaisesMessage(
FieldError, "Cannot compute Max('id__max'): 'id__max' is an aggregate"
):
Book.objects.annotate(Max("id")).annotate(my_max=MyMax("id__max", "price"))
def test_multi_arg_aggregate(self):
class MyMax(Max):
output_field = DecimalField()
def as_sql(self, compiler, connection):
copy = self.copy()
copy.set_source_expressions(copy.get_source_expressions()[0:1])
return super(MyMax, copy).as_sql(compiler, connection)
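        # Two positional expressions make this a "complex" aggregate, so an
        # alias is mandatory; as_sql() above drops the extra argument before
        # compiling, keeping the generated query itself valid.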
with self.assertRaisesMessage(TypeError, "Complex aggregates require an alias"):
Book.objects.aggregate(MyMax("pages", "price"))
with self.assertRaisesMessage(
TypeError, "Complex annotations require an alias"
):
Book.objects.annotate(MyMax("pages", "price"))
Book.objects.aggregate(max_field=MyMax("pages", "price"))
def test_add_implementation(self):
class MySum(Sum):
pass
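        # A note on the mechanism: when compiling an expression, Django looks
        # for an `as_<vendor>` method (e.g. as_sqlite or as_postgresql) before
        # falling back to as_sql(), so the setattr() calls below swap in
        # vendor-specific SQL rendering at runtime.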
# test completely changing how the output is rendered
def lower_case_function_override(self, compiler, connection):
sql, params = compiler.compile(self.source_expressions[0])
substitutions = {
"function": self.function.lower(),
"expressions": sql,
"distinct": "",
}
substitutions.update(self.extra)
return self.template % substitutions, params
setattr(MySum, "as_" + connection.vendor, lower_case_function_override)
qs = Book.objects.annotate(
sums=MySum(
F("rating") + F("pages") + F("price"), output_field=IntegerField()
)
)
self.assertEqual(str(qs.query).count("sum("), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
# test changing the dict and delegating
def lower_case_function_super(self, compiler, connection):
self.extra["function"] = self.function.lower()
return super(MySum, self).as_sql(compiler, connection)
setattr(MySum, "as_" + connection.vendor, lower_case_function_super)
qs = Book.objects.annotate(
sums=MySum(
F("rating") + F("pages") + F("price"), output_field=IntegerField()
)
)
self.assertEqual(str(qs.query).count("sum("), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
# test overriding all parts of the template
def be_evil(self, compiler, connection):
substitutions = {"function": "MAX", "expressions": "2", "distinct": ""}
substitutions.update(self.extra)
return self.template % substitutions, ()
setattr(MySum, "as_" + connection.vendor, be_evil)
qs = Book.objects.annotate(
sums=MySum(
F("rating") + F("pages") + F("price"), output_field=IntegerField()
)
)
self.assertEqual(str(qs.query).count("MAX("), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 2)
def test_complex_values_aggregation(self):
max_rating = Book.objects.values("rating").aggregate(
double_max_rating=Max("rating") + Max("rating")
)
self.assertEqual(max_rating["double_max_rating"], 5 * 2)
max_books_per_rating = (
Book.objects.values("rating")
.annotate(books_per_rating=Count("id") + 5)
.aggregate(Max("books_per_rating"))
)
self.assertEqual(max_books_per_rating, {"books_per_rating__max": 3 + 5})
def test_expression_on_aggregation(self):
qs = (
Publisher.objects.annotate(
price_or_median=Greatest(
Avg("book__rating", output_field=DecimalField()), Avg("book__price")
)
)
.filter(price_or_median__gte=F("num_awards"))
.order_by("num_awards")
)
self.assertQuerySetEqual(qs, [1, 3, 7, 9], lambda v: v.num_awards)
qs2 = (
Publisher.objects.annotate(
rating_or_num_awards=Greatest(
Avg("book__rating"), F("num_awards"), output_field=FloatField()
)
)
.filter(rating_or_num_awards__gt=F("num_awards"))
.order_by("num_awards")
)
self.assertQuerySetEqual(qs2, [1, 3], lambda v: v.num_awards)
def test_arguments_must_be_expressions(self):
msg = "QuerySet.aggregate() received non-expression(s): %s."
with self.assertRaisesMessage(TypeError, msg % FloatField()):
Book.objects.aggregate(FloatField())
with self.assertRaisesMessage(TypeError, msg % True):
Book.objects.aggregate(is_book=True)
with self.assertRaisesMessage(
TypeError, msg % ", ".join([str(FloatField()), "True"])
):
Book.objects.aggregate(FloatField(), Avg("price"), is_book=True)
def test_aggregation_subquery_annotation(self):
"""Subquery annotations are excluded from the GROUP BY if they are
not explicitly grouped against."""
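        # Sketch of the expectation: the Subquery stays in the SELECT list,
        # but since nothing groups against it, the GROUP BY should reference
        # only the publisher's own columns.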
latest_book_pubdate_qs = (
Book.objects.filter(publisher=OuterRef("pk"))
.order_by("-pubdate")
.values("pubdate")[:1]
)
publisher_qs = Publisher.objects.annotate(
latest_book_pubdate=Subquery(latest_book_pubdate_qs),
).annotate(count=Count("book"))
with self.assertNumQueries(1) as ctx:
list(publisher_qs)
self.assertEqual(ctx[0]["sql"].count("SELECT"), 2)
# The GROUP BY should not be by alias either.
self.assertEqual(ctx[0]["sql"].lower().count("latest_book_pubdate"), 1)
def test_aggregation_subquery_annotation_exists(self):
latest_book_pubdate_qs = (
Book.objects.filter(publisher=OuterRef("pk"))
.order_by("-pubdate")
.values("pubdate")[:1]
)
publisher_qs = Publisher.objects.annotate(
latest_book_pubdate=Subquery(latest_book_pubdate_qs),
count=Count("book"),
)
self.assertTrue(publisher_qs.exists())
def test_aggregation_filter_exists(self):
publishers_having_more_than_one_book_qs = (
Book.objects.values("publisher")
.annotate(cnt=Count("isbn"))
.filter(cnt__gt=1)
)
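        # .exists() clears the SELECT list, so the grouping derived from
        # values("publisher") has to survive on its own for the HAVING
        # filter on cnt to remain valid.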
query = publishers_having_more_than_one_book_qs.query.exists()
_, _, group_by = query.get_compiler(connection=connection).pre_sql_setup()
self.assertEqual(len(group_by), 1)
def test_aggregation_exists_annotation(self):
published_books = Book.objects.filter(publisher=OuterRef("pk"))
publisher_qs = Publisher.objects.annotate(
published_book=Exists(published_books),
count=Count("book"),
).values_list("name", flat=True)
self.assertCountEqual(
list(publisher_qs),
[
"Apress",
"Morgan Kaufmann",
"Jonno's House of Books",
"Prentice Hall",
"Sams",
],
)
def test_aggregation_subquery_annotation_values(self):
"""
Subquery annotations and external aliases are excluded from the GROUP
BY if they are not selected.
"""
books_qs = (
Book.objects.annotate(
first_author_the_same_age=Subquery(
Author.objects.filter(
age=OuterRef("contact__friends__age"),
)
.order_by("age")
.values("id")[:1],
)
)
.filter(
publisher=self.p1,
first_author_the_same_age__isnull=False,
)
.annotate(
min_age=Min("contact__friends__age"),
)
.values("name", "min_age")
.order_by("name")
)
self.assertEqual(
list(books_qs),
[
{"name": "Practical Django Projects", "min_age": 34},
{
"name": (
"The Definitive Guide to Django: Web Development Done Right"
),
"min_age": 29,
},
],
)
def test_aggregation_subquery_annotation_values_collision(self):
books_rating_qs = Book.objects.filter(
publisher=OuterRef("pk"),
price=Decimal("29.69"),
).values("rating")
publisher_qs = (
Publisher.objects.filter(
book__contact__age__gt=20,
name=self.p1.name,
)
.annotate(
rating=Subquery(books_rating_qs),
contacts_count=Count("book__contact"),
)
.values("rating")
.annotate(total_count=Count("rating"))
)
self.assertEqual(
list(publisher_qs),
[
{"rating": 4.0, "total_count": 2},
],
)
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_aggregation_subquery_annotation_multivalued(self):
"""
Subquery annotations must be included in the GROUP BY if they use
potentially multivalued relations (contain the LOOKUP_SEP).
"""
subquery_qs = Author.objects.filter(
pk=OuterRef("pk"),
book__name=OuterRef("book__name"),
).values("pk")
author_qs = Author.objects.annotate(
subquery_id=Subquery(subquery_qs),
).annotate(count=Count("book"))
self.assertEqual(author_qs.count(), Author.objects.count())
def test_aggregation_order_by_not_selected_annotation_values(self):
result_asc = [
self.b4.pk,
self.b3.pk,
self.b1.pk,
self.b2.pk,
self.b5.pk,
self.b6.pk,
]
result_desc = result_asc[::-1]
tests = [
("min_related_age", result_asc),
("-min_related_age", result_desc),
(F("min_related_age"), result_asc),
(F("min_related_age").asc(), result_asc),
(F("min_related_age").desc(), result_desc),
]
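        # Ordering must resolve even though min_related_age is never selected
        # by values_list(); Coalesce would fall back to contact__age for a
        # book whose authors produce no minimum age.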
for ordering, expected_result in tests:
with self.subTest(ordering=ordering):
books_qs = (
Book.objects.annotate(
min_age=Min("authors__age"),
)
.annotate(
min_related_age=Coalesce("min_age", "contact__age"),
)
.order_by(ordering)
.values_list("pk", flat=True)
)
self.assertEqual(list(books_qs), expected_result)
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_group_by_subquery_annotation(self):
"""
Subquery annotations are included in the GROUP BY if they are
grouped against.
"""
long_books_count_qs = (
Book.objects.filter(
publisher=OuterRef("pk"),
pages__gt=400,
)
.values("publisher")
.annotate(count=Count("pk"))
.values("count")
)
groups = [
Subquery(long_books_count_qs),
long_books_count_qs,
long_books_count_qs.query,
]
for group in groups:
with self.subTest(group=group.__class__.__name__):
long_books_count_breakdown = Publisher.objects.values_list(
group,
).annotate(total=Count("*"))
self.assertEqual(dict(long_books_count_breakdown), {None: 1, 1: 4})
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_group_by_exists_annotation(self):
"""
Exists annotations are included in the GROUP BY if they are
grouped against.
"""
long_books_qs = Book.objects.filter(
publisher=OuterRef("pk"),
pages__gt=800,
)
has_long_books_breakdown = Publisher.objects.values_list(
Exists(long_books_qs),
).annotate(total=Count("*"))
self.assertEqual(dict(has_long_books_breakdown), {True: 2, False: 3})
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_aggregation_subquery_annotation_related_field(self):
publisher = Publisher.objects.create(name=self.a9.name, num_awards=2)
book = Book.objects.create(
isbn="159059999",
name="Test book.",
pages=819,
rating=2.5,
price=Decimal("14.44"),
contact=self.a9,
publisher=publisher,
pubdate=datetime.date(2019, 12, 6),
)
book.authors.add(self.a5, self.a6, self.a7)
books_qs = (
Book.objects.annotate(
contact_publisher=Subquery(
Publisher.objects.filter(
pk=OuterRef("publisher"),
name=OuterRef("contact__name"),
).values("name")[:1],
)
)
.filter(
contact_publisher__isnull=False,
)
.annotate(count=Count("authors"))
)
with self.assertNumQueries(1) as ctx:
self.assertSequenceEqual(books_qs, [book])
        # Outer query SELECT, annotation SELECT, and WHERE SELECT, but the
        # GROUP BY references the selected alias, if allowed.
if connection.features.allows_group_by_refs:
self.assertEqual(ctx[0]["sql"].count("SELECT"), 3)
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_aggregation_nested_subquery_outerref(self):
publisher_with_same_name = Publisher.objects.filter(
id__in=Subquery(
Publisher.objects.filter(
name=OuterRef(OuterRef("publisher__name")),
).values("id"),
),
).values(publisher_count=Count("id"))[:1]
books_breakdown = Book.objects.annotate(
publisher_count=Subquery(publisher_with_same_name),
authors_count=Count("authors"),
).values_list("publisher_count", flat=True)
self.assertSequenceEqual(books_breakdown, [1] * 6)
def test_aggregation_exists_multivalued_outeref(self):
self.assertCountEqual(
Publisher.objects.annotate(
books_exists=Exists(
Book.objects.filter(publisher=OuterRef("book__publisher"))
),
books_count=Count("book"),
),
Publisher.objects.all(),
)
def test_filter_in_subquery_or_aggregation(self):
"""
        Filtering against an aggregate requires the use of the HAVING clause.
        If such a filter is OR'ed with a non-aggregate one, the latter must
        also be moved to the HAVING clause and have its grouping columns
        added to the GROUP BY.
        When this is done through a subquery, the specialized logic that
        groups by outer reference columns should be used instead of the
        subquery itself, as the latter might return multiple rows.
"""
authors = Author.objects.annotate(
Count("book"),
).filter(Q(book__count__gt=0) | Q(pk__in=Book.objects.values("authors")))
self.assertCountEqual(authors, Author.objects.all())
def test_aggregation_random_ordering(self):
"""Random() is not included in the GROUP BY when used for ordering."""
authors = Author.objects.annotate(contact_count=Count("book")).order_by("?")
self.assertQuerySetEqual(
authors,
[
("Adrian Holovaty", 1),
("Jacob Kaplan-Moss", 1),
("Brad Dayley", 1),
("James Bennett", 1),
("Jeffrey Forcier", 1),
("Paul Bissex", 1),
("Wesley J. Chun", 1),
("Stuart Russell", 1),
("Peter Norvig", 2),
],
lambda a: (a.name, a.contact_count),
ordered=False,
)
def test_empty_result_optimization(self):
with self.assertNumQueries(0):
self.assertEqual(
Publisher.objects.none().aggregate(
sum_awards=Sum("num_awards"),
books_count=Count("book"),
),
{
"sum_awards": None,
"books_count": 0,
},
)
# Expression without empty_result_set_value forces queries to be
# executed even if they would return an empty result set.
raw_books_count = Func("book", function="COUNT")
raw_books_count.contains_aggregate = True
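        # Func carries no empty_result_set_value, and contains_aggregate=True
        # is what lets aggregate() accept this raw expression at all.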
with self.assertNumQueries(1):
self.assertEqual(
Publisher.objects.none().aggregate(
sum_awards=Sum("num_awards"),
books_count=raw_books_count,
),
{
"sum_awards": None,
"books_count": 0,
},
)
def test_coalesced_empty_result_set(self):
with self.assertNumQueries(0):
self.assertEqual(
Publisher.objects.none().aggregate(
sum_awards=Coalesce(Sum("num_awards"), 0),
)["sum_awards"],
0,
)
# Multiple expressions.
with self.assertNumQueries(0):
self.assertEqual(
Publisher.objects.none().aggregate(
sum_awards=Coalesce(Sum("num_awards"), None, 0),
)["sum_awards"],
0,
)
# Nested coalesce.
with self.assertNumQueries(0):
self.assertEqual(
Publisher.objects.none().aggregate(
sum_awards=Coalesce(Coalesce(Sum("num_awards"), None), 0),
)["sum_awards"],
0,
)
# Expression coalesce.
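        # RawSQL has no empty_result_set_value either, so this variant cannot
        # be short-circuited in Python and genuinely costs one query.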
with self.assertNumQueries(1):
self.assertIsInstance(
Store.objects.none().aggregate(
latest_opening=Coalesce(
Max("original_opening"),
RawSQL("CURRENT_TIMESTAMP", []),
),
)["latest_opening"],
datetime.datetime,
)
def test_aggregation_default_unsupported_by_count(self):
msg = "Count does not allow default."
with self.assertRaisesMessage(TypeError, msg):
Count("age", default=0)
def test_aggregation_default_unset(self):
for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]:
with self.subTest(Aggregate):
result = Author.objects.filter(age__gt=100).aggregate(
value=Aggregate("age"),
)
self.assertIsNone(result["value"])
def test_aggregation_default_zero(self):
for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]:
with self.subTest(Aggregate):
result = Author.objects.filter(age__gt=100).aggregate(
value=Aggregate("age", default=0),
)
self.assertEqual(result["value"], 0)
def test_aggregation_default_integer(self):
for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]:
with self.subTest(Aggregate):
result = Author.objects.filter(age__gt=100).aggregate(
value=Aggregate("age", default=21),
)
self.assertEqual(result["value"], 21)
def test_aggregation_default_expression(self):
for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]:
with self.subTest(Aggregate):
result = Author.objects.filter(age__gt=100).aggregate(
value=Aggregate("age", default=Value(5) * Value(7)),
)
self.assertEqual(result["value"], 35)
def test_aggregation_default_group_by(self):
qs = (
Publisher.objects.values("name")
.annotate(
books=Count("book"),
pages=Sum("book__pages", default=0),
)
.filter(books=0)
)
self.assertSequenceEqual(
qs,
[{"name": "Jonno's House of Books", "books": 0, "pages": 0}],
)
def test_aggregation_default_compound_expression(self):
# Scale rating to a percentage; default to 50% if no books published.
formula = Avg("book__rating", default=2.5) * 20.0
queryset = Publisher.objects.annotate(rating=formula).order_by("name")
self.assertSequenceEqual(
queryset.values("name", "rating"),
[
{"name": "Apress", "rating": 85.0},
{"name": "Jonno's House of Books", "rating": 50.0},
{"name": "Morgan Kaufmann", "rating": 100.0},
{"name": "Prentice Hall", "rating": 80.0},
{"name": "Sams", "rating": 60.0},
],
)
def test_aggregation_default_using_time_from_python(self):
expr = Min(
"store__friday_night_closing",
filter=~Q(store__name="Amazon.com"),
default=datetime.time(17),
)
if connection.vendor == "mysql":
# Workaround for #30224 for MySQL & MariaDB.
expr.default = Cast(expr.default, TimeField())
queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn")
self.assertSequenceEqual(
queryset.values("isbn", "oldest_store_opening"),
[
{"isbn": "013235613", "oldest_store_opening": datetime.time(21, 30)},
{
"isbn": "013790395",
"oldest_store_opening": datetime.time(23, 59, 59),
},
{"isbn": "067232959", "oldest_store_opening": datetime.time(17)},
{"isbn": "155860191", "oldest_store_opening": datetime.time(21, 30)},
{
"isbn": "159059725",
"oldest_store_opening": datetime.time(23, 59, 59),
},
{"isbn": "159059996", "oldest_store_opening": datetime.time(21, 30)},
],
)
def test_aggregation_default_using_time_from_database(self):
now = timezone.now().astimezone(datetime.timezone.utc)
expr = Min(
"store__friday_night_closing",
filter=~Q(store__name="Amazon.com"),
default=TruncHour(NowUTC(), output_field=TimeField()),
)
queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn")
self.assertSequenceEqual(
queryset.values("isbn", "oldest_store_opening"),
[
{"isbn": "013235613", "oldest_store_opening": datetime.time(21, 30)},
{
"isbn": "013790395",
"oldest_store_opening": datetime.time(23, 59, 59),
},
{"isbn": "067232959", "oldest_store_opening": datetime.time(now.hour)},
{"isbn": "155860191", "oldest_store_opening": datetime.time(21, 30)},
{
"isbn": "159059725",
"oldest_store_opening": datetime.time(23, 59, 59),
},
{"isbn": "159059996", "oldest_store_opening": datetime.time(21, 30)},
],
)
def test_aggregation_default_using_date_from_python(self):
expr = Min("book__pubdate", default=datetime.date(1970, 1, 1))
if connection.vendor == "mysql":
# Workaround for #30224 for MySQL & MariaDB.
expr.default = Cast(expr.default, DateField())
queryset = Publisher.objects.annotate(earliest_pubdate=expr).order_by("name")
self.assertSequenceEqual(
queryset.values("name", "earliest_pubdate"),
[
{"name": "Apress", "earliest_pubdate": datetime.date(2007, 12, 6)},
{
"name": "Jonno's House of Books",
"earliest_pubdate": datetime.date(1970, 1, 1),
},
{
"name": "Morgan Kaufmann",
"earliest_pubdate": datetime.date(1991, 10, 15),
},
{
"name": "Prentice Hall",
"earliest_pubdate": datetime.date(1995, 1, 15),
},
{"name": "Sams", "earliest_pubdate": datetime.date(2008, 3, 3)},
],
)
def test_aggregation_default_using_date_from_database(self):
now = timezone.now().astimezone(datetime.timezone.utc)
expr = Min("book__pubdate", default=TruncDate(NowUTC()))
queryset = Publisher.objects.annotate(earliest_pubdate=expr).order_by("name")
self.assertSequenceEqual(
queryset.values("name", "earliest_pubdate"),
[
{"name": "Apress", "earliest_pubdate": datetime.date(2007, 12, 6)},
{"name": "Jonno's House of Books", "earliest_pubdate": now.date()},
{
"name": "Morgan Kaufmann",
"earliest_pubdate": datetime.date(1991, 10, 15),
},
{
"name": "Prentice Hall",
"earliest_pubdate": datetime.date(1995, 1, 15),
},
{"name": "Sams", "earliest_pubdate": datetime.date(2008, 3, 3)},
],
)
def test_aggregation_default_using_datetime_from_python(self):
expr = Min(
"store__original_opening",
filter=~Q(store__name="Amazon.com"),
default=datetime.datetime(1970, 1, 1),
)
if connection.vendor == "mysql":
# Workaround for #30224 for MySQL & MariaDB.
expr.default = Cast(expr.default, DateTimeField())
queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn")
self.assertSequenceEqual(
queryset.values("isbn", "oldest_store_opening"),
[
{
"isbn": "013235613",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
{
"isbn": "013790395",
"oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37),
},
{
"isbn": "067232959",
"oldest_store_opening": datetime.datetime(1970, 1, 1),
},
{
"isbn": "155860191",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
{
"isbn": "159059725",
"oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37),
},
{
"isbn": "159059996",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
],
)
def test_aggregation_default_using_datetime_from_database(self):
now = timezone.now().astimezone(datetime.timezone.utc)
expr = Min(
"store__original_opening",
filter=~Q(store__name="Amazon.com"),
default=TruncHour(NowUTC(), output_field=DateTimeField()),
)
queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn")
self.assertSequenceEqual(
queryset.values("isbn", "oldest_store_opening"),
[
{
"isbn": "013235613",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
{
"isbn": "013790395",
"oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37),
},
{
"isbn": "067232959",
"oldest_store_opening": now.replace(
minute=0, second=0, microsecond=0, tzinfo=None
),
},
{
"isbn": "155860191",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
{
"isbn": "159059725",
"oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37),
},
{
"isbn": "159059996",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
],
)
def test_aggregation_default_using_duration_from_python(self):
result = Publisher.objects.filter(num_awards__gt=3).aggregate(
value=Sum("duration", default=datetime.timedelta(0)),
)
self.assertEqual(result["value"], datetime.timedelta(0))
def test_aggregation_default_using_duration_from_database(self):
result = Publisher.objects.filter(num_awards__gt=3).aggregate(
value=Sum("duration", default=Now() - Now()),
)
self.assertEqual(result["value"], datetime.timedelta(0))
def test_aggregation_default_using_decimal_from_python(self):
result = Book.objects.filter(rating__lt=3.0).aggregate(
value=Sum("price", default=Decimal("0.00")),
)
self.assertEqual(result["value"], Decimal("0.00"))
def test_aggregation_default_using_decimal_from_database(self):
result = Book.objects.filter(rating__lt=3.0).aggregate(
value=Sum("price", default=Pi()),
)
self.assertAlmostEqual(result["value"], Decimal.from_float(math.pi), places=6)
def test_aggregation_default_passed_another_aggregate(self):
result = Book.objects.aggregate(
value=Sum("price", filter=Q(rating__lt=3.0), default=Avg("pages") / 10.0),
)
self.assertAlmostEqual(result["value"], Decimal("61.72"), places=2)
def test_aggregation_default_after_annotation(self):
result = Publisher.objects.annotate(
double_num_awards=F("num_awards") * 2,
).aggregate(value=Sum("double_num_awards", default=0))
self.assertEqual(result["value"], 40)
def test_aggregation_default_not_in_aggregate(self):
result = Publisher.objects.annotate(
avg_rating=Avg("book__rating", default=2.5),
).aggregate(Sum("num_awards"))
self.assertEqual(result["num_awards__sum"], 20)
def test_exists_none_with_aggregate(self):
qs = Book.objects.annotate(
count=Count("id"),
exists=Exists(Author.objects.none()),
)
self.assertEqual(len(qs), 6)
def test_alias_sql_injection(self):
crafted_alias = """injected_name" from "aggregation_author"; --"""
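        # The crafted alias tries to close the quoted column alias early and
        # comment out the rest of the statement with "; --".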
msg = (
"Column aliases cannot contain whitespace characters, quotation marks, "
"semicolons, or SQL comments."
)
with self.assertRaisesMessage(ValueError, msg):
Author.objects.aggregate(**{crafted_alias: Avg("age")})
def test_exists_extra_where_with_aggregate(self):
qs = Book.objects.annotate(
count=Count("id"),
exists=Exists(Author.objects.extra(where=["1=0"])),
)
self.assertEqual(len(qs), 6)
class AggregateAnnotationPruningTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(age=1)
cls.a2 = Author.objects.create(age=2)
cls.p1 = Publisher.objects.create(num_awards=1)
cls.p2 = Publisher.objects.create(num_awards=0)
cls.b1 = Book.objects.create(
name="b1",
publisher=cls.p1,
pages=100,
rating=4.5,
price=10,
contact=cls.a1,
pubdate=datetime.date.today(),
)
cls.b1.authors.add(cls.a1)
cls.b2 = Book.objects.create(
name="b2",
publisher=cls.p2,
pages=1000,
rating=3.2,
price=50,
contact=cls.a2,
pubdate=datetime.date.today(),
)
cls.b2.authors.add(cls.a1, cls.a2)
def test_unused_aliased_aggregate_pruned(self):
with CaptureQueriesContext(connection) as ctx:
cnt = Book.objects.alias(
authors_count=Count("authors"),
).count()
self.assertEqual(cnt, 2)
sql = ctx.captured_queries[0]["sql"].lower()
self.assertEqual(sql.count("select"), 2, "Subquery wrapping required")
self.assertNotIn("authors_count", sql)
def test_non_aggregate_annotation_pruned(self):
with CaptureQueriesContext(connection) as ctx:
Book.objects.annotate(
name_lower=Lower("name"),
).count()
sql = ctx.captured_queries[0]["sql"].lower()
self.assertEqual(sql.count("select"), 1, "No subquery wrapping required")
self.assertNotIn("name_lower", sql)
def test_unreferenced_aggregate_annotation_pruned(self):
with CaptureQueriesContext(connection) as ctx:
cnt = Book.objects.annotate(
authors_count=Count("authors"),
).count()
self.assertEqual(cnt, 2)
sql = ctx.captured_queries[0]["sql"].lower()
self.assertEqual(sql.count("select"), 2, "Subquery wrapping required")
self.assertNotIn("authors_count", sql)
def test_referenced_aggregate_annotation_kept(self):
with CaptureQueriesContext(connection) as ctx:
Book.objects.annotate(
authors_count=Count("authors"),
).aggregate(Avg("authors_count"))
sql = ctx.captured_queries[0]["sql"].lower()
self.assertEqual(sql.count("select"), 2, "Subquery wrapping required")
self.assertEqual(sql.count("authors_count"), 2)
|
01ded4fd8668dbfc20351d683d6f48e9d964277024ff573a152f84f5c86f6bb5 | from django.db.models import ProtectedError, Q, Sum
from django.forms.models import modelform_factory
from django.test import TestCase, skipIfDBFeature
from .models import (
A,
Address,
B,
Board,
C,
Cafe,
CharLink,
Company,
Contact,
Content,
D,
Guild,
HasLinkThing,
Link,
Node,
Note,
OddRelation1,
OddRelation2,
Organization,
Person,
Place,
Related,
Restaurant,
Tag,
Team,
TextLink,
)
class GenericRelationTests(TestCase):
def test_inherited_models_content_type(self):
"""
GenericRelations on inherited classes use the correct content type.
"""
p = Place.objects.create(name="South Park")
r = Restaurant.objects.create(name="Chubby's")
l1 = Link.objects.create(content_object=p)
l2 = Link.objects.create(content_object=r)
self.assertEqual(list(p.links.all()), [l1])
self.assertEqual(list(r.links.all()), [l2])
def test_reverse_relation_pk(self):
"""
The correct column name is used for the primary key on the
originating model of a query. See #12664.
"""
p = Person.objects.create(account=23, name="Chef")
Address.objects.create(
street="123 Anywhere Place",
city="Conifer",
state="CO",
zipcode="80433",
content_object=p,
)
qs = Person.objects.filter(addresses__zipcode="80433")
self.assertEqual(1, qs.count())
self.assertEqual("Chef", qs[0].name)
def test_charlink_delete(self):
oddrel = OddRelation1.objects.create(name="clink")
CharLink.objects.create(content_object=oddrel)
oddrel.delete()
def test_textlink_delete(self):
oddrel = OddRelation2.objects.create(name="tlink")
TextLink.objects.create(content_object=oddrel)
oddrel.delete()
def test_coerce_object_id_remote_field_cache_persistence(self):
restaurant = Restaurant.objects.create()
CharLink.objects.create(content_object=restaurant)
charlink = CharLink.objects.latest("pk")
self.assertIs(charlink.content_object, charlink.content_object)
        # Also check a model (Cafe) that uses more than one level of
        # multi-table inheritance.
cafe = Cafe.objects.create()
CharLink.objects.create(content_object=cafe)
charlink = CharLink.objects.latest("pk")
self.assertIs(charlink.content_object, charlink.content_object)
def test_q_object_or(self):
"""
SQL query parameters for generic relations are properly
grouped when OR is used (#11535).
        In the reported bug, the first query (below) worked while the second,
        with the same query parameters in reverse order, did not.
The issue is that the generic relation conditions do not get properly
grouped in parentheses.
"""
note_contact = Contact.objects.create()
org_contact = Contact.objects.create()
Note.objects.create(note="note", content_object=note_contact)
org = Organization.objects.create(name="org name")
org.contacts.add(org_contact)
# search with a non-matching note and a matching org name
qs = Contact.objects.filter(
Q(notes__note__icontains=r"other note")
| Q(organizations__name__icontains=r"org name")
)
self.assertIn(org_contact, qs)
# search again, with the same query parameters, in reverse order
qs = Contact.objects.filter(
Q(organizations__name__icontains=r"org name")
| Q(notes__note__icontains=r"other note")
)
self.assertIn(org_contact, qs)
def test_join_reuse(self):
qs = Person.objects.filter(addresses__street="foo").filter(
addresses__street="bar"
)
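        # Each filter() call on a multivalued relation targets its own related
        # row, so the two conditions must map to two separate JOINs rather
        # than being collapsed into one.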
self.assertEqual(str(qs.query).count("JOIN"), 2)
def test_generic_relation_ordering(self):
"""
        Ordering over a generic relation does not include extraneous duplicate
        results, nor does it exclude rows not participating in the relation.
"""
p1 = Place.objects.create(name="South Park")
p2 = Place.objects.create(name="The City")
c = Company.objects.create(name="Chubby's Intl.")
Link.objects.create(content_object=p1)
Link.objects.create(content_object=c)
places = list(Place.objects.order_by("links__id"))
def count_places(place):
return len([p for p in places if p.id == place.id])
self.assertEqual(len(places), 2)
self.assertEqual(count_places(p1), 1)
self.assertEqual(count_places(p2), 1)
def test_target_model_len_zero(self):
"""
Saving a model with a GenericForeignKey to a model instance whose
__len__ method returns 0 (Team.__len__() here) shouldn't fail (#13085).
"""
team1 = Team.objects.create(name="Backend devs")
note = Note(note="Deserve a bonus", content_object=team1)
note.save()
def test_target_model_bool_false(self):
"""
Saving a model with a GenericForeignKey to a model instance whose
__bool__ method returns False (Guild.__bool__() here) shouldn't fail
(#13085).
"""
g1 = Guild.objects.create(name="First guild")
note = Note(note="Note for guild", content_object=g1)
note.save()
@skipIfDBFeature("interprets_empty_strings_as_nulls")
def test_gfk_to_model_with_empty_pk(self):
"""Test related to #13085"""
# Saving model with GenericForeignKey to model instance with an
# empty CharField PK
b1 = Board.objects.create(name="")
tag = Tag(label="VP", content_object=b1)
tag.save()
def test_ticket_20378(self):
        # Create a couple of extra HasLinkThing objects so that the auto-PK
        # values aren't the same for Link and HasLinkThing.
hs1 = HasLinkThing.objects.create()
hs2 = HasLinkThing.objects.create()
hs3 = HasLinkThing.objects.create()
hs4 = HasLinkThing.objects.create()
l1 = Link.objects.create(content_object=hs3)
l2 = Link.objects.create(content_object=hs4)
self.assertSequenceEqual(HasLinkThing.objects.filter(links=l1), [hs3])
self.assertSequenceEqual(HasLinkThing.objects.filter(links=l2), [hs4])
self.assertSequenceEqual(
HasLinkThing.objects.exclude(links=l2), [hs1, hs2, hs3]
)
self.assertSequenceEqual(
HasLinkThing.objects.exclude(links=l1), [hs1, hs2, hs4]
)
def test_ticket_20564(self):
b1 = B.objects.create()
b2 = B.objects.create()
b3 = B.objects.create()
c1 = C.objects.create(b=b1)
c2 = C.objects.create(b=b2)
c3 = C.objects.create(b=b3)
A.objects.create(flag=None, content_object=b1)
A.objects.create(flag=True, content_object=b2)
self.assertSequenceEqual(C.objects.filter(b__a__flag=None), [c1, c3])
self.assertSequenceEqual(C.objects.exclude(b__a__flag=None), [c2])
def test_ticket_20564_nullable_fk(self):
b1 = B.objects.create()
b2 = B.objects.create()
b3 = B.objects.create()
d1 = D.objects.create(b=b1)
d2 = D.objects.create(b=b2)
d3 = D.objects.create(b=b3)
d4 = D.objects.create()
A.objects.create(flag=None, content_object=b1)
A.objects.create(flag=True, content_object=b1)
A.objects.create(flag=True, content_object=b2)
self.assertSequenceEqual(D.objects.exclude(b__a__flag=None), [d2])
self.assertSequenceEqual(D.objects.filter(b__a__flag=None), [d1, d3, d4])
self.assertSequenceEqual(B.objects.filter(a__flag=None), [b1, b3])
self.assertSequenceEqual(B.objects.exclude(a__flag=None), [b2])
def test_extra_join_condition(self):
        # A crude check that content_type_id is taken into account in the
        # join/subquery condition.
self.assertIn(
"content_type_id", str(B.objects.exclude(a__flag=None).query).lower()
)
        # No joins are needed here - the join from the inner query can be
        # trimmed in this case (but not in the case above, where a B with no
        # related A objects at all would then be handled incorrectly).
self.assertNotIn(" join ", str(B.objects.exclude(a__flag=True).query).lower())
self.assertIn(
"content_type_id", str(B.objects.exclude(a__flag=True).query).lower()
)
def test_annotate(self):
hs1 = HasLinkThing.objects.create()
hs2 = HasLinkThing.objects.create()
HasLinkThing.objects.create()
b = Board.objects.create(name=str(hs1.pk))
Link.objects.create(content_object=hs2)
link = Link.objects.create(content_object=hs1)
Link.objects.create(content_object=b)
qs = HasLinkThing.objects.annotate(Sum("links")).filter(pk=hs1.pk)
        # If the content_type restriction isn't in the query's join condition,
        # wrong results are produced here, as the link to b would also match
        # (b and hs1 have equal pks).
self.assertEqual(qs.count(), 1)
self.assertEqual(qs[0].links__sum, link.id)
link.delete()
        # Without a proper LEFT JOIN, no results at all would be produced
        # here.
        # Clear cached results.
qs = qs.all()
self.assertEqual(qs.count(), 1)
# Note - 0 here would be a nicer result...
self.assertIs(qs[0].links__sum, None)
# Finally test that filtering works.
self.assertEqual(qs.filter(links__sum__isnull=True).count(), 1)
self.assertEqual(qs.filter(links__sum__isnull=False).count(), 0)
def test_filter_targets_related_pk(self):
# Use hardcoded PKs to ensure different PKs for "link" and "hs2"
# objects.
HasLinkThing.objects.create(pk=1)
hs2 = HasLinkThing.objects.create(pk=2)
link = Link.objects.create(content_object=hs2, pk=1)
self.assertNotEqual(link.object_id, link.pk)
self.assertSequenceEqual(HasLinkThing.objects.filter(links=link.pk), [hs2])
def test_editable_generic_rel(self):
GenericRelationForm = modelform_factory(HasLinkThing, fields="__all__")
form = GenericRelationForm()
self.assertIn("links", form.fields)
form = GenericRelationForm({"links": None})
self.assertTrue(form.is_valid())
form.save()
links = HasLinkThing._meta.get_field("links")
self.assertEqual(links.save_form_data_calls, 1)
def test_ticket_22998(self):
related = Related.objects.create()
content = Content.objects.create(related_obj=related)
Node.objects.create(content=content)
# deleting the Related cascades to the Content cascades to the Node,
# where the pre_delete signal should fire and prevent deletion.
with self.assertRaises(ProtectedError):
related.delete()
def test_ticket_22982(self):
place = Place.objects.create(name="My Place")
self.assertIn("GenericRelatedObjectManager", str(place.links))
def test_filter_on_related_proxy_model(self):
place = Place.objects.create()
Link.objects.create(content_object=place)
self.assertEqual(Place.objects.get(link_proxy__object_id=place.id), place)
def test_generic_reverse_relation_with_mti(self):
"""
Filtering with a reverse generic relation, where the GenericRelation
comes from multi-table inheritance.
"""
place = Place.objects.create(name="Test Place")
link = Link.objects.create(content_object=place)
result = Link.objects.filter(places=place)
self.assertCountEqual(result, [link])
def test_generic_reverse_relation_with_abc(self):
"""
The reverse generic relation accessor (targets) is created if the
GenericRelation comes from an abstract base model (HasLinks).
"""
thing = HasLinkThing.objects.create()
link = Link.objects.create(content_object=thing)
self.assertCountEqual(link.targets.all(), [thing])
def test_generic_reverse_relation_exclude_filter(self):
place1 = Place.objects.create(name="Test Place 1")
place2 = Place.objects.create(name="Test Place 2")
Link.objects.create(content_object=place1)
link2 = Link.objects.create(content_object=place2)
qs = Link.objects.filter(~Q(places__name="Test Place 1"))
self.assertSequenceEqual(qs, [link2])
qs = Link.objects.exclude(places__name="Test Place 1")
self.assertSequenceEqual(qs, [link2])
|
97efce5647cb142026a6dbad2a0e3328baa5dabdb4e9339142f9fb955dc28de6 | import decimal
import enum
import json
import unittest
import uuid
from django import forms
from django.contrib.admin.utils import display_for_field
from django.core import checks, exceptions, serializers, validators
from django.core.exceptions import FieldError
from django.core.management import call_command
from django.db import IntegrityError, connection, models
from django.db.models.expressions import Exists, OuterRef, RawSQL, Value
from django.db.models.functions import Cast, JSONObject, Upper
from django.test import (
TransactionTestCase,
modify_settings,
override_settings,
skipUnlessDBFeature,
)
from django.test.utils import isolate_apps
from django.utils import timezone
from . import PostgreSQLSimpleTestCase, PostgreSQLTestCase, PostgreSQLWidgetTestCase
from .models import (
ArrayEnumModel,
ArrayFieldSubclass,
CharArrayModel,
DateTimeArrayModel,
IntegerArrayModel,
NestedIntegerArrayModel,
NullableIntegerArrayModel,
OtherTypesArrayModel,
PostgreSQLModel,
Tag,
)
try:
from psycopg2.extras import NumericRange
from django.contrib.postgres.aggregates import ArrayAgg
from django.contrib.postgres.expressions import ArraySubquery
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.fields.array import IndexTransform, SliceTransform
from django.contrib.postgres.forms import (
SimpleArrayField,
SplitArrayField,
SplitArrayWidget,
)
except ImportError:
pass
@isolate_apps("postgres_tests")
class BasicTests(PostgreSQLSimpleTestCase):
def test_get_field_display(self):
class MyModel(PostgreSQLModel):
field = ArrayField(
models.CharField(max_length=16),
choices=[
["Media", [(["vinyl", "cd"], "Audio")]],
(("mp3", "mp4"), "Digital"),
],
)
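        # The first choices entry uses the grouped-choices form ("Media" is
        # the group label); values not listed at all fall back to their string
        # representation, as the last two cases below check.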
tests = (
(["vinyl", "cd"], "Audio"),
(("mp3", "mp4"), "Digital"),
(("a", "b"), "('a', 'b')"),
(["c", "d"], "['c', 'd']"),
)
for value, display in tests:
with self.subTest(value=value, display=display):
instance = MyModel(field=value)
self.assertEqual(instance.get_field_display(), display)
def test_get_field_display_nested_array(self):
class MyModel(PostgreSQLModel):
field = ArrayField(
ArrayField(models.CharField(max_length=16)),
choices=[
[
"Media",
[([["vinyl", "cd"], ("x",)], "Audio")],
],
((["mp3"], ("mp4",)), "Digital"),
],
)
tests = (
([["vinyl", "cd"], ("x",)], "Audio"),
((["mp3"], ("mp4",)), "Digital"),
((("a", "b"), ("c",)), "(('a', 'b'), ('c',))"),
([["a", "b"], ["c"]], "[['a', 'b'], ['c']]"),
)
for value, display in tests:
with self.subTest(value=value, display=display):
instance = MyModel(field=value)
self.assertEqual(instance.get_field_display(), display)
class TestSaveLoad(PostgreSQLTestCase):
def test_integer(self):
instance = IntegerArrayModel(field=[1, 2, 3])
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_char(self):
instance = CharArrayModel(field=["hello", "goodbye"])
instance.save()
loaded = CharArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_dates(self):
instance = DateTimeArrayModel(
datetimes=[timezone.now()],
dates=[timezone.now().date()],
times=[timezone.now().time()],
)
instance.save()
loaded = DateTimeArrayModel.objects.get()
self.assertEqual(instance.datetimes, loaded.datetimes)
self.assertEqual(instance.dates, loaded.dates)
self.assertEqual(instance.times, loaded.times)
def test_tuples(self):
instance = IntegerArrayModel(field=(1,))
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertSequenceEqual(instance.field, loaded.field)
def test_integers_passed_as_strings(self):
        # The base field's get_prep_value() is applied to each item at save
        # time, so the string "1" round-trips as the integer 1.
instance = IntegerArrayModel(field=["1"])
instance.save()
loaded = IntegerArrayModel.objects.get()
self.assertEqual(loaded.field, [1])
def test_default_null(self):
instance = NullableIntegerArrayModel()
instance.save()
loaded = NullableIntegerArrayModel.objects.get(pk=instance.pk)
self.assertIsNone(loaded.field)
self.assertEqual(instance.field, loaded.field)
def test_null_handling(self):
instance = NullableIntegerArrayModel(field=None)
instance.save()
loaded = NullableIntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
instance = IntegerArrayModel(field=None)
with self.assertRaises(IntegrityError):
instance.save()
def test_nested(self):
instance = NestedIntegerArrayModel(field=[[1, 2], [3, 4]])
instance.save()
loaded = NestedIntegerArrayModel.objects.get()
self.assertEqual(instance.field, loaded.field)
def test_other_array_types(self):
instance = OtherTypesArrayModel(
ips=["192.168.0.1", "::1"],
uuids=[uuid.uuid4()],
decimals=[decimal.Decimal(1.25), 1.75],
tags=[Tag(1), Tag(2), Tag(3)],
json=[{"a": 1}, {"b": 2}],
int_ranges=[NumericRange(10, 20), NumericRange(30, 40)],
bigint_ranges=[
NumericRange(7000000000, 10000000000),
NumericRange(50000000000, 70000000000),
],
)
instance.save()
loaded = OtherTypesArrayModel.objects.get()
self.assertEqual(instance.ips, loaded.ips)
self.assertEqual(instance.uuids, loaded.uuids)
self.assertEqual(instance.decimals, loaded.decimals)
self.assertEqual(instance.tags, loaded.tags)
self.assertEqual(instance.json, loaded.json)
self.assertEqual(instance.int_ranges, loaded.int_ranges)
self.assertEqual(instance.bigint_ranges, loaded.bigint_ranges)
def test_null_from_db_value_handling(self):
instance = OtherTypesArrayModel.objects.create(
ips=["192.168.0.1", "::1"],
uuids=[uuid.uuid4()],
decimals=[decimal.Decimal(1.25), 1.75],
tags=None,
)
instance.refresh_from_db()
self.assertIsNone(instance.tags)
self.assertEqual(instance.json, [])
self.assertIsNone(instance.int_ranges)
self.assertIsNone(instance.bigint_ranges)
def test_model_set_on_base_field(self):
instance = IntegerArrayModel()
field = instance._meta.get_field("field")
self.assertEqual(field.model, IntegerArrayModel)
self.assertEqual(field.base_field.model, IntegerArrayModel)
def test_nested_nullable_base_field(self):
instance = NullableIntegerArrayModel.objects.create(
field_nested=[[None, None], [None, None]],
)
self.assertEqual(instance.field_nested, [[None, None], [None, None]])
class TestQuerying(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
cls.objs = NullableIntegerArrayModel.objects.bulk_create(
[
NullableIntegerArrayModel(order=1, field=[1]),
NullableIntegerArrayModel(order=2, field=[2]),
NullableIntegerArrayModel(order=3, field=[2, 3]),
NullableIntegerArrayModel(order=4, field=[20, 30, 40]),
NullableIntegerArrayModel(order=5, field=None),
]
)
def test_empty_list(self):
NullableIntegerArrayModel.objects.create(field=[])
obj = (
NullableIntegerArrayModel.objects.annotate(
empty_array=models.Value(
[], output_field=ArrayField(models.IntegerField())
),
)
.filter(field=models.F("empty_array"))
.get()
)
self.assertEqual(obj.field, [])
self.assertEqual(obj.empty_array, [])
def test_exact(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__exact=[1]), self.objs[:1]
)
def test_exact_null_only_array(self):
obj = NullableIntegerArrayModel.objects.create(
field=[None], field_nested=[None, None]
)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__exact=[None]), [obj]
)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field_nested__exact=[None, None]),
[obj],
)
def test_exact_null_only_nested_array(self):
obj1 = NullableIntegerArrayModel.objects.create(field_nested=[[None, None]])
obj2 = NullableIntegerArrayModel.objects.create(
field_nested=[[None, None], [None, None]],
)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field_nested__exact=[[None, None]],
),
[obj1],
)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field_nested__exact=[[None, None], [None, None]],
),
[obj2],
)
def test_exact_with_expression(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__exact=[Value(1)]),
self.objs[:1],
)
def test_exact_charfield(self):
instance = CharArrayModel.objects.create(field=["text"])
self.assertSequenceEqual(
CharArrayModel.objects.filter(field=["text"]), [instance]
)
def test_exact_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field=[[1, 2], [3, 4]]), [instance]
)
def test_isnull(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__isnull=True), self.objs[-1:]
)
def test_gt(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__gt=[0]), self.objs[:4]
)
def test_lt(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__lt=[2]), self.objs[:1]
)
def test_in(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[[1], [2]]),
self.objs[:2],
)
def test_in_subquery(self):
IntegerArrayModel.objects.create(field=[2, 3])
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field__in=IntegerArrayModel.objects.values_list("field", flat=True)
),
self.objs[2:3],
)
@unittest.expectedFailure
def test_in_including_F_object(self):
        # Arrays passed to filters should be able to contain F objects. This
        # currently doesn't work because psycopg2's mogrify method, which
        # generates the ARRAY() syntax, expects literals rather than column
        # references (#27095).
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[[models.F("id")]]),
self.objs[:2],
)
def test_in_as_F_object(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__in=[models.F("field")]),
self.objs[:4],
)
def test_contained_by(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contained_by=[1, 2]),
self.objs[:2],
)
def test_contained_by_including_F_object(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field__contained_by=[models.F("order"), 2]
),
self.objs[:3],
)
def test_contains(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contains=[2]),
self.objs[1:3],
)
def test_contains_subquery(self):
IntegerArrayModel.objects.create(field=[2, 3])
inner_qs = IntegerArrayModel.objects.values_list("field", flat=True)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__contains=inner_qs[:1]),
self.objs[2:3],
)
inner_qs = IntegerArrayModel.objects.filter(field__contains=OuterRef("field"))
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(Exists(inner_qs)),
self.objs[1:3],
)
def test_contains_including_expression(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field__contains=[2, Value(6) / Value(2)],
),
self.objs[2:3],
)
def test_icontains(self):
# Using the __icontains lookup with ArrayField is inefficient.
instance = CharArrayModel.objects.create(field=["FoO"])
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__icontains="foo"), [instance]
)
def test_contains_charfield(self):
# Regression for #22907
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__contains=["text"]), []
)
def test_contained_by_charfield(self):
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__contained_by=["text"]), []
)
def test_overlap_charfield(self):
self.assertSequenceEqual(
CharArrayModel.objects.filter(field__overlap=["text"]), []
)
def test_overlap_charfield_including_expression(self):
obj_1 = CharArrayModel.objects.create(field=["TEXT", "lower text"])
obj_2 = CharArrayModel.objects.create(field=["lower text", "TEXT"])
CharArrayModel.objects.create(field=["lower text", "text"])
self.assertSequenceEqual(
CharArrayModel.objects.filter(
field__overlap=[
Upper(Value("text")),
"other",
]
),
[obj_1, obj_2],
)
def test_overlap_values(self):
qs = NullableIntegerArrayModel.objects.filter(order__lt=3)
self.assertCountEqual(
NullableIntegerArrayModel.objects.filter(
field__overlap=qs.values_list("field"),
),
self.objs[:3],
)
self.assertCountEqual(
NullableIntegerArrayModel.objects.filter(
field__overlap=qs.values("field"),
),
self.objs[:3],
)
def test_lookups_autofield_array(self):
qs = (
NullableIntegerArrayModel.objects.filter(
field__0__isnull=False,
)
.values("field__0")
.annotate(
arrayagg=ArrayAgg("id"),
)
.order_by("field__0")
)
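        # qs groups rows by their first array element and aggregates the
        # matching pks into an array, so each lookup below filters on the
        # ArrayAgg result. Each case: (lookup, value, expected field__0 list).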
tests = (
("contained_by", [self.objs[1].pk, self.objs[2].pk, 0], [2]),
("contains", [self.objs[2].pk], [2]),
("exact", [self.objs[3].pk], [20]),
("overlap", [self.objs[1].pk, self.objs[3].pk], [2, 20]),
)
for lookup, value, expected in tests:
with self.subTest(lookup=lookup):
self.assertSequenceEqual(
qs.filter(
**{"arrayagg__" + lookup: value},
).values_list("field__0", flat=True),
expected,
)
@skipUnlessDBFeature("allows_group_by_refs")
def test_group_by_order_by_aliases(self):
with self.assertNumQueries(1) as ctx:
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field__0__isnull=False,
)
.values("field__0")
.annotate(arrayagg=ArrayAgg("id"))
.order_by("field__0"),
[
{"field__0": 1, "arrayagg": [self.objs[0].pk]},
{"field__0": 2, "arrayagg": [self.objs[1].pk, self.objs[2].pk]},
{"field__0": 20, "arrayagg": [self.objs[3].pk]},
],
)
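        # When the backend allows GROUP BY references, both clauses should
        # reference the "field__0" select alias instead of repeating the
        # indexing expression.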
alias = connection.ops.quote_name("field__0")
sql = ctx[0]["sql"]
self.assertIn(f"GROUP BY {alias}", sql)
self.assertIn(f"ORDER BY {alias}", sql)
def test_index(self):
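        # field__0 is a 0-based index transform; it maps to PostgreSQL's
        # 1-based array subscript.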
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0=2), self.objs[1:3]
)
def test_index_chained(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0__lt=3), self.objs[0:3]
)
def test_index_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0__0=1), [instance]
)
@unittest.expectedFailure
def test_index_used_on_nested_data(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0=[1, 2]), [instance]
)
def test_index_transform_expression(self):
expr = RawSQL("string_to_array(%s, ';')", ["1;2"])
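        # string_to_array() builds the text array {1,2} on the server;
        # IndexTransform is constructed with the SQL-level 1-based index
        # directly, and the element is cast to an integer for comparison.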
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field__0=Cast(
IndexTransform(1, models.IntegerField, expr),
output_field=models.IntegerField(),
),
),
self.objs[:1],
)
def test_index_annotation(self):
qs = NullableIntegerArrayModel.objects.annotate(second=models.F("field__1"))
self.assertCountEqual(
qs.values_list("second", flat=True),
[None, None, None, 3, 30],
)
def test_overlap(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__overlap=[1, 2]),
self.objs[0:3],
)
def test_len(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__len__lte=2), self.objs[0:3]
)
def test_len_empty_array(self):
obj = NullableIntegerArrayModel.objects.create(field=[])
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__len=0), [obj]
)
def test_slice(self):
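        # Slice lookups use Python-style half-open ranges: field__0_1 is the
        # first element only, field__0_2 the first two.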
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0_1=[2]), self.objs[1:3]
)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(field__0_2=[2, 3]), self.objs[2:3]
)
def test_order_by_slice(self):
more_objs = (
NullableIntegerArrayModel.objects.create(field=[1, 637]),
NullableIntegerArrayModel.objects.create(field=[2, 1]),
NullableIntegerArrayModel.objects.create(field=[3, -98123]),
NullableIntegerArrayModel.objects.create(field=[4, 2]),
)
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.order_by("field__1"),
[
more_objs[2],
more_objs[1],
more_objs[3],
self.objs[2],
self.objs[3],
more_objs[0],
self.objs[4],
self.objs[1],
self.objs[0],
],
)
@unittest.expectedFailure
def test_slice_nested(self):
instance = NestedIntegerArrayModel.objects.create(field=[[1, 2], [3, 4]])
self.assertSequenceEqual(
NestedIntegerArrayModel.objects.filter(field__0__0_1=[1]), [instance]
)
def test_slice_transform_expression(self):
expr = RawSQL("string_to_array(%s, ';')", ["9;2;3"])
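        # SliceTransform(2, 3, expr) slices the server-side array {9,2,3}
        # using PostgreSQL's 1-based, inclusive bounds, yielding {2,3}.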
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
field__0_2=SliceTransform(2, 3, expr)
),
self.objs[2:3],
)
def test_slice_annotation(self):
qs = NullableIntegerArrayModel.objects.annotate(
first_two=models.F("field__0_2"),
)
self.assertCountEqual(
qs.values_list("first_two", flat=True),
[None, [1], [2], [2, 3], [20, 30]],
)
def test_usage_in_subquery(self):
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.filter(
id__in=NullableIntegerArrayModel.objects.filter(field__len=3)
),
[self.objs[3]],
)
def test_enum_lookup(self):
class TestEnum(enum.Enum):
VALUE_1 = "value_1"
instance = ArrayEnumModel.objects.create(array_of_enums=[TestEnum.VALUE_1])
self.assertSequenceEqual(
ArrayEnumModel.objects.filter(array_of_enums__contains=[TestEnum.VALUE_1]),
[instance],
)
def test_unsupported_lookup(self):
msg = (
"Unsupported lookup '0_bar' for ArrayField or join on the field not "
"permitted."
)
with self.assertRaisesMessage(FieldError, msg):
list(NullableIntegerArrayModel.objects.filter(field__0_bar=[2]))
msg = (
"Unsupported lookup '0bar' for ArrayField or join on the field not "
"permitted."
)
with self.assertRaisesMessage(FieldError, msg):
list(NullableIntegerArrayModel.objects.filter(field__0bar=[2]))
def test_grouping_by_annotations_with_array_field_param(self):
value = models.Value([1], output_field=ArrayField(models.IntegerField()))
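        # ARRAY_LENGTH(array, 1) returns the length of the array's first
        # dimension, so the literal [1] annotates every row with 1.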
self.assertEqual(
NullableIntegerArrayModel.objects.annotate(
array_length=models.Func(
value,
1,
function="ARRAY_LENGTH",
output_field=models.IntegerField(),
),
)
.values("array_length")
.annotate(
count=models.Count("pk"),
)
.get()["array_length"],
1,
)
def test_filter_by_array_subquery(self):
inner_qs = NullableIntegerArrayModel.objects.filter(
field__len=models.OuterRef("field__len"),
).values("field")
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.alias(
same_sized_fields=ArraySubquery(inner_qs),
).filter(same_sized_fields__len__gt=1),
self.objs[0:2],
)
def test_annotated_array_subquery(self):
inner_qs = NullableIntegerArrayModel.objects.exclude(
pk=models.OuterRef("pk")
).values("order")
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.annotate(
sibling_ids=ArraySubquery(inner_qs),
)
.get(order=1)
.sibling_ids,
[2, 3, 4, 5],
)
def test_group_by_with_annotated_array_subquery(self):
inner_qs = NullableIntegerArrayModel.objects.exclude(
pk=models.OuterRef("pk")
).values("order")
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.annotate(
sibling_ids=ArraySubquery(inner_qs),
sibling_count=models.Max("sibling_ids__len"),
).values_list("sibling_count", flat=True),
[len(self.objs) - 1] * len(self.objs),
)
def test_annotated_ordered_array_subquery(self):
inner_qs = NullableIntegerArrayModel.objects.order_by("-order").values("order")
self.assertSequenceEqual(
NullableIntegerArrayModel.objects.annotate(
ids=ArraySubquery(inner_qs),
)
.first()
.ids,
[5, 4, 3, 2, 1],
)
def test_annotated_array_subquery_with_json_objects(self):
inner_qs = NullableIntegerArrayModel.objects.exclude(
pk=models.OuterRef("pk")
).values(json=JSONObject(order="order", field="field"))
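        # JSONObject builds one JSON object per sibling row, so the subquery
        # returns an array of {"order": ..., "field": ...} mappings.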
siblings_json = (
NullableIntegerArrayModel.objects.annotate(
siblings_json=ArraySubquery(inner_qs),
)
.values_list("siblings_json", flat=True)
.get(order=1)
)
self.assertSequenceEqual(
siblings_json,
[
{"field": [2], "order": 2},
{"field": [2, 3], "order": 3},
{"field": [20, 30, 40], "order": 4},
{"field": None, "order": 5},
],
)
class TestDateTimeExactQuerying(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
now = timezone.now()
cls.datetimes = [now]
cls.dates = [now.date()]
cls.times = [now.time()]
cls.objs = [
DateTimeArrayModel.objects.create(
datetimes=cls.datetimes, dates=cls.dates, times=cls.times
),
]
def test_exact_datetimes(self):
self.assertSequenceEqual(
DateTimeArrayModel.objects.filter(datetimes=self.datetimes), self.objs
)
def test_exact_dates(self):
self.assertSequenceEqual(
DateTimeArrayModel.objects.filter(dates=self.dates), self.objs
)
def test_exact_times(self):
self.assertSequenceEqual(
DateTimeArrayModel.objects.filter(times=self.times), self.objs
)
class TestOtherTypesExactQuerying(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
cls.ips = ["192.168.0.1", "::1"]
cls.uuids = [uuid.uuid4()]
cls.decimals = [decimal.Decimal(1.25), 1.75]
cls.tags = [Tag(1), Tag(2), Tag(3)]
cls.objs = [
OtherTypesArrayModel.objects.create(
ips=cls.ips,
uuids=cls.uuids,
decimals=cls.decimals,
tags=cls.tags,
)
]
def test_exact_ip_addresses(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(ips=self.ips), self.objs
)
def test_exact_uuids(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(uuids=self.uuids), self.objs
)
def test_exact_decimals(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(decimals=self.decimals), self.objs
)
def test_exact_tags(self):
self.assertSequenceEqual(
OtherTypesArrayModel.objects.filter(tags=self.tags), self.objs
)
@isolate_apps("postgres_tests")
class TestChecks(PostgreSQLSimpleTestCase):
def test_field_checks(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.CharField())
model = MyModel()
errors = model.check()
self.assertEqual(len(errors), 1)
# The inner CharField is missing a max_length.
self.assertEqual(errors[0].id, "postgres.E001")
self.assertIn("max_length", errors[0].msg)
def test_invalid_base_fields(self):
class MyModel(PostgreSQLModel):
field = ArrayField(
models.ManyToManyField("postgres_tests.IntegerArrayModel")
)
model = MyModel()
errors = model.check()
self.assertEqual(len(errors), 1)
self.assertEqual(errors[0].id, "postgres.E002")
def test_invalid_default(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.IntegerField(), default=[])
model = MyModel()
self.assertEqual(
model.check(),
[
checks.Warning(
msg=(
"ArrayField default should be a callable instead of an "
"instance so that it's not shared between all field "
"instances."
),
hint="Use a callable instead, e.g., use `list` instead of `[]`.",
obj=MyModel._meta.get_field("field"),
id="fields.E010",
)
],
)
def test_valid_default(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.IntegerField(), default=list)
model = MyModel()
self.assertEqual(model.check(), [])
def test_valid_default_none(self):
class MyModel(PostgreSQLModel):
field = ArrayField(models.IntegerField(), default=None)
model = MyModel()
self.assertEqual(model.check(), [])
def test_nested_field_checks(self):
"""
Nested ArrayFields are permitted.
"""
class MyModel(PostgreSQLModel):
field = ArrayField(ArrayField(models.CharField()))
model = MyModel()
errors = model.check()
self.assertEqual(len(errors), 1)
# The inner CharField is missing a max_length.
self.assertEqual(errors[0].id, "postgres.E001")
self.assertIn("max_length", errors[0].msg)
def test_choices_tuple_list(self):
class MyModel(PostgreSQLModel):
field = ArrayField(
models.CharField(max_length=16),
choices=[
[
"Media",
[(["vinyl", "cd"], "Audio"), (("vhs", "dvd"), "Video")],
],
(["mp3", "mp4"], "Digital"),
],
)
self.assertEqual(MyModel._meta.get_field("field").check(), [])
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific tests")
class TestMigrations(TransactionTestCase):
available_apps = ["postgres_tests"]
def test_deconstruct(self):
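        # deconstruct() returns (name, path, args, kwargs) so migrations can
        # recreate the field; the base_field must be a fresh copy, not shared.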
field = ArrayField(models.IntegerField())
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(type(new.base_field), type(field.base_field))
self.assertIsNot(new.base_field, field.base_field)
def test_deconstruct_with_size(self):
field = ArrayField(models.IntegerField(), size=3)
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(new.size, field.size)
def test_deconstruct_args(self):
field = ArrayField(models.CharField(max_length=20))
name, path, args, kwargs = field.deconstruct()
new = ArrayField(*args, **kwargs)
self.assertEqual(new.base_field.max_length, field.base_field.max_length)
def test_subclass_deconstruct(self):
field = ArrayField(models.IntegerField())
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.contrib.postgres.fields.ArrayField")
field = ArrayFieldSubclass()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "postgres_tests.models.ArrayFieldSubclass")
@override_settings(
MIGRATION_MODULES={
"postgres_tests": "postgres_tests.array_default_migrations",
}
)
def test_adding_field_with_default(self):
# See #22962
table_name = "postgres_tests_integerarraydefaultmodel"
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
call_command("migrate", "postgres_tests", verbosity=0)
with connection.cursor() as cursor:
self.assertIn(table_name, connection.introspection.table_names(cursor))
call_command("migrate", "postgres_tests", "zero", verbosity=0)
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
@override_settings(
MIGRATION_MODULES={
"postgres_tests": "postgres_tests.array_index_migrations",
}
)
def test_adding_arrayfield_with_index(self):
"""
        ArrayField shouldn't have varchar_pattern_ops or text_pattern_ops indexes.
"""
table_name = "postgres_tests_chartextarrayindexmodel"
call_command("migrate", "postgres_tests", verbosity=0)
with connection.cursor() as cursor:
like_constraint_columns_list = [
v["columns"]
for k, v in list(
connection.introspection.get_constraints(cursor, table_name).items()
)
if k.endswith("_like")
]
# Only the CharField should have a LIKE index.
self.assertEqual(like_constraint_columns_list, [["char2"]])
# All fields should have regular indexes.
with connection.cursor() as cursor:
indexes = [
c["columns"][0]
for c in connection.introspection.get_constraints(
cursor, table_name
).values()
if c["index"] and len(c["columns"]) == 1
]
self.assertIn("char", indexes)
self.assertIn("char2", indexes)
self.assertIn("text", indexes)
call_command("migrate", "postgres_tests", "zero", verbosity=0)
with connection.cursor() as cursor:
self.assertNotIn(table_name, connection.introspection.table_names(cursor))
class TestSerialization(PostgreSQLSimpleTestCase):
test_data = (
'[{"fields": {"field": "[\\"1\\", \\"2\\", null]"}, '
'"model": "postgres_tests.integerarraymodel", "pk": null}]'
)
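    # ArrayField serializes to a JSON-encoded string whose items are the base
    # field's string representations, hence the escaped quotes and the string
    # values "1" and "2" in test_data above.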
def test_dumping(self):
instance = IntegerArrayModel(field=[1, 2, None])
data = serializers.serialize("json", [instance])
self.assertEqual(json.loads(data), json.loads(self.test_data))
def test_loading(self):
instance = list(serializers.deserialize("json", self.test_data))[0].object
self.assertEqual(instance.field, [1, 2, None])
class TestValidation(PostgreSQLSimpleTestCase):
def test_unbounded(self):
field = ArrayField(models.IntegerField())
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([1, None], None)
self.assertEqual(cm.exception.code, "item_invalid")
self.assertEqual(
cm.exception.message % cm.exception.params,
"Item 2 in the array did not validate: This field cannot be null.",
)
def test_blank_true(self):
field = ArrayField(models.IntegerField(blank=True, null=True))
        # Cleaning should not raise: the base field allows blank and null items.
field.clean([1, None], None)
def test_with_size(self):
field = ArrayField(models.IntegerField(), size=3)
field.clean([1, 2, 3], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([1, 2, 3, 4], None)
self.assertEqual(
cm.exception.messages[0],
"List contains 4 items, it should contain no more than 3.",
)
def test_nested_array_mismatch(self):
field = ArrayField(ArrayField(models.IntegerField()))
field.clean([[1, 2], [3, 4]], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([[1, 2], [3, 4, 5]], None)
self.assertEqual(cm.exception.code, "nested_array_mismatch")
self.assertEqual(
cm.exception.messages[0], "Nested arrays must have the same length."
)
def test_with_base_field_error_params(self):
field = ArrayField(models.CharField(max_length=2))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(["abc"], None)
self.assertEqual(len(cm.exception.error_list), 1)
exception = cm.exception.error_list[0]
self.assertEqual(
exception.message,
"Item 1 in the array did not validate: Ensure this value has at most 2 "
"characters (it has 3).",
)
self.assertEqual(exception.code, "item_invalid")
self.assertEqual(
exception.params,
{"nth": 1, "value": "abc", "limit_value": 2, "show_value": 3},
)
def test_with_validators(self):
field = ArrayField(
models.IntegerField(validators=[validators.MinValueValidator(1)])
)
field.clean([1, 2], None)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean([0], None)
self.assertEqual(len(cm.exception.error_list), 1)
exception = cm.exception.error_list[0]
self.assertEqual(
exception.message,
"Item 1 in the array did not validate: Ensure this value is greater than "
"or equal to 1.",
)
self.assertEqual(exception.code, "item_invalid")
self.assertEqual(
exception.params, {"nth": 1, "value": 0, "limit_value": 1, "show_value": 0}
)
class TestSimpleFormField(PostgreSQLSimpleTestCase):
def test_valid(self):
field = SimpleArrayField(forms.CharField())
value = field.clean("a,b,c")
self.assertEqual(value, ["a", "b", "c"])
def test_to_python_fail(self):
field = SimpleArrayField(forms.IntegerField())
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean("a,b,9")
self.assertEqual(
cm.exception.messages[0],
"Item 1 in the array did not validate: Enter a whole number.",
)
def test_validate_fail(self):
field = SimpleArrayField(forms.CharField(required=True))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean("a,b,")
self.assertEqual(
cm.exception.messages[0],
"Item 3 in the array did not validate: This field is required.",
)
def test_validate_fail_base_field_error_params(self):
field = SimpleArrayField(forms.CharField(max_length=2))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean("abc,c,defg")
errors = cm.exception.error_list
self.assertEqual(len(errors), 2)
first_error = errors[0]
self.assertEqual(
first_error.message,
"Item 1 in the array did not validate: Ensure this value has at most 2 "
"characters (it has 3).",
)
self.assertEqual(first_error.code, "item_invalid")
self.assertEqual(
first_error.params,
{"nth": 1, "value": "abc", "limit_value": 2, "show_value": 3},
)
second_error = errors[1]
self.assertEqual(
second_error.message,
"Item 3 in the array did not validate: Ensure this value has at most 2 "
"characters (it has 4).",
)
self.assertEqual(second_error.code, "item_invalid")
self.assertEqual(
second_error.params,
{"nth": 3, "value": "defg", "limit_value": 2, "show_value": 4},
)
def test_validators_fail(self):
field = SimpleArrayField(forms.RegexField("[a-e]{2}"))
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean("a,bc,de")
self.assertEqual(
cm.exception.messages[0],
"Item 1 in the array did not validate: Enter a valid value.",
)
def test_delimiter(self):
field = SimpleArrayField(forms.CharField(), delimiter="|")
value = field.clean("a|b|c")
self.assertEqual(value, ["a", "b", "c"])
def test_delimiter_with_nesting(self):
field = SimpleArrayField(SimpleArrayField(forms.CharField()), delimiter="|")
value = field.clean("a,b|c,d")
self.assertEqual(value, [["a", "b"], ["c", "d"]])
def test_prepare_value(self):
field = SimpleArrayField(forms.CharField())
value = field.prepare_value(["a", "b", "c"])
self.assertEqual(value, "a,b,c")
def test_max_length(self):
field = SimpleArrayField(forms.CharField(), max_length=2)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean("a,b,c")
self.assertEqual(
cm.exception.messages[0],
"List contains 3 items, it should contain no more than 2.",
)
def test_min_length(self):
field = SimpleArrayField(forms.CharField(), min_length=4)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean("a,b,c")
self.assertEqual(
cm.exception.messages[0],
"List contains 3 items, it should contain no fewer than 4.",
)
def test_required(self):
field = SimpleArrayField(forms.CharField(), required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean("")
self.assertEqual(cm.exception.messages[0], "This field is required.")
def test_model_field_formfield(self):
model_field = ArrayField(models.CharField(max_length=27))
form_field = model_field.formfield()
self.assertIsInstance(form_field, SimpleArrayField)
self.assertIsInstance(form_field.base_field, forms.CharField)
self.assertEqual(form_field.base_field.max_length, 27)
def test_model_field_formfield_size(self):
model_field = ArrayField(models.CharField(max_length=27), size=4)
form_field = model_field.formfield()
self.assertIsInstance(form_field, SimpleArrayField)
self.assertEqual(form_field.max_length, 4)
def test_model_field_choices(self):
model_field = ArrayField(models.IntegerField(choices=((1, "A"), (2, "B"))))
form_field = model_field.formfield()
self.assertEqual(form_field.clean("1,2"), [1, 2])
def test_already_converted_value(self):
field = SimpleArrayField(forms.CharField())
vals = ["a", "b", "c"]
self.assertEqual(field.clean(vals), vals)
def test_has_changed(self):
field = SimpleArrayField(forms.IntegerField())
self.assertIs(field.has_changed([1, 2], [1, 2]), False)
self.assertIs(field.has_changed([1, 2], "1,2"), False)
self.assertIs(field.has_changed([1, 2], "1,2,3"), True)
self.assertIs(field.has_changed([1, 2], "a,b"), True)
def test_has_changed_empty(self):
field = SimpleArrayField(forms.CharField())
self.assertIs(field.has_changed(None, None), False)
self.assertIs(field.has_changed(None, ""), False)
self.assertIs(field.has_changed(None, []), False)
self.assertIs(field.has_changed([], None), False)
self.assertIs(field.has_changed([], ""), False)
class TestSplitFormField(PostgreSQLSimpleTestCase):
def test_valid(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
data = {"array_0": "a", "array_1": "b", "array_2": "c"}
form = SplitForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {"array": ["a", "b", "c"]})
def test_required(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), required=True, size=3)
data = {"array_0": "", "array_1": "", "array_2": ""}
form = SplitForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {"array": ["This field is required."]})
def test_remove_trailing_nulls(self):
class SplitForm(forms.Form):
array = SplitArrayField(
forms.CharField(required=False), size=5, remove_trailing_nulls=True
)
data = {
"array_0": "a",
"array_1": "",
"array_2": "b",
"array_3": "",
"array_4": "",
}
form = SplitForm(data)
self.assertTrue(form.is_valid(), form.errors)
self.assertEqual(form.cleaned_data, {"array": ["a", "", "b"]})
def test_remove_trailing_nulls_not_required(self):
class SplitForm(forms.Form):
array = SplitArrayField(
forms.CharField(required=False),
size=2,
remove_trailing_nulls=True,
required=False,
)
data = {"array_0": "", "array_1": ""}
form = SplitForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {"array": []})
def test_required_field(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
data = {"array_0": "a", "array_1": "b", "array_2": ""}
form = SplitForm(data)
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors,
{
"array": [
"Item 3 in the array did not validate: This field is required."
]
},
)
def test_invalid_integer(self):
msg = (
"Item 2 in the array did not validate: Ensure this value is less than or "
"equal to 100."
)
with self.assertRaisesMessage(exceptions.ValidationError, msg):
SplitArrayField(forms.IntegerField(max_value=100), size=2).clean([0, 101])
# To locate the widget's template.
@modify_settings(INSTALLED_APPS={"append": "django.contrib.postgres"})
def test_rendering(self):
class SplitForm(forms.Form):
array = SplitArrayField(forms.CharField(), size=3)
self.assertHTMLEqual(
str(SplitForm()),
"""
<div>
<label for="id_array_0">Array:</label>
<input id="id_array_0" name="array_0" type="text" required>
<input id="id_array_1" name="array_1" type="text" required>
<input id="id_array_2" name="array_2" type="text" required>
</div>
""",
)
def test_invalid_char_length(self):
field = SplitArrayField(forms.CharField(max_length=2), size=3)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(["abc", "c", "defg"])
self.assertEqual(
cm.exception.messages,
[
"Item 1 in the array did not validate: Ensure this value has at most 2 "
"characters (it has 3).",
"Item 3 in the array did not validate: Ensure this value has at most 2 "
"characters (it has 4).",
],
)
def test_splitarraywidget_value_omitted_from_data(self):
class Form(forms.ModelForm):
field = SplitArrayField(forms.IntegerField(), required=False, size=2)
class Meta:
model = IntegerArrayModel
fields = ("field",)
form = Form({"field_0": "1", "field_1": "2"})
self.assertEqual(form.errors, {})
obj = form.save(commit=False)
self.assertEqual(obj.field, [1, 2])
def test_splitarrayfield_has_changed(self):
class Form(forms.ModelForm):
field = SplitArrayField(forms.IntegerField(), required=False, size=2)
class Meta:
model = IntegerArrayModel
fields = ("field",)
tests = [
({}, {"field_0": "", "field_1": ""}, True),
({"field": None}, {"field_0": "", "field_1": ""}, True),
({"field": [1]}, {"field_0": "", "field_1": ""}, True),
({"field": [1]}, {"field_0": "1", "field_1": "0"}, True),
({"field": [1, 2]}, {"field_0": "1", "field_1": "2"}, False),
({"field": [1, 2]}, {"field_0": "a", "field_1": "b"}, True),
]
for initial, data, expected_result in tests:
with self.subTest(initial=initial, data=data):
obj = IntegerArrayModel(**initial)
form = Form(data, instance=obj)
self.assertIs(form.has_changed(), expected_result)
def test_splitarrayfield_remove_trailing_nulls_has_changed(self):
class Form(forms.ModelForm):
field = SplitArrayField(
forms.IntegerField(), required=False, size=2, remove_trailing_nulls=True
)
class Meta:
model = IntegerArrayModel
fields = ("field",)
tests = [
({}, {"field_0": "", "field_1": ""}, False),
({"field": None}, {"field_0": "", "field_1": ""}, False),
({"field": []}, {"field_0": "", "field_1": ""}, False),
({"field": [1]}, {"field_0": "1", "field_1": ""}, False),
]
for initial, data, expected_result in tests:
with self.subTest(initial=initial, data=data):
obj = IntegerArrayModel(**initial)
form = Form(data, instance=obj)
self.assertIs(form.has_changed(), expected_result)
class TestSplitFormWidget(PostgreSQLWidgetTestCase):
def test_get_context(self):
self.assertEqual(
SplitArrayWidget(forms.TextInput(), size=2).get_context(
"name", ["val1", "val2"]
),
{
"widget": {
"name": "name",
"is_hidden": False,
"required": False,
"value": "['val1', 'val2']",
"attrs": {},
"template_name": "postgres/widgets/split_array.html",
"subwidgets": [
{
"name": "name_0",
"is_hidden": False,
"required": False,
"value": "val1",
"attrs": {},
"template_name": "django/forms/widgets/text.html",
"type": "text",
},
{
"name": "name_1",
"is_hidden": False,
"required": False,
"value": "val2",
"attrs": {},
"template_name": "django/forms/widgets/text.html",
"type": "text",
},
],
}
},
)
def test_checkbox_get_context_attrs(self):
context = SplitArrayWidget(
forms.CheckboxInput(),
size=2,
).get_context("name", [True, False])
self.assertEqual(context["widget"]["value"], "[True, False]")
self.assertEqual(
[subwidget["attrs"] for subwidget in context["widget"]["subwidgets"]],
[{"checked": True}, {}],
)
def test_render(self):
self.check_html(
SplitArrayWidget(forms.TextInput(), size=2),
"array",
None,
"""
<input name="array_0" type="text">
<input name="array_1" type="text">
""",
)
def test_render_attrs(self):
self.check_html(
SplitArrayWidget(forms.TextInput(), size=2),
"array",
["val1", "val2"],
attrs={"id": "foo"},
html=(
"""
<input id="foo_0" name="array_0" type="text" value="val1">
<input id="foo_1" name="array_1" type="text" value="val2">
"""
),
)
def test_value_omitted_from_data(self):
widget = SplitArrayWidget(forms.TextInput(), size=2)
self.assertIs(widget.value_omitted_from_data({}, {}, "field"), True)
self.assertIs(
widget.value_omitted_from_data({"field_0": "value"}, {}, "field"), False
)
self.assertIs(
widget.value_omitted_from_data({"field_1": "value"}, {}, "field"), False
)
self.assertIs(
widget.value_omitted_from_data(
{"field_0": "value", "field_1": "value"}, {}, "field"
),
False,
)
class TestAdminUtils(PostgreSQLTestCase):
empty_value = "-empty-"
def test_array_display_for_field(self):
array_field = ArrayField(models.IntegerField())
display_value = display_for_field(
[1, 2],
array_field,
self.empty_value,
)
self.assertEqual(display_value, "1, 2")
def test_array_with_choices_display_for_field(self):
array_field = ArrayField(
models.IntegerField(),
choices=[
([1, 2, 3], "1st choice"),
([1, 2], "2nd choice"),
],
)
display_value = display_for_field(
[1, 2],
array_field,
self.empty_value,
)
self.assertEqual(display_value, "2nd choice")
display_value = display_for_field(
[99, 99],
array_field,
self.empty_value,
)
self.assertEqual(display_value, self.empty_value)
import datetime
from unittest import mock
from django.contrib.postgres.indexes import OpClass
from django.core.exceptions import ValidationError
from django.db import IntegrityError, NotSupportedError, connection, transaction
from django.db.models import (
CheckConstraint,
Deferrable,
F,
Func,
IntegerField,
Model,
Q,
UniqueConstraint,
)
from django.db.models.fields.json import KeyTextTransform
from django.db.models.functions import Cast, Left, Lower
from django.test import ignore_warnings, modify_settings, skipUnlessDBFeature
from django.test.utils import isolate_apps
from django.utils import timezone
from django.utils.deprecation import RemovedInDjango50Warning
from . import PostgreSQLTestCase
from .models import HotelReservation, IntegerArrayModel, RangesModel, Room, Scene
try:
from psycopg2.extras import DateRange, NumericRange
from django.contrib.postgres.constraints import ExclusionConstraint
from django.contrib.postgres.fields import (
DateTimeRangeField,
RangeBoundary,
RangeOperators,
)
except ImportError:
pass
@modify_settings(INSTALLED_APPS={"append": "django.contrib.postgres"})
class SchemaTests(PostgreSQLTestCase):
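    # Joins pg_opclass -> pg_index -> pg_class to list the operator classes
    # used by the index that backs the named constraint.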
get_opclass_query = """
SELECT opcname, c.relname FROM pg_opclass AS oc
JOIN pg_index as i on oc.oid = ANY(i.indclass)
JOIN pg_class as c on c.oid = i.indexrelid
WHERE c.relname = %s
"""
def get_constraints(self, table):
"""Get the constraints on the table using a new cursor."""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
def test_check_constraint_range_value(self):
constraint_name = "ints_between"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = CheckConstraint(
check=Q(ints__contained_by=NumericRange(10, 30)),
name=constraint_name,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(20, 50))
RangesModel.objects.create(ints=(10, 30))
def test_check_constraint_array_contains(self):
constraint = CheckConstraint(
check=Q(field__contains=[1]),
name="array_contains",
)
msg = f"Constraint “{constraint.name}” is violated."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(IntegerArrayModel, IntegerArrayModel())
constraint.validate(IntegerArrayModel, IntegerArrayModel(field=[1]))
def test_check_constraint_daterange_contains(self):
constraint_name = "dates_contains"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = CheckConstraint(
check=Q(dates__contains=F("dates_inner")),
name=constraint_name,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
date_1 = datetime.date(2016, 1, 1)
date_2 = datetime.date(2016, 1, 4)
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(
dates=(date_1, date_2),
dates_inner=(date_1, date_2.replace(day=5)),
)
RangesModel.objects.create(
dates=(date_1, date_2),
dates_inner=(date_1, date_2),
)
def test_check_constraint_datetimerange_contains(self):
constraint_name = "timestamps_contains"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = CheckConstraint(
check=Q(timestamps__contains=F("timestamps_inner")),
name=constraint_name,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
datetime_1 = datetime.datetime(2016, 1, 1)
datetime_2 = datetime.datetime(2016, 1, 2, 12)
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(
timestamps=(datetime_1, datetime_2),
timestamps_inner=(datetime_1, datetime_2.replace(hour=13)),
)
RangesModel.objects.create(
timestamps=(datetime_1, datetime_2),
timestamps_inner=(datetime_1, datetime_2),
)
def test_check_constraint_range_contains(self):
constraint = CheckConstraint(
check=Q(ints__contains=(1, 5)),
name="ints_contains",
)
msg = f"Constraint “{constraint.name}” is violated."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(RangesModel, RangesModel(ints=(6, 10)))
def test_check_constraint_range_lower_upper(self):
constraint = CheckConstraint(
check=Q(ints__startswith__gte=0) & Q(ints__endswith__lte=99),
name="ints_range_lower_upper",
)
msg = f"Constraint “{constraint.name}” is violated."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(RangesModel, RangesModel(ints=(-1, 20)))
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(RangesModel, RangesModel(ints=(0, 100)))
constraint.validate(RangesModel, RangesModel(ints=(0, 99)))
def test_check_constraint_range_lower_with_nulls(self):
constraint = CheckConstraint(
check=Q(ints__isnull=True) | Q(ints__startswith__gte=0),
name="ints_optional_positive_range",
)
constraint.validate(RangesModel, RangesModel())
constraint = CheckConstraint(
check=Q(ints__startswith__gte=0),
name="ints_positive_range",
)
constraint.validate(RangesModel, RangesModel())
def test_opclass(self):
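        # varchar_pattern_ops allows the index to serve LIKE 'prefix%'
        # comparisons regardless of locale; the pg_opclass query below
        # verifies the index was created with that operator class.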
constraint = UniqueConstraint(
name="test_opclass",
fields=["scene"],
opclasses=["varchar_pattern_ops"],
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
self.assertIn(constraint.name, self.get_constraints(Scene._meta.db_table))
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
self.assertEqual(
cursor.fetchall(),
[("varchar_pattern_ops", constraint.name)],
)
# Drop the constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Scene, constraint)
self.assertNotIn(constraint.name, self.get_constraints(Scene._meta.db_table))
def test_opclass_multiple_columns(self):
constraint = UniqueConstraint(
name="test_opclass_multiple",
fields=["scene", "setting"],
opclasses=["varchar_pattern_ops", "text_pattern_ops"],
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
expected_opclasses = (
("varchar_pattern_ops", constraint.name),
("text_pattern_ops", constraint.name),
)
self.assertCountEqual(cursor.fetchall(), expected_opclasses)
def test_opclass_partial(self):
constraint = UniqueConstraint(
name="test_opclass_partial",
fields=["scene"],
opclasses=["varchar_pattern_ops"],
condition=Q(setting__contains="Sir Bedemir's Castle"),
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
self.assertCountEqual(
cursor.fetchall(),
[("varchar_pattern_ops", constraint.name)],
)
@skipUnlessDBFeature("supports_covering_indexes")
def test_opclass_include(self):
constraint = UniqueConstraint(
name="test_opclass_include",
fields=["scene"],
opclasses=["varchar_pattern_ops"],
include=["setting"],
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
self.assertCountEqual(
cursor.fetchall(),
[("varchar_pattern_ops", constraint.name)],
)
@skipUnlessDBFeature("supports_expression_indexes")
def test_opclass_func(self):
constraint = UniqueConstraint(
OpClass(Lower("scene"), name="text_pattern_ops"),
name="test_opclass_func",
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
constraints = self.get_constraints(Scene._meta.db_table)
self.assertIs(constraints[constraint.name]["unique"], True)
self.assertIn(constraint.name, constraints)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query, [constraint.name])
self.assertEqual(
cursor.fetchall(),
[("text_pattern_ops", constraint.name)],
)
Scene.objects.create(scene="Scene 10", setting="The dark forest of Ewing")
with self.assertRaises(IntegrityError), transaction.atomic():
Scene.objects.create(scene="ScEnE 10", setting="Sir Bedemir's Castle")
Scene.objects.create(scene="Scene 5", setting="Sir Bedemir's Castle")
# Drop the constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(Scene, constraint)
self.assertNotIn(constraint.name, self.get_constraints(Scene._meta.db_table))
Scene.objects.create(scene="ScEnE 10", setting="Sir Bedemir's Castle")
@modify_settings(INSTALLED_APPS={"append": "django.contrib.postgres"})
class ExclusionConstraintTests(PostgreSQLTestCase):
def get_constraints(self, table):
"""Get the constraints on the table using a new cursor."""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
def test_invalid_condition(self):
msg = "ExclusionConstraint.condition must be a Q instance."
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
index_type="GIST",
name="exclude_invalid_condition",
expressions=[(F("datespan"), RangeOperators.OVERLAPS)],
condition=F("invalid"),
)
def test_invalid_index_type(self):
msg = "Exclusion constraints only support GiST or SP-GiST indexes."
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
index_type="gin",
name="exclude_invalid_index_type",
expressions=[(F("datespan"), RangeOperators.OVERLAPS)],
)
def test_invalid_expressions(self):
msg = "The expressions must be a list of 2-tuples."
for expressions in (["foo"], [("foo")], [("foo_1", "foo_2", "foo_3")]):
with self.subTest(expressions), self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
index_type="GIST",
name="exclude_invalid_expressions",
expressions=expressions,
)
def test_empty_expressions(self):
msg = "At least one expression is required to define an exclusion constraint."
for empty_expressions in (None, []):
with self.subTest(empty_expressions), self.assertRaisesMessage(
ValueError, msg
):
ExclusionConstraint(
index_type="GIST",
name="exclude_empty_expressions",
expressions=empty_expressions,
)
def test_invalid_deferrable(self):
msg = "ExclusionConstraint.deferrable must be a Deferrable instance."
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
name="exclude_invalid_deferrable",
expressions=[(F("datespan"), RangeOperators.OVERLAPS)],
deferrable="invalid",
)
def test_invalid_include_type(self):
msg = "ExclusionConstraint.include must be a list or tuple."
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
name="exclude_invalid_include",
expressions=[(F("datespan"), RangeOperators.OVERLAPS)],
include="invalid",
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_invalid_opclasses_type(self):
msg = "ExclusionConstraint.opclasses must be a list or tuple."
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
name="exclude_invalid_opclasses",
expressions=[(F("datespan"), RangeOperators.OVERLAPS)],
opclasses="invalid",
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_opclasses_and_expressions_same_length(self):
msg = (
"ExclusionConstraint.expressions and "
"ExclusionConstraint.opclasses must have the same number of "
"elements."
)
with self.assertRaisesMessage(ValueError, msg):
ExclusionConstraint(
name="exclude_invalid_expressions_opclasses_length",
expressions=[(F("datespan"), RangeOperators.OVERLAPS)],
opclasses=["foo", "bar"],
)
def test_repr(self):
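        # RangeOperators map to the PostgreSQL operators shown in the reprs:
        # OVERLAPS is "&&", EQUAL is "=", ADJACENT_TO is "-|-".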
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
(F("datespan"), RangeOperators.OVERLAPS),
(F("room"), RangeOperators.EQUAL),
],
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='GIST' expressions=["
"(F(datespan), '&&'), (F(room), '=')] name='exclude_overlapping'>",
)
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[(F("datespan"), RangeOperators.ADJACENT_TO)],
condition=Q(cancelled=False),
index_type="SPGiST",
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='SPGiST' expressions=["
"(F(datespan), '-|-')] name='exclude_overlapping' "
"condition=(AND: ('cancelled', False))>",
)
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[(F("datespan"), RangeOperators.ADJACENT_TO)],
deferrable=Deferrable.IMMEDIATE,
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='GIST' expressions=["
"(F(datespan), '-|-')] name='exclude_overlapping' "
"deferrable=Deferrable.IMMEDIATE>",
)
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[(F("datespan"), RangeOperators.ADJACENT_TO)],
include=["cancelled", "room"],
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='GIST' expressions=["
"(F(datespan), '-|-')] name='exclude_overlapping' "
"include=('cancelled', 'room')>",
)
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
(OpClass("datespan", name="range_ops"), RangeOperators.ADJACENT_TO),
],
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='GIST' expressions=["
"(OpClass(F(datespan), name=range_ops), '-|-')] "
"name='exclude_overlapping'>",
)
def test_eq(self):
constraint_1 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
(F("datespan"), RangeOperators.OVERLAPS),
(F("room"), RangeOperators.EQUAL),
],
condition=Q(cancelled=False),
)
constraint_2 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
)
constraint_3 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[("datespan", RangeOperators.OVERLAPS)],
condition=Q(cancelled=False),
)
constraint_4 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
deferrable=Deferrable.DEFERRED,
)
constraint_5 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
deferrable=Deferrable.IMMEDIATE,
)
constraint_6 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
deferrable=Deferrable.IMMEDIATE,
include=["cancelled"],
)
constraint_7 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
include=["cancelled"],
)
with ignore_warnings(category=RemovedInDjango50Warning):
constraint_8 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
include=["cancelled"],
opclasses=["range_ops", "range_ops"],
)
constraint_9 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
opclasses=["range_ops", "range_ops"],
)
self.assertNotEqual(constraint_2, constraint_9)
self.assertNotEqual(constraint_7, constraint_8)
constraint_10 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
(F("datespan"), RangeOperators.OVERLAPS),
(F("room"), RangeOperators.EQUAL),
],
condition=Q(cancelled=False),
violation_error_message="custom error",
)
constraint_11 = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
(F("datespan"), RangeOperators.OVERLAPS),
(F("room"), RangeOperators.EQUAL),
],
condition=Q(cancelled=False),
violation_error_message="other custom error",
)
self.assertEqual(constraint_1, constraint_1)
self.assertEqual(constraint_1, mock.ANY)
self.assertNotEqual(constraint_1, constraint_2)
self.assertNotEqual(constraint_1, constraint_3)
self.assertNotEqual(constraint_1, constraint_4)
self.assertNotEqual(constraint_1, constraint_10)
self.assertNotEqual(constraint_2, constraint_3)
self.assertNotEqual(constraint_2, constraint_4)
self.assertNotEqual(constraint_2, constraint_7)
self.assertNotEqual(constraint_4, constraint_5)
self.assertNotEqual(constraint_5, constraint_6)
self.assertNotEqual(constraint_1, object())
self.assertNotEqual(constraint_10, constraint_11)
self.assertEqual(constraint_10, constraint_10)
def test_deconstruct(self):
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(
path, "django.contrib.postgres.constraints.ExclusionConstraint"
)
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{
"name": "exclude_overlapping",
"expressions": [
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
},
)
def test_deconstruct_index_type(self):
constraint = ExclusionConstraint(
name="exclude_overlapping",
index_type="SPGIST",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(
path, "django.contrib.postgres.constraints.ExclusionConstraint"
)
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{
"name": "exclude_overlapping",
"index_type": "SPGIST",
"expressions": [
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
},
)
def test_deconstruct_condition(self):
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
condition=Q(cancelled=False),
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(
path, "django.contrib.postgres.constraints.ExclusionConstraint"
)
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{
"name": "exclude_overlapping",
"expressions": [
("datespan", RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
"condition": Q(cancelled=False),
},
)
def test_deconstruct_deferrable(self):
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[("datespan", RangeOperators.OVERLAPS)],
deferrable=Deferrable.DEFERRED,
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(
path, "django.contrib.postgres.constraints.ExclusionConstraint"
)
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{
"name": "exclude_overlapping",
"expressions": [("datespan", RangeOperators.OVERLAPS)],
"deferrable": Deferrable.DEFERRED,
},
)
def test_deconstruct_include(self):
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[("datespan", RangeOperators.OVERLAPS)],
include=["cancelled", "room"],
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(
path, "django.contrib.postgres.constraints.ExclusionConstraint"
)
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{
"name": "exclude_overlapping",
"expressions": [("datespan", RangeOperators.OVERLAPS)],
"include": ("cancelled", "room"),
},
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_deconstruct_opclasses(self):
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[("datespan", RangeOperators.OVERLAPS)],
opclasses=["range_ops"],
)
path, args, kwargs = constraint.deconstruct()
self.assertEqual(
path, "django.contrib.postgres.constraints.ExclusionConstraint"
)
self.assertEqual(args, ())
self.assertEqual(
kwargs,
{
"name": "exclude_overlapping",
"expressions": [("datespan", RangeOperators.OVERLAPS)],
"opclasses": ["range_ops"],
},
)
def _test_range_overlaps(self, constraint):
# Create exclusion constraint.
self.assertNotIn(
constraint.name, self.get_constraints(HotelReservation._meta.db_table)
)
with connection.schema_editor() as editor:
editor.add_constraint(HotelReservation, constraint)
self.assertIn(
constraint.name, self.get_constraints(HotelReservation._meta.db_table)
)
# Add initial reservations.
room101 = Room.objects.create(number=101)
room102 = Room.objects.create(number=102)
datetimes = [
timezone.datetime(2018, 6, 20),
timezone.datetime(2018, 6, 24),
timezone.datetime(2018, 6, 26),
timezone.datetime(2018, 6, 28),
timezone.datetime(2018, 6, 29),
]
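        # Seed room 102 with back-to-back stays (June 20-24 and 24-28) plus a
        # cancelled stay (June 28-29); every constraint passed to this helper
        # has condition=Q(cancelled=False), so cancelled rows are exempt.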
reservation = HotelReservation.objects.create(
datespan=DateRange(datetimes[0].date(), datetimes[1].date()),
start=datetimes[0],
end=datetimes[1],
room=room102,
)
constraint.validate(HotelReservation, reservation)
HotelReservation.objects.create(
datespan=DateRange(datetimes[1].date(), datetimes[3].date()),
start=datetimes[1],
end=datetimes[3],
room=room102,
)
HotelReservation.objects.create(
datespan=DateRange(datetimes[3].date(), datetimes[4].date()),
start=datetimes[3],
end=datetimes[4],
room=room102,
cancelled=True,
)
# Overlap dates.
with self.assertRaises(IntegrityError), transaction.atomic():
reservation = HotelReservation(
datespan=(datetimes[1].date(), datetimes[2].date()),
start=datetimes[1],
end=datetimes[2],
room=room102,
)
msg = f"Constraint “{constraint.name}” is violated."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(HotelReservation, reservation)
reservation.save()
# Valid range.
other_valid_reservations = [
# Other room.
HotelReservation(
datespan=(datetimes[1].date(), datetimes[2].date()),
start=datetimes[1],
end=datetimes[2],
room=room101,
),
# Cancelled reservation.
HotelReservation(
datespan=(datetimes[1].date(), datetimes[1].date()),
start=datetimes[1],
end=datetimes[2],
room=room102,
cancelled=True,
),
# Other adjacent dates.
HotelReservation(
datespan=(datetimes[3].date(), datetimes[4].date()),
start=datetimes[3],
end=datetimes[4],
room=room102,
),
]
for reservation in other_valid_reservations:
constraint.validate(HotelReservation, reservation)
HotelReservation.objects.bulk_create(other_valid_reservations)
        # Excluded fields: validate() skips the check entirely when a field
        # used by one of the constraint's expressions is excluded.
constraint.validate(
HotelReservation,
HotelReservation(
datespan=(datetimes[1].date(), datetimes[2].date()),
start=datetimes[1],
end=datetimes[2],
room=room102,
),
exclude={"room"},
)
constraint.validate(
HotelReservation,
HotelReservation(
datespan=(datetimes[1].date(), datetimes[2].date()),
start=datetimes[1],
end=datetimes[2],
room=room102,
),
exclude={"datespan", "start", "end", "room"},
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_range_overlaps_custom_opclasses(self):
class TsTzRange(Func):
function = "TSTZRANGE"
output_field = DateTimeRangeField()
constraint = ExclusionConstraint(
name="exclude_overlapping_reservations_custom",
expressions=[
(TsTzRange("start", "end", RangeBoundary()), RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
condition=Q(cancelled=False),
opclasses=["range_ops", "gist_int4_ops"],
)
self._test_range_overlaps(constraint)
def test_range_overlaps_custom(self):
class TsTzRange(Func):
function = "TSTZRANGE"
output_field = DateTimeRangeField()
constraint = ExclusionConstraint(
name="exclude_overlapping_reservations_custom_opclass",
expressions=[
(
OpClass(TsTzRange("start", "end", RangeBoundary()), "range_ops"),
RangeOperators.OVERLAPS,
),
(OpClass("room", "gist_int4_ops"), RangeOperators.EQUAL),
],
condition=Q(cancelled=False),
)
self._test_range_overlaps(constraint)
def test_range_overlaps(self):
constraint = ExclusionConstraint(
name="exclude_overlapping_reservations",
expressions=[
(F("datespan"), RangeOperators.OVERLAPS),
("room", RangeOperators.EQUAL),
],
condition=Q(cancelled=False),
)
self._test_range_overlaps(constraint)
def test_range_adjacent(self):
constraint_name = "ints_adjacent"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
RangesModel.objects.create(ints=(20, 50))
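        # Integer ranges are half-open, so (10, 20) is adjacent to the
        # existing (20, 50) and is rejected, while (10, 19) and (51, 60)
        # leave a one-integer gap and are allowed.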
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(10, 20))
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
# Drop the constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(RangesModel, constraint)
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
def test_validate_range_adjacent(self):
constraint = ExclusionConstraint(
name="ints_adjacent",
expressions=[("ints", RangeOperators.ADJACENT_TO)],
violation_error_message="Custom error message.",
)
range_obj = RangesModel.objects.create(ints=(20, 50))
constraint.validate(RangesModel, range_obj)
msg = "Custom error message."
with self.assertRaisesMessage(ValidationError, msg):
constraint.validate(RangesModel, RangesModel(ints=(10, 20)))
constraint.validate(RangesModel, RangesModel(ints=(10, 19)))
constraint.validate(RangesModel, RangesModel(ints=(51, 60)))
constraint.validate(RangesModel, RangesModel(ints=(10, 20)), exclude={"ints"})
def test_expressions_with_params(self):
constraint_name = "scene_left_equal"
self.assertNotIn(constraint_name, self.get_constraints(Scene._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[(Left("scene", 4), RangeOperators.EQUAL)],
)
with connection.schema_editor() as editor:
editor.add_constraint(Scene, constraint)
self.assertIn(constraint_name, self.get_constraints(Scene._meta.db_table))
def test_expressions_with_key_transform(self):
constraint_name = "exclude_overlapping_reservations_smoking"
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[
(F("datespan"), RangeOperators.OVERLAPS),
(KeyTextTransform("smoking", "requirements"), RangeOperators.EQUAL),
],
)
with connection.schema_editor() as editor:
editor.add_constraint(HotelReservation, constraint)
self.assertIn(
constraint_name,
self.get_constraints(HotelReservation._meta.db_table),
)
def test_index_transform(self):
constraint_name = "first_index_equal"
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("field__0", RangeOperators.EQUAL)],
)
with connection.schema_editor() as editor:
editor.add_constraint(IntegerArrayModel, constraint)
self.assertIn(
constraint_name,
self.get_constraints(IntegerArrayModel._meta.db_table),
)
def test_range_adjacent_initially_deferred(self):
constraint_name = "ints_adjacent_deferred"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
deferrable=Deferrable.DEFERRED,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
RangesModel.objects.create(ints=(20, 50))
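        # The adjacent insert succeeds for now: a DEFERRED constraint isn't
        # checked until commit (or until SET CONSTRAINTS ... IMMEDIATE).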
adjacent_range = RangesModel.objects.create(ints=(10, 20))
# Constraint behavior can be changed with SET CONSTRAINTS.
with self.assertRaises(IntegrityError):
with transaction.atomic(), connection.cursor() as cursor:
quoted_name = connection.ops.quote_name(constraint_name)
cursor.execute("SET CONSTRAINTS %s IMMEDIATE" % quoted_name)
# Remove adjacent range before the end of transaction.
adjacent_range.delete()
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
def test_range_adjacent_initially_deferred_with_condition(self):
constraint_name = "ints_adjacent_deferred_with_condition"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
condition=Q(ints__lt=(100, 200)),
deferrable=Deferrable.DEFERRED,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
RangesModel.objects.create(ints=(20, 50))
adjacent_range = RangesModel.objects.create(ints=(10, 20))
# Constraint behavior can be changed with SET CONSTRAINTS.
with self.assertRaises(IntegrityError):
with transaction.atomic(), connection.cursor() as cursor:
quoted_name = connection.ops.quote_name(constraint_name)
cursor.execute(f"SET CONSTRAINTS {quoted_name} IMMEDIATE")
# Remove adjacent range before the end of transaction.
adjacent_range.delete()
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
# Add adjacent range that doesn't match the condition.
RangesModel.objects.create(ints=(200, 500))
adjacent_range = RangesModel.objects.create(ints=(100, 200))
        # This time SET CONSTRAINTS IMMEDIATE succeeds because neither new
        # range matches the constraint's condition.
with transaction.atomic(), connection.cursor() as cursor:
quoted_name = connection.ops.quote_name(constraint_name)
cursor.execute(f"SET CONSTRAINTS {quoted_name} IMMEDIATE")
def test_range_adjacent_gist_include(self):
constraint_name = "ints_adjacent_gist_include"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="gist",
include=["decimals", "ints"],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
RangesModel.objects.create(ints=(20, 50))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(10, 20))
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
@skipUnlessDBFeature("supports_covering_spgist_indexes")
def test_range_adjacent_spgist_include(self):
constraint_name = "ints_adjacent_spgist_include"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="spgist",
include=["decimals", "ints"],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
RangesModel.objects.create(ints=(20, 50))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(10, 20))
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
def test_range_adjacent_gist_include_condition(self):
constraint_name = "ints_adjacent_gist_include_condition"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="gist",
include=["decimals"],
condition=Q(id__gte=100),
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
@skipUnlessDBFeature("supports_covering_spgist_indexes")
def test_range_adjacent_spgist_include_condition(self):
constraint_name = "ints_adjacent_spgist_include_condition"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="spgist",
include=["decimals"],
condition=Q(id__gte=100),
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
def test_range_adjacent_gist_include_deferrable(self):
constraint_name = "ints_adjacent_gist_include_deferrable"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="gist",
include=["decimals"],
deferrable=Deferrable.DEFERRED,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
@skipUnlessDBFeature("supports_covering_spgist_indexes")
def test_range_adjacent_spgist_include_deferrable(self):
constraint_name = "ints_adjacent_spgist_include_deferrable"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="spgist",
include=["decimals"],
deferrable=Deferrable.DEFERRED,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
def test_spgist_include_not_supported(self):
constraint_name = "ints_adjacent_spgist_include_not_supported"
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="spgist",
include=["id"],
)
msg = (
"Covering exclusion constraints using an SP-GiST index require "
"PostgreSQL 14+."
)
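        # Force the feature flag off to simulate PostgreSQL < 14.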
with connection.schema_editor() as editor:
with mock.patch(
"django.db.backends.postgresql.features.DatabaseFeatures."
"supports_covering_spgist_indexes",
False,
):
with self.assertRaisesMessage(NotSupportedError, msg):
editor.add_constraint(RangesModel, constraint)
def test_range_adjacent_opclass(self):
constraint_name = "ints_adjacent_opclass"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[
(OpClass("ints", name="range_ops"), RangeOperators.ADJACENT_TO),
],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
constraints = self.get_constraints(RangesModel._meta.db_table)
self.assertIn(constraint_name, constraints)
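        # get_opclass_query (a SQL snippet shared with the schema tests)
        # reports the operator classes attached to the constraint's index.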
with editor.connection.cursor() as cursor:
cursor.execute(SchemaTests.get_opclass_query, [constraint_name])
self.assertEqual(
cursor.fetchall(),
[("range_ops", constraint_name)],
)
RangesModel.objects.create(ints=(20, 50))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(10, 20))
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
# Drop the constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(RangesModel, constraint)
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
def test_range_adjacent_opclass_condition(self):
constraint_name = "ints_adjacent_opclass_condition"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[
(OpClass("ints", name="range_ops"), RangeOperators.ADJACENT_TO),
],
condition=Q(id__gte=100),
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
def test_range_adjacent_opclass_deferrable(self):
constraint_name = "ints_adjacent_opclass_deferrable"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[
(OpClass("ints", name="range_ops"), RangeOperators.ADJACENT_TO),
],
deferrable=Deferrable.DEFERRED,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
def test_range_adjacent_gist_opclass_include(self):
constraint_name = "ints_adjacent_gist_opclass_include"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[
(OpClass("ints", name="range_ops"), RangeOperators.ADJACENT_TO),
],
index_type="gist",
include=["decimals"],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
@skipUnlessDBFeature("supports_covering_spgist_indexes")
def test_range_adjacent_spgist_opclass_include(self):
constraint_name = "ints_adjacent_spgist_opclass_include"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[
(OpClass("ints", name="range_ops"), RangeOperators.ADJACENT_TO),
],
index_type="spgist",
include=["decimals"],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
def test_range_equal_cast(self):
constraint_name = "exclusion_equal_room_cast"
self.assertNotIn(constraint_name, self.get_constraints(Room._meta.db_table))
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[(Cast("number", IntegerField()), RangeOperators.EQUAL)],
)
with connection.schema_editor() as editor:
editor.add_constraint(Room, constraint)
self.assertIn(constraint_name, self.get_constraints(Room._meta.db_table))
@isolate_apps("postgres_tests")
def test_table_create(self):
constraint_name = "exclusion_equal_number_tc"
class ModelWithExclusionConstraint(Model):
number = IntegerField()
class Meta:
app_label = "postgres_tests"
constraints = [
ExclusionConstraint(
name=constraint_name,
expressions=[("number", RangeOperators.EQUAL)],
)
]
with connection.schema_editor() as editor:
editor.create_model(ModelWithExclusionConstraint)
self.assertIn(
constraint_name,
self.get_constraints(ModelWithExclusionConstraint._meta.db_table),
)
@modify_settings(INSTALLED_APPS={"append": "django.contrib.postgres"})
class ExclusionConstraintOpclassesDeprecationTests(PostgreSQLTestCase):
def get_constraints(self, table):
"""Get the constraints on the table using a new cursor."""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
def test_warning(self):
msg = (
"The opclasses argument is deprecated in favor of using "
"django.contrib.postgres.indexes.OpClass in "
"ExclusionConstraint.expressions."
)
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
ExclusionConstraint(
name="exclude_overlapping",
expressions=[(F("datespan"), RangeOperators.ADJACENT_TO)],
opclasses=["range_ops"],
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_repr(self):
constraint = ExclusionConstraint(
name="exclude_overlapping",
expressions=[(F("datespan"), RangeOperators.ADJACENT_TO)],
opclasses=["range_ops"],
)
self.assertEqual(
repr(constraint),
"<ExclusionConstraint: index_type='GIST' expressions=["
"(F(datespan), '-|-')] name='exclude_overlapping' "
"opclasses=['range_ops']>",
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_range_adjacent_opclasses(self):
constraint_name = "ints_adjacent_opclasses"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
opclasses=["range_ops"],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
constraints = self.get_constraints(RangesModel._meta.db_table)
self.assertIn(constraint_name, constraints)
with editor.connection.cursor() as cursor:
cursor.execute(SchemaTests.get_opclass_query, [constraint.name])
self.assertEqual(
cursor.fetchall(),
[("range_ops", constraint.name)],
)
RangesModel.objects.create(ints=(20, 50))
with self.assertRaises(IntegrityError), transaction.atomic():
RangesModel.objects.create(ints=(10, 20))
RangesModel.objects.create(ints=(10, 19))
RangesModel.objects.create(ints=(51, 60))
# Drop the constraint.
with connection.schema_editor() as editor:
editor.remove_constraint(RangesModel, constraint)
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_range_adjacent_opclasses_condition(self):
constraint_name = "ints_adjacent_opclasses_condition"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
opclasses=["range_ops"],
condition=Q(id__gte=100),
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
@ignore_warnings(category=RemovedInDjango50Warning)
def test_range_adjacent_opclasses_deferrable(self):
constraint_name = "ints_adjacent_opclasses_deferrable"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
opclasses=["range_ops"],
deferrable=Deferrable.DEFERRED,
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
@ignore_warnings(category=RemovedInDjango50Warning)
def test_range_adjacent_gist_opclasses_include(self):
constraint_name = "ints_adjacent_gist_opclasses_include"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="gist",
opclasses=["range_ops"],
include=["decimals"],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
@ignore_warnings(category=RemovedInDjango50Warning)
@skipUnlessDBFeature("supports_covering_spgist_indexes")
def test_range_adjacent_spgist_opclasses_include(self):
constraint_name = "ints_adjacent_spgist_opclasses_include"
self.assertNotIn(
constraint_name, self.get_constraints(RangesModel._meta.db_table)
)
constraint = ExclusionConstraint(
name=constraint_name,
expressions=[("ints", RangeOperators.ADJACENT_TO)],
index_type="spgist",
opclasses=["range_ops"],
include=["decimals"],
)
with connection.schema_editor() as editor:
editor.add_constraint(RangesModel, constraint)
self.assertIn(constraint_name, self.get_constraints(RangesModel._meta.db_table))
import gzip
import random
import re
import struct
from io import BytesIO
from urllib.parse import quote
from django.conf import settings
from django.core import mail
from django.core.exceptions import PermissionDenied
from django.http import (
FileResponse,
HttpRequest,
HttpResponse,
HttpResponseNotFound,
HttpResponsePermanentRedirect,
HttpResponseRedirect,
StreamingHttpResponse,
)
from django.middleware.clickjacking import XFrameOptionsMiddleware
from django.middleware.common import BrokenLinkEmailsMiddleware, CommonMiddleware
from django.middleware.gzip import GZipMiddleware
from django.middleware.http import ConditionalGetMiddleware
from django.test import RequestFactory, SimpleTestCase, override_settings
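# Pack a single unsigned byte; used below to build random, incompressible
# payloads for the gzip tests.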
int2byte = struct.Struct(">B").pack
def get_response_empty(request):
return HttpResponse()
def get_response_404(request):
return HttpResponseNotFound()
@override_settings(ROOT_URLCONF="middleware.urls")
class CommonMiddlewareTest(SimpleTestCase):
rf = RequestFactory()
@override_settings(APPEND_SLASH=True)
def test_append_slash_have_slash(self):
"""
URLs with slashes should go unmolested.
"""
request = self.rf.get("/slash/")
self.assertIsNone(CommonMiddleware(get_response_404).process_request(request))
self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_resource(self):
"""
Matches to explicit slashless URLs should go unmolested.
"""
def get_response(req):
return HttpResponse("Here's the text of the web page.")
request = self.rf.get("/noslash")
self.assertIsNone(CommonMiddleware(get_response).process_request(request))
self.assertEqual(
CommonMiddleware(get_response)(request).content,
b"Here's the text of the web page.",
)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_unknown(self):
"""
APPEND_SLASH should not redirect to unknown resources.
"""
request = self.rf.get("/unknown")
response = CommonMiddleware(get_response_404)(request)
self.assertEqual(response.status_code, 404)
@override_settings(APPEND_SLASH=True)
def test_append_slash_redirect(self):
"""
APPEND_SLASH should redirect slashless URLs to a valid pattern.
"""
request = self.rf.get("/slash")
r = CommonMiddleware(get_response_empty).process_request(request)
self.assertIsNone(r)
response = HttpResponseNotFound()
r = CommonMiddleware(get_response_empty).process_response(request, response)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, "/slash/")
@override_settings(APPEND_SLASH=True)
def test_append_slash_redirect_querystring(self):
"""
APPEND_SLASH should preserve querystrings when redirecting.
"""
request = self.rf.get("/slash?test=1")
resp = CommonMiddleware(get_response_404)(request)
self.assertEqual(resp.url, "/slash/?test=1")
@override_settings(APPEND_SLASH=True)
def test_append_slash_redirect_querystring_have_slash(self):
"""
APPEND_SLASH should append slash to path when redirecting a request
with a querystring ending with slash.
"""
request = self.rf.get("/slash?test=slash/")
resp = CommonMiddleware(get_response_404)(request)
self.assertIsInstance(resp, HttpResponsePermanentRedirect)
self.assertEqual(resp.url, "/slash/?test=slash/")
@override_settings(APPEND_SLASH=True, DEBUG=True)
def test_append_slash_no_redirect_on_POST_in_DEBUG(self):
"""
        While in debug mode, an exception with an explanatory message is
        raised when a failed attempt is made to POST, PUT, or PATCH to a URL
        which would normally be redirected to a slashed version.
"""
msg = "maintaining %s data. Change your form to point to testserver/slash/"
request = self.rf.get("/slash")
request.method = "POST"
with self.assertRaisesMessage(RuntimeError, msg % request.method):
CommonMiddleware(get_response_404)(request)
request = self.rf.get("/slash")
request.method = "PUT"
with self.assertRaisesMessage(RuntimeError, msg % request.method):
CommonMiddleware(get_response_404)(request)
request = self.rf.get("/slash")
request.method = "PATCH"
with self.assertRaisesMessage(RuntimeError, msg % request.method):
CommonMiddleware(get_response_404)(request)
@override_settings(APPEND_SLASH=False)
def test_append_slash_disabled(self):
"""
Disabling append slash functionality should leave slashless URLs alone.
"""
request = self.rf.get("/slash")
self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)
@override_settings(APPEND_SLASH=True)
def test_append_slash_opt_out(self):
"""
Views marked with @no_append_slash should be left alone.
"""
request = self.rf.get("/sensitive_fbv")
self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)
request = self.rf.get("/sensitive_cbv")
self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)
@override_settings(APPEND_SLASH=True)
def test_append_slash_quoted(self):
"""
URLs which require quoting should be redirected to their slash version.
"""
request = self.rf.get(quote("/needsquoting#"))
r = CommonMiddleware(get_response_404)(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, "/needsquoting%23/")
@override_settings(APPEND_SLASH=True)
def test_append_slash_leading_slashes(self):
"""
Paths starting with two slashes are escaped to prevent open redirects.
If there's a URL pattern that allows paths to start with two slashes, a
request with path //evil.com must not redirect to //evil.com/ (appended
slash) which is a schemaless absolute URL. The browser would navigate
to evil.com/.
"""
# Use 4 slashes because of RequestFactory behavior.
request = self.rf.get("////evil.com/security")
r = CommonMiddleware(get_response_404).process_request(request)
self.assertIsNone(r)
response = HttpResponseNotFound()
r = CommonMiddleware(get_response_404).process_response(request, response)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, "/%2Fevil.com/security/")
r = CommonMiddleware(get_response_404)(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, "/%2Fevil.com/security/")
@override_settings(APPEND_SLASH=False, PREPEND_WWW=True)
def test_prepend_www(self):
request = self.rf.get("/path/")
r = CommonMiddleware(get_response_empty).process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, "http://www.testserver/path/")
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_have_slash(self):
request = self.rf.get("/slash/")
r = CommonMiddleware(get_response_empty).process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, "http://www.testserver/slash/")
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_slashless(self):
request = self.rf.get("/slash")
r = CommonMiddleware(get_response_empty).process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, "http://www.testserver/slash/")
# The following tests examine expected behavior given a custom URLconf that
# overrides the default one through the request object.
@override_settings(APPEND_SLASH=True)
def test_append_slash_have_slash_custom_urlconf(self):
"""
URLs with slashes should go unmolested.
"""
request = self.rf.get("/customurlconf/slash/")
request.urlconf = "middleware.extra_urls"
self.assertIsNone(CommonMiddleware(get_response_404).process_request(request))
self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_resource_custom_urlconf(self):
"""
Matches to explicit slashless URLs should go unmolested.
"""
def get_response(req):
return HttpResponse("web content")
request = self.rf.get("/customurlconf/noslash")
request.urlconf = "middleware.extra_urls"
self.assertIsNone(CommonMiddleware(get_response).process_request(request))
self.assertEqual(
CommonMiddleware(get_response)(request).content, b"web content"
)
@override_settings(APPEND_SLASH=True)
def test_append_slash_slashless_unknown_custom_urlconf(self):
"""
APPEND_SLASH should not redirect to unknown resources.
"""
request = self.rf.get("/customurlconf/unknown")
request.urlconf = "middleware.extra_urls"
self.assertIsNone(CommonMiddleware(get_response_404).process_request(request))
self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)
@override_settings(APPEND_SLASH=True)
def test_append_slash_redirect_custom_urlconf(self):
"""
APPEND_SLASH should redirect slashless URLs to a valid pattern.
"""
request = self.rf.get("/customurlconf/slash")
request.urlconf = "middleware.extra_urls"
r = CommonMiddleware(get_response_404)(request)
self.assertIsNotNone(
r,
"CommonMiddleware failed to return APPEND_SLASH redirect using "
"request.urlconf",
)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, "/customurlconf/slash/")
@override_settings(APPEND_SLASH=True, DEBUG=True)
def test_append_slash_no_redirect_on_POST_in_DEBUG_custom_urlconf(self):
"""
        While in debug mode, an exception with an explanatory message is
        raised when a failed attempt is made to POST to a URL which would
        normally be redirected to a slashed version.
"""
request = self.rf.get("/customurlconf/slash")
request.urlconf = "middleware.extra_urls"
request.method = "POST"
with self.assertRaisesMessage(RuntimeError, "end in a slash"):
CommonMiddleware(get_response_404)(request)
@override_settings(APPEND_SLASH=False)
def test_append_slash_disabled_custom_urlconf(self):
"""
Disabling append slash functionality should leave slashless URLs alone.
"""
request = self.rf.get("/customurlconf/slash")
request.urlconf = "middleware.extra_urls"
self.assertIsNone(CommonMiddleware(get_response_404).process_request(request))
self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)
@override_settings(APPEND_SLASH=True)
def test_append_slash_quoted_custom_urlconf(self):
"""
URLs which require quoting should be redirected to their slash version.
"""
request = self.rf.get(quote("/customurlconf/needsquoting#"))
request.urlconf = "middleware.extra_urls"
r = CommonMiddleware(get_response_404)(request)
self.assertIsNotNone(
r,
"CommonMiddleware failed to return APPEND_SLASH redirect using "
"request.urlconf",
)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, "/customurlconf/needsquoting%23/")
@override_settings(APPEND_SLASH=False, PREPEND_WWW=True)
def test_prepend_www_custom_urlconf(self):
request = self.rf.get("/customurlconf/path/")
request.urlconf = "middleware.extra_urls"
r = CommonMiddleware(get_response_empty).process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, "http://www.testserver/customurlconf/path/")
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_have_slash_custom_urlconf(self):
request = self.rf.get("/customurlconf/slash/")
request.urlconf = "middleware.extra_urls"
r = CommonMiddleware(get_response_empty).process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, "http://www.testserver/customurlconf/slash/")
@override_settings(APPEND_SLASH=True, PREPEND_WWW=True)
def test_prepend_www_append_slash_slashless_custom_urlconf(self):
request = self.rf.get("/customurlconf/slash")
request.urlconf = "middleware.extra_urls"
r = CommonMiddleware(get_response_empty).process_request(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, "http://www.testserver/customurlconf/slash/")
# Tests for the Content-Length header
def test_content_length_header_added(self):
def get_response(req):
response = HttpResponse("content")
self.assertNotIn("Content-Length", response)
return response
response = CommonMiddleware(get_response)(self.rf.get("/"))
self.assertEqual(int(response.headers["Content-Length"]), len(response.content))
def test_content_length_header_not_added_for_streaming_response(self):
def get_response(req):
response = StreamingHttpResponse("content")
self.assertNotIn("Content-Length", response)
return response
response = CommonMiddleware(get_response)(self.rf.get("/"))
self.assertNotIn("Content-Length", response)
def test_content_length_header_not_changed(self):
bad_content_length = 500
def get_response(req):
response = HttpResponse()
response.headers["Content-Length"] = bad_content_length
return response
response = CommonMiddleware(get_response)(self.rf.get("/"))
self.assertEqual(int(response.headers["Content-Length"]), bad_content_length)
# Other tests
@override_settings(DISALLOWED_USER_AGENTS=[re.compile(r"foo")])
def test_disallowed_user_agents(self):
request = self.rf.get("/slash")
request.META["HTTP_USER_AGENT"] = "foo"
with self.assertRaisesMessage(PermissionDenied, "Forbidden user agent"):
CommonMiddleware(get_response_empty).process_request(request)
def test_non_ascii_query_string_does_not_crash(self):
"""Regression test for #15152"""
request = self.rf.get("/slash")
request.META["QUERY_STRING"] = "drink=café"
r = CommonMiddleware(get_response_empty).process_request(request)
self.assertIsNone(r)
response = HttpResponseNotFound()
r = CommonMiddleware(get_response_empty).process_response(request, response)
self.assertEqual(r.status_code, 301)
def test_response_redirect_class(self):
request = self.rf.get("/slash")
r = CommonMiddleware(get_response_404)(request)
self.assertEqual(r.status_code, 301)
self.assertEqual(r.url, "/slash/")
self.assertIsInstance(r, HttpResponsePermanentRedirect)
def test_response_redirect_class_subclass(self):
class MyCommonMiddleware(CommonMiddleware):
response_redirect_class = HttpResponseRedirect
request = self.rf.get("/slash")
r = MyCommonMiddleware(get_response_404)(request)
self.assertEqual(r.status_code, 302)
self.assertEqual(r.url, "/slash/")
self.assertIsInstance(r, HttpResponseRedirect)
@override_settings(
IGNORABLE_404_URLS=[re.compile(r"foo")],
MANAGERS=[("PHD", "[email protected]")],
)
class BrokenLinkEmailsMiddlewareTest(SimpleTestCase):
rf = RequestFactory()
def setUp(self):
self.req = self.rf.get("/regular_url/that/does/not/exist")
def get_response(self, req):
return self.client.get(req.path)
def test_404_error_reporting(self):
self.req.META["HTTP_REFERER"] = "/another/url/"
BrokenLinkEmailsMiddleware(self.get_response)(self.req)
self.assertEqual(len(mail.outbox), 1)
self.assertIn("Broken", mail.outbox[0].subject)
def test_404_error_reporting_no_referer(self):
BrokenLinkEmailsMiddleware(self.get_response)(self.req)
self.assertEqual(len(mail.outbox), 0)
def test_404_error_reporting_ignored_url(self):
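        # The path matches the IGNORABLE_404_URLS pattern (r"foo"), so no
        # email is sent.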
self.req.path = self.req.path_info = "foo_url/that/does/not/exist"
BrokenLinkEmailsMiddleware(self.get_response)(self.req)
self.assertEqual(len(mail.outbox), 0)
def test_custom_request_checker(self):
class SubclassedMiddleware(BrokenLinkEmailsMiddleware):
ignored_user_agent_patterns = (
re.compile(r"Spider.*"),
re.compile(r"Robot.*"),
)
def is_ignorable_request(self, request, uri, domain, referer):
"""Check user-agent in addition to normal checks."""
if super().is_ignorable_request(request, uri, domain, referer):
return True
user_agent = request.META["HTTP_USER_AGENT"]
return any(
pattern.search(user_agent)
for pattern in self.ignored_user_agent_patterns
)
self.req.META["HTTP_REFERER"] = "/another/url/"
self.req.META["HTTP_USER_AGENT"] = "Spider machine 3.4"
SubclassedMiddleware(self.get_response)(self.req)
self.assertEqual(len(mail.outbox), 0)
self.req.META["HTTP_USER_AGENT"] = "My user agent"
SubclassedMiddleware(self.get_response)(self.req)
self.assertEqual(len(mail.outbox), 1)
def test_referer_equal_to_requested_url(self):
"""
Some bots set the referer to the current URL to avoid being blocked by
        a referer check (#25302).
"""
self.req.META["HTTP_REFERER"] = self.req.path
BrokenLinkEmailsMiddleware(self.get_response)(self.req)
self.assertEqual(len(mail.outbox), 0)
# URL with scheme and domain should also be ignored
self.req.META["HTTP_REFERER"] = "http://testserver%s" % self.req.path
BrokenLinkEmailsMiddleware(self.get_response)(self.req)
self.assertEqual(len(mail.outbox), 0)
# URL with a different scheme should be ignored as well because bots
# tend to use http:// in referers even when browsing HTTPS websites.
self.req.META["HTTP_X_PROTO"] = "https"
self.req.META["SERVER_PORT"] = 443
with self.settings(SECURE_PROXY_SSL_HEADER=("HTTP_X_PROTO", "https")):
BrokenLinkEmailsMiddleware(self.get_response)(self.req)
self.assertEqual(len(mail.outbox), 0)
def test_referer_equal_to_requested_url_on_another_domain(self):
self.req.META["HTTP_REFERER"] = "http://anotherserver%s" % self.req.path
BrokenLinkEmailsMiddleware(self.get_response)(self.req)
self.assertEqual(len(mail.outbox), 1)
@override_settings(APPEND_SLASH=True)
def test_referer_equal_to_requested_url_without_trailing_slash_with_append_slash(
self,
):
self.req.path = self.req.path_info = "/regular_url/that/does/not/exist/"
self.req.META["HTTP_REFERER"] = self.req.path_info[:-1]
BrokenLinkEmailsMiddleware(self.get_response)(self.req)
self.assertEqual(len(mail.outbox), 0)
@override_settings(APPEND_SLASH=False)
def test_referer_equal_to_requested_url_without_trailing_slash_with_no_append_slash(
self,
):
self.req.path = self.req.path_info = "/regular_url/that/does/not/exist/"
self.req.META["HTTP_REFERER"] = self.req.path_info[:-1]
BrokenLinkEmailsMiddleware(self.get_response)(self.req)
self.assertEqual(len(mail.outbox), 1)
@override_settings(ROOT_URLCONF="middleware.cond_get_urls")
class ConditionalGetMiddlewareTest(SimpleTestCase):
request_factory = RequestFactory()
def setUp(self):
self.req = self.request_factory.get("/")
self.resp_headers = {}
def get_response(self, req):
resp = self.client.get(req.path_info)
for key, value in self.resp_headers.items():
resp[key] = value
return resp
# Tests for the ETag header
def test_middleware_calculates_etag(self):
resp = ConditionalGetMiddleware(self.get_response)(self.req)
self.assertEqual(resp.status_code, 200)
self.assertNotEqual("", resp["ETag"])
def test_middleware_wont_overwrite_etag(self):
self.resp_headers["ETag"] = "eggs"
resp = ConditionalGetMiddleware(self.get_response)(self.req)
self.assertEqual(resp.status_code, 200)
self.assertEqual("eggs", resp["ETag"])
def test_no_etag_streaming_response(self):
def get_response(req):
return StreamingHttpResponse(["content"])
self.assertFalse(
ConditionalGetMiddleware(get_response)(self.req).has_header("ETag")
)
def test_no_etag_response_empty_content(self):
def get_response(req):
return HttpResponse()
self.assertFalse(
ConditionalGetMiddleware(get_response)(self.req).has_header("ETag")
)
def test_no_etag_no_store_cache(self):
self.resp_headers["Cache-Control"] = "No-Cache, No-Store, Max-age=0"
self.assertFalse(
ConditionalGetMiddleware(self.get_response)(self.req).has_header("ETag")
)
def test_etag_extended_cache_control(self):
self.resp_headers["Cache-Control"] = 'my-directive="my-no-store"'
self.assertTrue(
ConditionalGetMiddleware(self.get_response)(self.req).has_header("ETag")
)
def test_if_none_match_and_no_etag(self):
self.req.META["HTTP_IF_NONE_MATCH"] = "spam"
resp = ConditionalGetMiddleware(self.get_response)(self.req)
self.assertEqual(resp.status_code, 200)
def test_no_if_none_match_and_etag(self):
self.resp_headers["ETag"] = "eggs"
resp = ConditionalGetMiddleware(self.get_response)(self.req)
self.assertEqual(resp.status_code, 200)
def test_if_none_match_and_same_etag(self):
self.req.META["HTTP_IF_NONE_MATCH"] = '"spam"'
self.resp_headers["ETag"] = '"spam"'
resp = ConditionalGetMiddleware(self.get_response)(self.req)
self.assertEqual(resp.status_code, 304)
def test_if_none_match_and_different_etag(self):
self.req.META["HTTP_IF_NONE_MATCH"] = "spam"
self.resp_headers["ETag"] = "eggs"
resp = ConditionalGetMiddleware(self.get_response)(self.req)
self.assertEqual(resp.status_code, 200)
def test_if_none_match_and_redirect(self):
def get_response(req):
resp = self.client.get(req.path_info)
resp["ETag"] = "spam"
resp["Location"] = "/"
resp.status_code = 301
return resp
self.req.META["HTTP_IF_NONE_MATCH"] = "spam"
resp = ConditionalGetMiddleware(get_response)(self.req)
self.assertEqual(resp.status_code, 301)
def test_if_none_match_and_client_error(self):
def get_response(req):
resp = self.client.get(req.path_info)
resp["ETag"] = "spam"
resp.status_code = 400
return resp
self.req.META["HTTP_IF_NONE_MATCH"] = "spam"
resp = ConditionalGetMiddleware(get_response)(self.req)
self.assertEqual(resp.status_code, 400)
# Tests for the Last-Modified header
def test_if_modified_since_and_no_last_modified(self):
self.req.META["HTTP_IF_MODIFIED_SINCE"] = "Sat, 12 Feb 2011 17:38:44 GMT"
resp = ConditionalGetMiddleware(self.get_response)(self.req)
self.assertEqual(resp.status_code, 200)
def test_no_if_modified_since_and_last_modified(self):
self.resp_headers["Last-Modified"] = "Sat, 12 Feb 2011 17:38:44 GMT"
resp = ConditionalGetMiddleware(self.get_response)(self.req)
self.assertEqual(resp.status_code, 200)
def test_if_modified_since_and_same_last_modified(self):
self.req.META["HTTP_IF_MODIFIED_SINCE"] = "Sat, 12 Feb 2011 17:38:44 GMT"
self.resp_headers["Last-Modified"] = "Sat, 12 Feb 2011 17:38:44 GMT"
        resp = ConditionalGetMiddleware(self.get_response)(self.req)
        self.assertEqual(resp.status_code, 304)
def test_if_modified_since_and_last_modified_in_the_past(self):
self.req.META["HTTP_IF_MODIFIED_SINCE"] = "Sat, 12 Feb 2011 17:38:44 GMT"
self.resp_headers["Last-Modified"] = "Sat, 12 Feb 2011 17:35:44 GMT"
resp = ConditionalGetMiddleware(self.get_response)(self.req)
self.assertEqual(resp.status_code, 304)
def test_if_modified_since_and_last_modified_in_the_future(self):
self.req.META["HTTP_IF_MODIFIED_SINCE"] = "Sat, 12 Feb 2011 17:38:44 GMT"
self.resp_headers["Last-Modified"] = "Sat, 12 Feb 2011 17:41:44 GMT"
        resp = ConditionalGetMiddleware(self.get_response)(self.req)
        self.assertEqual(resp.status_code, 200)
def test_if_modified_since_and_redirect(self):
def get_response(req):
resp = self.client.get(req.path_info)
resp["Last-Modified"] = "Sat, 12 Feb 2011 17:35:44 GMT"
resp["Location"] = "/"
resp.status_code = 301
return resp
self.req.META["HTTP_IF_MODIFIED_SINCE"] = "Sat, 12 Feb 2011 17:38:44 GMT"
resp = ConditionalGetMiddleware(get_response)(self.req)
self.assertEqual(resp.status_code, 301)
def test_if_modified_since_and_client_error(self):
def get_response(req):
resp = self.client.get(req.path_info)
resp["Last-Modified"] = "Sat, 12 Feb 2011 17:35:44 GMT"
resp.status_code = 400
return resp
self.req.META["HTTP_IF_MODIFIED_SINCE"] = "Sat, 12 Feb 2011 17:38:44 GMT"
resp = ConditionalGetMiddleware(get_response)(self.req)
self.assertEqual(resp.status_code, 400)
def test_not_modified_headers(self):
"""
The 304 Not Modified response should include only the headers required
by RFC 9110 Section 15.4.5, Last-Modified, and the cookies.
"""
def get_response(req):
resp = self.client.get(req.path_info)
resp["Date"] = "Sat, 12 Feb 2011 17:35:44 GMT"
resp["Last-Modified"] = "Sat, 12 Feb 2011 17:35:44 GMT"
resp["Expires"] = "Sun, 13 Feb 2011 17:35:44 GMT"
resp["Vary"] = "Cookie"
resp["Cache-Control"] = "public"
resp["Content-Location"] = "/alt"
resp["Content-Language"] = "en" # shouldn't be preserved
resp["ETag"] = '"spam"'
resp.set_cookie("key", "value")
return resp
self.req.META["HTTP_IF_NONE_MATCH"] = '"spam"'
new_response = ConditionalGetMiddleware(get_response)(self.req)
self.assertEqual(new_response.status_code, 304)
base_response = get_response(self.req)
for header in (
"Cache-Control",
"Content-Location",
"Date",
"ETag",
"Expires",
"Last-Modified",
"Vary",
):
self.assertEqual(
new_response.headers[header], base_response.headers[header]
)
self.assertEqual(new_response.cookies, base_response.cookies)
self.assertNotIn("Content-Language", new_response)
def test_no_unsafe(self):
"""
ConditionalGetMiddleware shouldn't return a conditional response on an
unsafe request. A response has already been generated by the time
ConditionalGetMiddleware is called, so it's too late to return a 412
Precondition Failed.
"""
def get_200_response(req):
return HttpResponse(status=200)
response = ConditionalGetMiddleware(self.get_response)(self.req)
etag = response.headers["ETag"]
put_request = self.request_factory.put("/", HTTP_IF_MATCH=etag)
conditional_get_response = ConditionalGetMiddleware(get_200_response)(
put_request
)
self.assertEqual(
conditional_get_response.status_code, 200
) # should never be a 412
def test_no_head(self):
"""
ConditionalGetMiddleware shouldn't compute and return an ETag on a
HEAD request since it can't do so accurately without access to the
response body of the corresponding GET.
"""
def get_200_response(req):
return HttpResponse(status=200)
request = self.request_factory.head("/")
conditional_get_response = ConditionalGetMiddleware(get_200_response)(request)
self.assertNotIn("ETag", conditional_get_response)
class XFrameOptionsMiddlewareTest(SimpleTestCase):
"""
Tests for the X-Frame-Options clickjacking prevention middleware.
"""
def test_same_origin(self):
"""
The X_FRAME_OPTIONS setting can be set to SAMEORIGIN to have the
middleware use that value for the HTTP header.
"""
with override_settings(X_FRAME_OPTIONS="SAMEORIGIN"):
r = XFrameOptionsMiddleware(get_response_empty)(HttpRequest())
self.assertEqual(r.headers["X-Frame-Options"], "SAMEORIGIN")
with override_settings(X_FRAME_OPTIONS="sameorigin"):
r = XFrameOptionsMiddleware(get_response_empty)(HttpRequest())
self.assertEqual(r.headers["X-Frame-Options"], "SAMEORIGIN")
def test_deny(self):
"""
The X_FRAME_OPTIONS setting can be set to DENY to have the middleware
use that value for the HTTP header.
"""
with override_settings(X_FRAME_OPTIONS="DENY"):
r = XFrameOptionsMiddleware(get_response_empty)(HttpRequest())
self.assertEqual(r.headers["X-Frame-Options"], "DENY")
with override_settings(X_FRAME_OPTIONS="deny"):
r = XFrameOptionsMiddleware(get_response_empty)(HttpRequest())
self.assertEqual(r.headers["X-Frame-Options"], "DENY")
    def test_defaults_deny(self):
        """
        If the X_FRAME_OPTIONS setting is not set, it defaults to DENY.
        """
with override_settings(X_FRAME_OPTIONS=None):
del settings.X_FRAME_OPTIONS # restored by override_settings
r = XFrameOptionsMiddleware(get_response_empty)(HttpRequest())
self.assertEqual(r.headers["X-Frame-Options"], "DENY")
def test_dont_set_if_set(self):
"""
If the X-Frame-Options header is already set then the middleware does
not attempt to override it.
"""
def same_origin_response(request):
response = HttpResponse()
response.headers["X-Frame-Options"] = "SAMEORIGIN"
return response
def deny_response(request):
response = HttpResponse()
response.headers["X-Frame-Options"] = "DENY"
return response
with override_settings(X_FRAME_OPTIONS="DENY"):
r = XFrameOptionsMiddleware(same_origin_response)(HttpRequest())
self.assertEqual(r.headers["X-Frame-Options"], "SAMEORIGIN")
with override_settings(X_FRAME_OPTIONS="SAMEORIGIN"):
r = XFrameOptionsMiddleware(deny_response)(HttpRequest())
self.assertEqual(r.headers["X-Frame-Options"], "DENY")
def test_response_exempt(self):
"""
If the response has an xframe_options_exempt attribute set to False
then it still sets the header, but if it's set to True then it doesn't.
"""
def xframe_exempt_response(request):
response = HttpResponse()
response.xframe_options_exempt = True
return response
def xframe_not_exempt_response(request):
response = HttpResponse()
response.xframe_options_exempt = False
return response
with override_settings(X_FRAME_OPTIONS="SAMEORIGIN"):
r = XFrameOptionsMiddleware(xframe_not_exempt_response)(HttpRequest())
self.assertEqual(r.headers["X-Frame-Options"], "SAMEORIGIN")
r = XFrameOptionsMiddleware(xframe_exempt_response)(HttpRequest())
self.assertIsNone(r.headers.get("X-Frame-Options"))
def test_is_extendable(self):
"""
The XFrameOptionsMiddleware method that determines the X-Frame-Options
header value can be overridden based on something in the request or
response.
"""
class OtherXFrameOptionsMiddleware(XFrameOptionsMiddleware):
# This is just an example for testing purposes...
def get_xframe_options_value(self, request, response):
if getattr(request, "sameorigin", False):
return "SAMEORIGIN"
if getattr(response, "sameorigin", False):
return "SAMEORIGIN"
return "DENY"
def same_origin_response(request):
response = HttpResponse()
response.sameorigin = True
return response
with override_settings(X_FRAME_OPTIONS="DENY"):
r = OtherXFrameOptionsMiddleware(same_origin_response)(HttpRequest())
self.assertEqual(r.headers["X-Frame-Options"], "SAMEORIGIN")
request = HttpRequest()
request.sameorigin = True
r = OtherXFrameOptionsMiddleware(get_response_empty)(request)
self.assertEqual(r.headers["X-Frame-Options"], "SAMEORIGIN")
with override_settings(X_FRAME_OPTIONS="SAMEORIGIN"):
r = OtherXFrameOptionsMiddleware(get_response_empty)(HttpRequest())
self.assertEqual(r.headers["X-Frame-Options"], "DENY")
class GZipMiddlewareTest(SimpleTestCase):
"""
Tests the GZipMiddleware.
"""
short_string = b"This string is too short to be worth compressing."
compressible_string = b"a" * 500
incompressible_string = b"".join(
int2byte(random.randint(0, 255)) for _ in range(500)
)
sequence = [b"a" * 500, b"b" * 200, b"a" * 300]
sequence_unicode = ["a" * 500, "é" * 200, "a" * 300]
request_factory = RequestFactory()
def setUp(self):
self.req = self.request_factory.get("/")
self.req.META["HTTP_ACCEPT_ENCODING"] = "gzip, deflate"
self.req.META[
"HTTP_USER_AGENT"
] = "Mozilla/5.0 (Windows NT 5.1; rv:9.0.1) Gecko/20100101 Firefox/9.0.1"
self.resp = HttpResponse()
self.resp.status_code = 200
self.resp.content = self.compressible_string
self.resp["Content-Type"] = "text/html; charset=UTF-8"
def get_response(self, request):
return self.resp
@staticmethod
def decompress(gzipped_string):
with gzip.GzipFile(mode="rb", fileobj=BytesIO(gzipped_string)) as f:
return f.read()
@staticmethod
def get_mtime(gzipped_string):
with gzip.GzipFile(mode="rb", fileobj=BytesIO(gzipped_string)) as f:
f.read() # must read the data before accessing the header
return f.mtime
def test_compress_response(self):
"""
Compression is performed on responses with compressible content.
"""
r = GZipMiddleware(self.get_response)(self.req)
self.assertEqual(self.decompress(r.content), self.compressible_string)
self.assertEqual(r.get("Content-Encoding"), "gzip")
self.assertEqual(r.get("Content-Length"), str(len(r.content)))
def test_compress_streaming_response(self):
"""
Compression is performed on responses with streaming content.
"""
def get_stream_response(request):
resp = StreamingHttpResponse(self.sequence)
resp["Content-Type"] = "text/html; charset=UTF-8"
return resp
r = GZipMiddleware(get_stream_response)(self.req)
self.assertEqual(self.decompress(b"".join(r)), b"".join(self.sequence))
self.assertEqual(r.get("Content-Encoding"), "gzip")
self.assertFalse(r.has_header("Content-Length"))
def test_compress_streaming_response_unicode(self):
"""
Compression is performed on responses with streaming Unicode content.
"""
def get_stream_response_unicode(request):
resp = StreamingHttpResponse(self.sequence_unicode)
resp["Content-Type"] = "text/html; charset=UTF-8"
return resp
r = GZipMiddleware(get_stream_response_unicode)(self.req)
self.assertEqual(
self.decompress(b"".join(r)),
b"".join(x.encode() for x in self.sequence_unicode),
)
self.assertEqual(r.get("Content-Encoding"), "gzip")
self.assertFalse(r.has_header("Content-Length"))
def test_compress_file_response(self):
"""
Compression is performed on FileResponse.
"""
with open(__file__, "rb") as file1:
def get_response(req):
file_resp = FileResponse(file1)
file_resp["Content-Type"] = "text/html; charset=UTF-8"
return file_resp
r = GZipMiddleware(get_response)(self.req)
with open(__file__, "rb") as file2:
self.assertEqual(self.decompress(b"".join(r)), file2.read())
self.assertEqual(r.get("Content-Encoding"), "gzip")
self.assertIsNot(r.file_to_stream, file1)
def test_compress_non_200_response(self):
"""
Compression is performed on responses with a status other than 200
(#10762).
"""
self.resp.status_code = 404
r = GZipMiddleware(self.get_response)(self.req)
self.assertEqual(self.decompress(r.content), self.compressible_string)
self.assertEqual(r.get("Content-Encoding"), "gzip")
def test_no_compress_short_response(self):
"""
Compression isn't performed on responses with short content.
"""
self.resp.content = self.short_string
r = GZipMiddleware(self.get_response)(self.req)
self.assertEqual(r.content, self.short_string)
self.assertIsNone(r.get("Content-Encoding"))
def test_no_compress_compressed_response(self):
"""
Compression isn't performed on responses that are already compressed.
"""
self.resp["Content-Encoding"] = "deflate"
r = GZipMiddleware(self.get_response)(self.req)
self.assertEqual(r.content, self.compressible_string)
self.assertEqual(r.get("Content-Encoding"), "deflate")
def test_no_compress_incompressible_response(self):
"""
Compression isn't performed on responses with incompressible content.
"""
self.resp.content = self.incompressible_string
r = GZipMiddleware(self.get_response)(self.req)
self.assertEqual(r.content, self.incompressible_string)
self.assertIsNone(r.get("Content-Encoding"))
def test_compress_deterministic(self):
"""
Compression results are the same for the same content and don't
include a modification time (since that would make the results
of compression non-deterministic and prevent
ConditionalGetMiddleware from recognizing conditional matches
on gzipped content).
"""
r1 = GZipMiddleware(self.get_response)(self.req)
r2 = GZipMiddleware(self.get_response)(self.req)
self.assertEqual(r1.content, r2.content)
self.assertEqual(self.get_mtime(r1.content), 0)
self.assertEqual(self.get_mtime(r2.content), 0)
class ETagGZipMiddlewareTest(SimpleTestCase):
"""
ETags are handled properly by GZipMiddleware.
"""
rf = RequestFactory()
compressible_string = b"a" * 500
def test_strong_etag_modified(self):
"""
GZipMiddleware makes a strong ETag weak.
"""
def get_response(req):
response = HttpResponse(self.compressible_string)
response.headers["ETag"] = '"eggs"'
return response
request = self.rf.get("/", HTTP_ACCEPT_ENCODING="gzip, deflate")
gzip_response = GZipMiddleware(get_response)(request)
self.assertEqual(gzip_response.headers["ETag"], 'W/"eggs"')
def test_weak_etag_not_modified(self):
"""
GZipMiddleware doesn't modify a weak ETag.
"""
def get_response(req):
response = HttpResponse(self.compressible_string)
response.headers["ETag"] = 'W/"eggs"'
return response
request = self.rf.get("/", HTTP_ACCEPT_ENCODING="gzip, deflate")
gzip_response = GZipMiddleware(get_response)(request)
self.assertEqual(gzip_response.headers["ETag"], 'W/"eggs"')
def test_etag_match(self):
"""
GZipMiddleware allows 304 Not Modified responses.
"""
def get_response(req):
return HttpResponse(self.compressible_string)
def get_cond_response(req):
return ConditionalGetMiddleware(get_response)(req)
request = self.rf.get("/", HTTP_ACCEPT_ENCODING="gzip, deflate")
response = GZipMiddleware(get_cond_response)(request)
gzip_etag = response.headers["ETag"]
next_request = self.rf.get(
"/", HTTP_ACCEPT_ENCODING="gzip, deflate", HTTP_IF_NONE_MATCH=gzip_etag
)
next_response = ConditionalGetMiddleware(get_response)(next_request)
self.assertEqual(next_response.status_code, 304)
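    # Sketch of the round trip exercised above (illustrative):
    #
    #   1st request:  GET /  ->  200, Content-Encoding: gzip, ETag: W/"..."
    #   2nd request:  GET /  with If-None-Match: W/"..."  ->  304 Not Modified
    #
    # The weak comparison used for If-None-Match is what lets the
    # gzip-weakened ETag still produce a 304.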
|
0a1efc456f01f796d2506dba900db67540dfe5cf877c950e40c9a1da1119b4c5 | import unittest
from django.utils.ipv6 import clean_ipv6_address, is_valid_ipv6_address
class TestUtilsIPv6(unittest.TestCase):
def test_validates_correct_plain_address(self):
self.assertTrue(is_valid_ipv6_address("fe80::223:6cff:fe8a:2e8a"))
self.assertTrue(is_valid_ipv6_address("2a02::223:6cff:fe8a:2e8a"))
self.assertTrue(is_valid_ipv6_address("1::2:3:4:5:6:7"))
self.assertTrue(is_valid_ipv6_address("::"))
self.assertTrue(is_valid_ipv6_address("::a"))
self.assertTrue(is_valid_ipv6_address("2::"))
def test_validates_correct_with_v4mapping(self):
self.assertTrue(is_valid_ipv6_address("::ffff:254.42.16.14"))
self.assertTrue(is_valid_ipv6_address("::ffff:0a0a:0a0a"))
def test_validates_incorrect_plain_address(self):
self.assertFalse(is_valid_ipv6_address("foo"))
self.assertFalse(is_valid_ipv6_address("127.0.0.1"))
self.assertFalse(is_valid_ipv6_address("12345::"))
self.assertFalse(is_valid_ipv6_address("1::2:3::4"))
self.assertFalse(is_valid_ipv6_address("1::zzz"))
self.assertFalse(is_valid_ipv6_address("1::2:3:4:5:6:7:8"))
self.assertFalse(is_valid_ipv6_address("1:2"))
self.assertFalse(is_valid_ipv6_address("1:::2"))
self.assertFalse(is_valid_ipv6_address("fe80::223: 6cff:fe8a:2e8a"))
self.assertFalse(is_valid_ipv6_address("2a02::223:6cff :fe8a:2e8a"))
def test_validates_incorrect_with_v4mapping(self):
self.assertFalse(is_valid_ipv6_address("::ffff:999.42.16.14"))
self.assertFalse(is_valid_ipv6_address("::ffff:zzzz:0a0a"))
# The ::1.2.3.4 format used to be valid but was deprecated
# in RFC 4291 section 2.5.5.1.
self.assertTrue(is_valid_ipv6_address("::254.42.16.14"))
self.assertTrue(is_valid_ipv6_address("::0a0a:0a0a"))
self.assertFalse(is_valid_ipv6_address("::999.42.16.14"))
self.assertFalse(is_valid_ipv6_address("::zzzz:0a0a"))
def test_cleans_plain_address(self):
self.assertEqual(clean_ipv6_address("DEAD::0:BEEF"), "dead::beef")
self.assertEqual(
clean_ipv6_address("2001:000:a:0000:0:fe:fe:beef"), "2001:0:a::fe:fe:beef"
)
self.assertEqual(
clean_ipv6_address("2001::a:0000:0:fe:fe:beef"), "2001:0:a::fe:fe:beef"
)
def test_cleans_with_v4_mapping(self):
self.assertEqual(clean_ipv6_address("::ffff:0a0a:0a0a"), "::ffff:10.10.10.10")
self.assertEqual(clean_ipv6_address("::ffff:1234:1234"), "::ffff:18.52.18.52")
self.assertEqual(clean_ipv6_address("::ffff:18.52.18.52"), "::ffff:18.52.18.52")
self.assertEqual(clean_ipv6_address("::ffff:0.52.18.52"), "::ffff:0.52.18.52")
self.assertEqual(clean_ipv6_address("::ffff:0.0.0.0"), "::ffff:0.0.0.0")
def test_unpacks_ipv4(self):
self.assertEqual(
clean_ipv6_address("::ffff:0a0a:0a0a", unpack_ipv4=True), "10.10.10.10"
)
self.assertEqual(
clean_ipv6_address("::ffff:1234:1234", unpack_ipv4=True), "18.52.18.52"
)
self.assertEqual(
clean_ipv6_address("::ffff:18.52.18.52", unpack_ipv4=True), "18.52.18.52"
)
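    # Usage sketch, with values lifted from the assertions above
    # (illustrative only):
    #
    #   clean_ipv6_address("DEAD::0:BEEF")                        -> "dead::beef"
    #   clean_ipv6_address("::ffff:0a0a:0a0a")                    -> "::ffff:10.10.10.10"
    #   clean_ipv6_address("::ffff:0a0a:0a0a", unpack_ipv4=True)  -> "10.10.10.10"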
|
8260e6e3ac9101c373303e6c557da20fcf65c51c5ae80eb84c5985a30c14dcda | import platform
import unittest
from datetime import datetime, timezone
from unittest import mock
from django.test import SimpleTestCase
from django.utils.datastructures import MultiValueDict
from django.utils.http import (
base36_to_int,
escape_leading_slashes,
http_date,
int_to_base36,
is_same_domain,
parse_etags,
parse_header_parameters,
parse_http_date,
quote_etag,
url_has_allowed_host_and_scheme,
urlencode,
urlsafe_base64_decode,
urlsafe_base64_encode,
)
class URLEncodeTests(SimpleTestCase):
cannot_encode_none_msg = (
"Cannot encode None for key 'a' in a query string. Did you mean to "
"pass an empty string or omit the value?"
)
def test_tuples(self):
self.assertEqual(urlencode((("a", 1), ("b", 2), ("c", 3))), "a=1&b=2&c=3")
def test_dict(self):
result = urlencode({"a": 1, "b": 2, "c": 3})
# Dictionaries are treated as unordered.
self.assertIn(
result,
[
"a=1&b=2&c=3",
"a=1&c=3&b=2",
"b=2&a=1&c=3",
"b=2&c=3&a=1",
"c=3&a=1&b=2",
"c=3&b=2&a=1",
],
)
def test_dict_containing_sequence_not_doseq(self):
self.assertEqual(urlencode({"a": [1, 2]}, doseq=False), "a=%5B1%2C+2%5D")
def test_dict_containing_tuple_not_doseq(self):
self.assertEqual(urlencode({"a": (1, 2)}, doseq=False), "a=%281%2C+2%29")
def test_custom_iterable_not_doseq(self):
class IterableWithStr:
def __str__(self):
return "custom"
def __iter__(self):
yield from range(0, 3)
self.assertEqual(urlencode({"a": IterableWithStr()}, doseq=False), "a=custom")
def test_dict_containing_sequence_doseq(self):
self.assertEqual(urlencode({"a": [1, 2]}, doseq=True), "a=1&a=2")
def test_dict_containing_empty_sequence_doseq(self):
self.assertEqual(urlencode({"a": []}, doseq=True), "")
def test_multivaluedict(self):
result = urlencode(
MultiValueDict(
{
"name": ["Adrian", "Simon"],
"position": ["Developer"],
}
),
doseq=True,
)
# MultiValueDicts are similarly unordered.
self.assertIn(
result,
[
"name=Adrian&name=Simon&position=Developer",
"position=Developer&name=Adrian&name=Simon",
],
)
def test_dict_with_bytes_values(self):
self.assertEqual(urlencode({"a": b"abc"}, doseq=True), "a=abc")
def test_dict_with_sequence_of_bytes(self):
self.assertEqual(
urlencode({"a": [b"spam", b"eggs", b"bacon"]}, doseq=True),
"a=spam&a=eggs&a=bacon",
)
def test_dict_with_bytearray(self):
self.assertEqual(urlencode({"a": bytearray(range(2))}, doseq=True), "a=0&a=1")
def test_generator(self):
self.assertEqual(urlencode({"a": range(2)}, doseq=True), "a=0&a=1")
self.assertEqual(urlencode({"a": range(2)}, doseq=False), "a=range%280%2C+2%29")
def test_none(self):
with self.assertRaisesMessage(TypeError, self.cannot_encode_none_msg):
urlencode({"a": None})
def test_none_in_sequence(self):
with self.assertRaisesMessage(TypeError, self.cannot_encode_none_msg):
urlencode({"a": [None]}, doseq=True)
def test_none_in_generator(self):
def gen():
yield None
with self.assertRaisesMessage(TypeError, self.cannot_encode_none_msg):
urlencode({"a": gen()}, doseq=True)
class Base36IntTests(SimpleTestCase):
def test_roundtrip(self):
for n in [0, 1, 1000, 1000000]:
self.assertEqual(n, base36_to_int(int_to_base36(n)))
def test_negative_input(self):
with self.assertRaisesMessage(ValueError, "Negative base36 conversion input."):
int_to_base36(-1)
def test_to_base36_errors(self):
for n in ["1", "foo", {1: 2}, (1, 2, 3), 3.141]:
with self.assertRaises(TypeError):
int_to_base36(n)
def test_invalid_literal(self):
for n in ["#", " "]:
with self.assertRaisesMessage(
ValueError, "invalid literal for int() with base 36: '%s'" % n
):
base36_to_int(n)
def test_input_too_large(self):
with self.assertRaisesMessage(ValueError, "Base36 input too large"):
base36_to_int("1" * 14)
def test_to_int_errors(self):
for n in [123, {1: 2}, (1, 2, 3), 3.141]:
with self.assertRaises(TypeError):
base36_to_int(n)
def test_values(self):
for n, b36 in [(0, "0"), (1, "1"), (42, "16"), (818469960, "django")]:
self.assertEqual(int_to_base36(n), b36)
self.assertEqual(base36_to_int(b36), n)
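    # Base36 uses the alphabet 0-9a-z, so encoding is repeated divmod by 36.
    # A minimal sketch (not Django's implementation, and skipping the n == 0
    # case) that reproduces one of the pairs above:
    #
    #   chars = "0123456789abcdefghijklmnopqrstuvwxyz"
    #   n, out = 818469960, ""
    #   while n:
    #       n, rem = divmod(n, 36)
    #       out = chars[rem] + out
    #   assert out == "django"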
class URLHasAllowedHostAndSchemeTests(unittest.TestCase):
def test_bad_urls(self):
bad_urls = (
"http://example.com",
"http:///example.com",
"https://example.com",
"ftp://example.com",
r"\\example.com",
r"\\\example.com",
r"/\\/example.com",
r"\\\example.com",
r"\\example.com",
r"\\//example.com",
r"/\/example.com",
r"\/example.com",
r"/\example.com",
"http:///example.com",
r"http:/\//example.com",
r"http:\/example.com",
r"http:/\example.com",
'javascript:alert("XSS")',
"\njavascript:alert(x)",
"java\nscript:alert(x)",
"\x08//example.com",
r"http://otherserver\@example.com",
r"http:\\testserver\@example.com",
r"http://testserver\me:[email protected]",
r"http://testserver\@example.com",
r"http:\\testserver\confirm\[email protected]",
"http:999999999",
"ftp:9999999999",
"\n",
"http://[2001:cdba:0000:0000:0000:0000:3257:9652/",
"http://2001:cdba:0000:0000:0000:0000:3257:9652]/",
)
for bad_url in bad_urls:
with self.subTest(url=bad_url):
self.assertIs(
url_has_allowed_host_and_scheme(
bad_url, allowed_hosts={"testserver", "testserver2"}
),
False,
)
def test_good_urls(self):
good_urls = (
"/view/?param=http://example.com",
"/view/?param=https://example.com",
"/view?param=ftp://example.com",
"view/?param=//example.com",
"https://testserver/",
"HTTPS://testserver/",
"//testserver/",
"http://testserver/[email protected]",
"/url%20with%20spaces/",
"path/http:2222222222",
)
for good_url in good_urls:
with self.subTest(url=good_url):
self.assertIs(
url_has_allowed_host_and_scheme(
good_url, allowed_hosts={"otherserver", "testserver"}
),
True,
)
def test_basic_auth(self):
# Valid basic auth credentials are allowed.
self.assertIs(
url_has_allowed_host_and_scheme(
r"http://user:pass@testserver/", allowed_hosts={"user:pass@testserver"}
),
True,
)
def test_no_allowed_hosts(self):
# A path without host is allowed.
self.assertIs(
url_has_allowed_host_and_scheme(
"/confirm/[email protected]", allowed_hosts=None
),
True,
)
# Basic auth without host is not allowed.
self.assertIs(
url_has_allowed_host_and_scheme(
r"http://testserver\@example.com", allowed_hosts=None
),
False,
)
def test_allowed_hosts_str(self):
self.assertIs(
url_has_allowed_host_and_scheme(
"http://good.com/good", allowed_hosts="good.com"
),
True,
)
self.assertIs(
url_has_allowed_host_and_scheme(
"http://good.co/evil", allowed_hosts="good.com"
),
False,
)
def test_secure_param_https_urls(self):
secure_urls = (
"https://example.com/p",
"HTTPS://example.com/p",
"/view/?param=http://example.com",
)
for url in secure_urls:
with self.subTest(url=url):
self.assertIs(
url_has_allowed_host_and_scheme(
url, allowed_hosts={"example.com"}, require_https=True
),
True,
)
def test_secure_param_non_https_urls(self):
insecure_urls = (
"http://example.com/p",
"ftp://example.com/p",
"//example.com/p",
)
for url in insecure_urls:
with self.subTest(url=url):
self.assertIs(
url_has_allowed_host_and_scheme(
url, allowed_hosts={"example.com"}, require_https=True
),
False,
)
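    # Quick usage sketch, mirroring the cases above (illustrative):
    #
    #   url_has_allowed_host_and_scheme(
    #       "https://testserver/", allowed_hosts={"testserver"}
    #   )                                                  -> True
    #   url_has_allowed_host_and_scheme(
    #       "http://evil.com/", allowed_hosts={"testserver"}
    #   )                                                  -> False
    #
    # This is the primitive behind safe post-login redirects: relative paths
    # pass, absolute URLs pass only for allowed hosts (and only over HTTPS
    # when require_https=True).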
class URLSafeBase64Tests(unittest.TestCase):
def test_roundtrip(self):
bytestring = b"foo"
encoded = urlsafe_base64_encode(bytestring)
decoded = urlsafe_base64_decode(encoded)
self.assertEqual(bytestring, decoded)
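    # The encoder strips base64 "=" padding and returns str, and the decoder
    # re-adds padding as needed (illustrative):
    #
    #   urlsafe_base64_encode(b"foo")  -> "Zm9v"
    #   urlsafe_base64_encode(b"fo")   -> "Zm8"   (trailing "=" stripped)
    #   urlsafe_base64_decode("Zm8")   -> b"fo"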
class IsSameDomainTests(unittest.TestCase):
def test_good(self):
for pair in (
("example.com", "example.com"),
("example.com", ".example.com"),
("foo.example.com", ".example.com"),
("example.com:8888", "example.com:8888"),
("example.com:8888", ".example.com:8888"),
("foo.example.com:8888", ".example.com:8888"),
):
self.assertIs(is_same_domain(*pair), True)
def test_bad(self):
for pair in (
("example2.com", "example.com"),
("foo.example.com", "example.com"),
("example.com:9999", "example.com:8888"),
("foo.example.com:8888", ""),
):
self.assertIs(is_same_domain(*pair), False)
class ETagProcessingTests(unittest.TestCase):
def test_parsing(self):
self.assertEqual(
parse_etags(r'"" , "etag", "e\\tag", W/"weak"'),
['""', '"etag"', r'"e\\tag"', 'W/"weak"'],
)
self.assertEqual(parse_etags("*"), ["*"])
# Ignore RFC 2616 ETags that are invalid according to RFC 9110.
self.assertEqual(parse_etags(r'"etag", "e\"t\"ag"'), ['"etag"'])
def test_quoting(self):
self.assertEqual(quote_etag("etag"), '"etag"') # unquoted
self.assertEqual(quote_etag('"etag"'), '"etag"') # quoted
self.assertEqual(quote_etag('W/"etag"'), 'W/"etag"') # quoted, weak
class HttpDateProcessingTests(unittest.TestCase):
def test_http_date(self):
t = 1167616461.0
self.assertEqual(http_date(t), "Mon, 01 Jan 2007 01:54:21 GMT")
def test_parsing_rfc1123(self):
parsed = parse_http_date("Sun, 06 Nov 1994 08:49:37 GMT")
self.assertEqual(
datetime.fromtimestamp(parsed, timezone.utc),
datetime(1994, 11, 6, 8, 49, 37, tzinfo=timezone.utc),
)
@unittest.skipIf(platform.architecture()[0] == "32bit", "The Year 2038 problem.")
@mock.patch("django.utils.http.datetime.datetime")
def test_parsing_rfc850(self, mocked_datetime):
mocked_datetime.side_effect = datetime
mocked_datetime.now = mock.Mock()
now_1 = datetime(2019, 11, 6, 8, 49, 37, tzinfo=timezone.utc)
now_2 = datetime(2020, 11, 6, 8, 49, 37, tzinfo=timezone.utc)
now_3 = datetime(2048, 11, 6, 8, 49, 37, tzinfo=timezone.utc)
tests = (
(
now_1,
"Tuesday, 31-Dec-69 08:49:37 GMT",
datetime(2069, 12, 31, 8, 49, 37, tzinfo=timezone.utc),
),
(
now_1,
"Tuesday, 10-Nov-70 08:49:37 GMT",
datetime(1970, 11, 10, 8, 49, 37, tzinfo=timezone.utc),
),
(
now_1,
"Sunday, 06-Nov-94 08:49:37 GMT",
datetime(1994, 11, 6, 8, 49, 37, tzinfo=timezone.utc),
),
(
now_2,
"Wednesday, 31-Dec-70 08:49:37 GMT",
datetime(2070, 12, 31, 8, 49, 37, tzinfo=timezone.utc),
),
(
now_2,
"Friday, 31-Dec-71 08:49:37 GMT",
datetime(1971, 12, 31, 8, 49, 37, tzinfo=timezone.utc),
),
(
now_3,
"Sunday, 31-Dec-00 08:49:37 GMT",
datetime(2000, 12, 31, 8, 49, 37, tzinfo=timezone.utc),
),
(
now_3,
"Friday, 31-Dec-99 08:49:37 GMT",
datetime(1999, 12, 31, 8, 49, 37, tzinfo=timezone.utc),
),
)
for now, rfc850str, expected_date in tests:
with self.subTest(rfc850str=rfc850str):
mocked_datetime.now.return_value = now
parsed = parse_http_date(rfc850str)
mocked_datetime.now.assert_called_once_with(tz=timezone.utc)
self.assertEqual(
datetime.fromtimestamp(parsed, timezone.utc),
expected_date,
)
mocked_datetime.reset_mock()
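    # The expectations above follow RFC 9110's rule for two-digit years: if
    # the naive reading lands more than 50 years ahead of the current year
    # (Django compares the year numbers only), the date is shifted back a
    # century. With "now" in 2019: 69 -> 2069 (50 years ahead, kept), but
    # 70 -> would be 2070, 51 years ahead, so 1970. (Explanatory note, not
    # part of the original file.)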
def test_parsing_asctime(self):
parsed = parse_http_date("Sun Nov 6 08:49:37 1994")
self.assertEqual(
datetime.fromtimestamp(parsed, timezone.utc),
datetime(1994, 11, 6, 8, 49, 37, tzinfo=timezone.utc),
)
def test_parsing_asctime_nonascii_digits(self):
"""Non-ASCII unicode decimals raise an error."""
        with self.assertRaises(ValueError):
            parse_http_date("Sun Nov  6 08:49:37 １９９４")
        with self.assertRaises(ValueError):
            parse_http_date("Sun Nov １２ 08:49:37 1994")
def test_parsing_year_less_than_70(self):
parsed = parse_http_date("Sun Nov 6 08:49:37 0037")
self.assertEqual(
datetime.fromtimestamp(parsed, timezone.utc),
datetime(2037, 11, 6, 8, 49, 37, tzinfo=timezone.utc),
)
class EscapeLeadingSlashesTests(unittest.TestCase):
def test(self):
tests = (
("//example.com", "/%2Fexample.com"),
("//", "/%2F"),
)
for url, expected in tests:
with self.subTest(url=url):
self.assertEqual(escape_leading_slashes(url), expected)
class ParseHeaderParameterTests(unittest.TestCase):
def test_basic(self):
tests = [
("text/plain", ("text/plain", {})),
("text/vnd.just.made.this.up ; ", ("text/vnd.just.made.this.up", {})),
("text/plain;charset=us-ascii", ("text/plain", {"charset": "us-ascii"})),
(
'text/plain ; charset="us-ascii"',
("text/plain", {"charset": "us-ascii"}),
),
(
'text/plain ; charset="us-ascii"; another=opt',
("text/plain", {"charset": "us-ascii", "another": "opt"}),
),
(
'attachment; filename="silly.txt"',
("attachment", {"filename": "silly.txt"}),
),
(
'attachment; filename="strange;name"',
("attachment", {"filename": "strange;name"}),
),
(
'attachment; filename="strange;name";size=123;',
("attachment", {"filename": "strange;name", "size": "123"}),
),
(
'form-data; name="files"; filename="fo\\"o;bar"',
("form-data", {"name": "files", "filename": 'fo"o;bar'}),
),
]
for header, expected in tests:
with self.subTest(header=header):
self.assertEqual(parse_header_parameters(header), expected)
def test_rfc2231_parsing(self):
test_data = (
(
"Content-Type: application/x-stuff; "
"title*=us-ascii'en-us'This%20is%20%2A%2A%2Afun%2A%2A%2A",
"This is ***fun***",
),
(
"Content-Type: application/x-stuff; title*=UTF-8''foo-%c3%a4.html",
"foo-ä.html",
),
(
"Content-Type: application/x-stuff; title*=iso-8859-1''foo-%E4.html",
"foo-ä.html",
),
)
for raw_line, expected_title in test_data:
parsed = parse_header_parameters(raw_line)
self.assertEqual(parsed[1]["title"], expected_title)
def test_rfc2231_wrong_title(self):
"""
Test wrongly formatted RFC 2231 headers (missing double single quotes).
Parsing should not crash (#24209).
"""
test_data = (
(
"Content-Type: application/x-stuff; "
"title*='This%20is%20%2A%2A%2Afun%2A%2A%2A",
"'This%20is%20%2A%2A%2Afun%2A%2A%2A",
),
("Content-Type: application/x-stuff; title*='foo.html", "'foo.html"),
("Content-Type: application/x-stuff; title*=bar.html", "bar.html"),
)
for raw_line, expected_title in test_data:
parsed = parse_header_parameters(raw_line)
self.assertEqual(parsed[1]["title"], expected_title)
|
66ae6c5f1de3cfc5181ed4f6d453f4badde1ffdc20758cfedd892cc951d94136 | import pickle
from io import BytesIO
from itertools import chain
from urllib.parse import urlencode
from django.core.exceptions import DisallowedHost
from django.core.handlers.wsgi import LimitedStream, WSGIRequest
from django.http import (
HttpHeaders,
HttpRequest,
RawPostDataException,
UnreadablePostError,
)
from django.http.multipartparser import MultiPartParserError
from django.http.request import split_domain_port
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.client import FakePayload
class RequestsTests(SimpleTestCase):
def test_httprequest(self):
request = HttpRequest()
self.assertEqual(list(request.GET), [])
self.assertEqual(list(request.POST), [])
self.assertEqual(list(request.COOKIES), [])
self.assertEqual(list(request.META), [])
# .GET and .POST should be QueryDicts
self.assertEqual(request.GET.urlencode(), "")
self.assertEqual(request.POST.urlencode(), "")
# and FILES should be MultiValueDict
self.assertEqual(request.FILES.getlist("foo"), [])
self.assertIsNone(request.content_type)
self.assertIsNone(request.content_params)
def test_httprequest_full_path(self):
request = HttpRequest()
request.path = "/;some/?awful/=path/foo:bar/"
request.path_info = "/prefix" + request.path
request.META["QUERY_STRING"] = ";some=query&+query=string"
expected = "/%3Bsome/%3Fawful/%3Dpath/foo:bar/?;some=query&+query=string"
self.assertEqual(request.get_full_path(), expected)
self.assertEqual(request.get_full_path_info(), "/prefix" + expected)
def test_httprequest_full_path_with_query_string_and_fragment(self):
request = HttpRequest()
request.path = "/foo#bar"
request.path_info = "/prefix" + request.path
request.META["QUERY_STRING"] = "baz#quux"
self.assertEqual(request.get_full_path(), "/foo%23bar?baz#quux")
self.assertEqual(request.get_full_path_info(), "/prefix/foo%23bar?baz#quux")
def test_httprequest_repr(self):
request = HttpRequest()
request.path = "/somepath/"
request.method = "GET"
request.GET = {"get-key": "get-value"}
request.POST = {"post-key": "post-value"}
request.COOKIES = {"post-key": "post-value"}
request.META = {"post-key": "post-value"}
self.assertEqual(repr(request), "<HttpRequest: GET '/somepath/'>")
def test_httprequest_repr_invalid_method_and_path(self):
request = HttpRequest()
self.assertEqual(repr(request), "<HttpRequest>")
request = HttpRequest()
request.method = "GET"
self.assertEqual(repr(request), "<HttpRequest>")
request = HttpRequest()
request.path = ""
self.assertEqual(repr(request), "<HttpRequest>")
def test_wsgirequest(self):
request = WSGIRequest(
{
"PATH_INFO": "bogus",
"REQUEST_METHOD": "bogus",
"CONTENT_TYPE": "text/html; charset=utf8",
"wsgi.input": BytesIO(b""),
}
)
self.assertEqual(list(request.GET), [])
self.assertEqual(list(request.POST), [])
self.assertEqual(list(request.COOKIES), [])
self.assertEqual(
set(request.META),
{
"PATH_INFO",
"REQUEST_METHOD",
"SCRIPT_NAME",
"CONTENT_TYPE",
"wsgi.input",
},
)
self.assertEqual(request.META["PATH_INFO"], "bogus")
self.assertEqual(request.META["REQUEST_METHOD"], "bogus")
self.assertEqual(request.META["SCRIPT_NAME"], "")
self.assertEqual(request.content_type, "text/html")
self.assertEqual(request.content_params, {"charset": "utf8"})
def test_wsgirequest_with_script_name(self):
"""
The request's path is correctly assembled, regardless of whether or
not the SCRIPT_NAME has a trailing slash (#20169).
"""
# With trailing slash
request = WSGIRequest(
{
"PATH_INFO": "/somepath/",
"SCRIPT_NAME": "/PREFIX/",
"REQUEST_METHOD": "get",
"wsgi.input": BytesIO(b""),
}
)
self.assertEqual(request.path, "/PREFIX/somepath/")
# Without trailing slash
request = WSGIRequest(
{
"PATH_INFO": "/somepath/",
"SCRIPT_NAME": "/PREFIX",
"REQUEST_METHOD": "get",
"wsgi.input": BytesIO(b""),
}
)
self.assertEqual(request.path, "/PREFIX/somepath/")
def test_wsgirequest_script_url_double_slashes(self):
"""
WSGI squashes multiple successive slashes in PATH_INFO, WSGIRequest
should take that into account when populating request.path and
request.META['SCRIPT_NAME'] (#17133).
"""
request = WSGIRequest(
{
"SCRIPT_URL": "/mst/milestones//accounts/login//help",
"PATH_INFO": "/milestones/accounts/login/help",
"REQUEST_METHOD": "get",
"wsgi.input": BytesIO(b""),
}
)
self.assertEqual(request.path, "/mst/milestones/accounts/login/help")
self.assertEqual(request.META["SCRIPT_NAME"], "/mst")
def test_wsgirequest_with_force_script_name(self):
"""
The FORCE_SCRIPT_NAME setting takes precedence over the request's
SCRIPT_NAME environment parameter (#20169).
"""
with override_settings(FORCE_SCRIPT_NAME="/FORCED_PREFIX/"):
request = WSGIRequest(
{
"PATH_INFO": "/somepath/",
"SCRIPT_NAME": "/PREFIX/",
"REQUEST_METHOD": "get",
"wsgi.input": BytesIO(b""),
}
)
self.assertEqual(request.path, "/FORCED_PREFIX/somepath/")
def test_wsgirequest_path_with_force_script_name_trailing_slash(self):
"""
The request's path is correctly assembled, regardless of whether or not
the FORCE_SCRIPT_NAME setting has a trailing slash (#20169).
"""
# With trailing slash
with override_settings(FORCE_SCRIPT_NAME="/FORCED_PREFIX/"):
request = WSGIRequest(
{
"PATH_INFO": "/somepath/",
"REQUEST_METHOD": "get",
"wsgi.input": BytesIO(b""),
}
)
self.assertEqual(request.path, "/FORCED_PREFIX/somepath/")
# Without trailing slash
with override_settings(FORCE_SCRIPT_NAME="/FORCED_PREFIX"):
request = WSGIRequest(
{
"PATH_INFO": "/somepath/",
"REQUEST_METHOD": "get",
"wsgi.input": BytesIO(b""),
}
)
self.assertEqual(request.path, "/FORCED_PREFIX/somepath/")
def test_wsgirequest_repr(self):
request = WSGIRequest({"REQUEST_METHOD": "get", "wsgi.input": BytesIO(b"")})
self.assertEqual(repr(request), "<WSGIRequest: GET '/'>")
request = WSGIRequest(
{
"PATH_INFO": "/somepath/",
"REQUEST_METHOD": "get",
"wsgi.input": BytesIO(b""),
}
)
request.GET = {"get-key": "get-value"}
request.POST = {"post-key": "post-value"}
request.COOKIES = {"post-key": "post-value"}
request.META = {"post-key": "post-value"}
self.assertEqual(repr(request), "<WSGIRequest: GET '/somepath/'>")
def test_wsgirequest_path_info(self):
def wsgi_str(path_info, encoding="utf-8"):
path_info = path_info.encode(
encoding
) # Actual URL sent by the browser (bytestring)
path_info = path_info.decode(
"iso-8859-1"
) # Value in the WSGI environ dict (native string)
return path_info
# Regression for #19468
request = WSGIRequest(
{
"PATH_INFO": wsgi_str("/سلام/"),
"REQUEST_METHOD": "get",
"wsgi.input": BytesIO(b""),
}
)
self.assertEqual(request.path, "/سلام/")
# The URL may be incorrectly encoded in a non-UTF-8 encoding (#26971)
request = WSGIRequest(
{
"PATH_INFO": wsgi_str("/café/", encoding="iso-8859-1"),
"REQUEST_METHOD": "get",
"wsgi.input": BytesIO(b""),
}
)
# Since it's impossible to decide the (wrong) encoding of the URL, it's
# left percent-encoded in the path.
self.assertEqual(request.path, "/caf%E9/")
def test_limited_stream(self):
# Read all of a limited stream
stream = LimitedStream(BytesIO(b"test"), 2)
self.assertEqual(stream.read(), b"te")
# Reading again returns nothing.
self.assertEqual(stream.read(), b"")
# Read a number of characters greater than the stream has to offer
stream = LimitedStream(BytesIO(b"test"), 2)
self.assertEqual(stream.read(5), b"te")
# Reading again returns nothing.
self.assertEqual(stream.readline(5), b"")
# Read sequentially from a stream
stream = LimitedStream(BytesIO(b"12345678"), 8)
self.assertEqual(stream.read(5), b"12345")
self.assertEqual(stream.read(5), b"678")
# Reading again returns nothing.
self.assertEqual(stream.readline(5), b"")
# Read lines from a stream
stream = LimitedStream(BytesIO(b"1234\n5678\nabcd\nefgh\nijkl"), 24)
# Read a full line, unconditionally
self.assertEqual(stream.readline(), b"1234\n")
# Read a number of characters less than a line
self.assertEqual(stream.readline(2), b"56")
# Read the rest of the partial line
self.assertEqual(stream.readline(), b"78\n")
# Read a full line, with a character limit greater than the line length
self.assertEqual(stream.readline(6), b"abcd\n")
# Read the next line, deliberately terminated at the line end
self.assertEqual(stream.readline(4), b"efgh")
# Read the next line... just the line end
self.assertEqual(stream.readline(), b"\n")
# Read everything else.
self.assertEqual(stream.readline(), b"ijkl")
# Regression for #15018
# If a stream contains a newline, but the provided length
# is less than the number of provided characters, the newline
# doesn't reset the available character count
stream = LimitedStream(BytesIO(b"1234\nabcdef"), 9)
self.assertEqual(stream.readline(10), b"1234\n")
self.assertEqual(stream.readline(3), b"abc")
# Now expire the available characters
self.assertEqual(stream.readline(3), b"d")
# Reading again returns nothing.
self.assertEqual(stream.readline(2), b"")
# Same test, but with read, not readline.
stream = LimitedStream(BytesIO(b"1234\nabcdef"), 9)
self.assertEqual(stream.read(6), b"1234\na")
self.assertEqual(stream.read(2), b"bc")
self.assertEqual(stream.read(2), b"d")
self.assertEqual(stream.read(2), b"")
self.assertEqual(stream.read(), b"")
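    # LimitedStream enforces the declared CONTENT_LENGTH as a hard cap on
    # wsgi.input, so reads never return more than the remaining budget and
    # an exhausted stream yields b"" instead of blocking (sketch of the
    # behavior asserted above):
    #
    #   stream = LimitedStream(BytesIO(b"abcdef"), 3)
    #   stream.read()   -> b"abc"
    #   stream.read(1)  -> b""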
def test_stream(self):
payload = FakePayload("name=value")
request = WSGIRequest(
{
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": "application/x-www-form-urlencoded",
"CONTENT_LENGTH": len(payload),
"wsgi.input": payload,
},
)
self.assertEqual(request.read(), b"name=value")
def test_read_after_value(self):
"""
Reading from request is allowed after accessing request contents as
POST or body.
"""
payload = FakePayload("name=value")
request = WSGIRequest(
{
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": "application/x-www-form-urlencoded",
"CONTENT_LENGTH": len(payload),
"wsgi.input": payload,
}
)
self.assertEqual(request.POST, {"name": ["value"]})
self.assertEqual(request.body, b"name=value")
self.assertEqual(request.read(), b"name=value")
def test_value_after_read(self):
"""
Construction of POST or body is not allowed after reading
from request.
"""
payload = FakePayload("name=value")
request = WSGIRequest(
{
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": "application/x-www-form-urlencoded",
"CONTENT_LENGTH": len(payload),
"wsgi.input": payload,
}
)
self.assertEqual(request.read(2), b"na")
with self.assertRaises(RawPostDataException):
request.body
self.assertEqual(request.POST, {})
def test_non_ascii_POST(self):
payload = FakePayload(urlencode({"key": "España"}))
request = WSGIRequest(
{
"REQUEST_METHOD": "POST",
"CONTENT_LENGTH": len(payload),
"CONTENT_TYPE": "application/x-www-form-urlencoded",
"wsgi.input": payload,
}
)
self.assertEqual(request.POST, {"key": ["España"]})
def test_alternate_charset_POST(self):
"""
Test a POST with non-utf-8 payload encoding.
"""
payload = FakePayload(urlencode({"key": "España".encode("latin-1")}))
request = WSGIRequest(
{
"REQUEST_METHOD": "POST",
"CONTENT_LENGTH": len(payload),
"CONTENT_TYPE": "application/x-www-form-urlencoded; charset=iso-8859-1",
"wsgi.input": payload,
}
)
self.assertEqual(request.POST, {"key": ["España"]})
def test_body_after_POST_multipart_form_data(self):
"""
Reading body after parsing multipart/form-data is not allowed
"""
# Because multipart is used for large amounts of data i.e. file uploads,
# we don't want the data held in memory twice, and we don't want to
# silence the error by setting body = '' either.
payload = FakePayload(
"\r\n".join(
[
"--boundary",
'Content-Disposition: form-data; name="name"',
"",
"value",
"--boundary--",
]
)
)
request = WSGIRequest(
{
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": "multipart/form-data; boundary=boundary",
"CONTENT_LENGTH": len(payload),
"wsgi.input": payload,
}
)
self.assertEqual(request.POST, {"name": ["value"]})
with self.assertRaises(RawPostDataException):
request.body
def test_body_after_POST_multipart_related(self):
"""
Reading body after parsing multipart that isn't form-data is allowed
"""
# Ticket #9054
# There are cases in which the multipart data is related instead of
# being a binary upload, in which case it should still be accessible
# via body.
payload_data = b"\r\n".join(
[
b"--boundary",
b'Content-ID: id; name="name"',
b"",
b"value",
b"--boundary--",
]
)
payload = FakePayload(payload_data)
request = WSGIRequest(
{
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": "multipart/related; boundary=boundary",
"CONTENT_LENGTH": len(payload),
"wsgi.input": payload,
}
)
self.assertEqual(request.POST, {})
self.assertEqual(request.body, payload_data)
def test_POST_multipart_with_content_length_zero(self):
"""
Multipart POST requests with Content-Length >= 0 are valid and need to
be handled.
"""
# According to RFC 9110 Section 8.6 every POST with Content-Length >= 0
# is a valid request, so ensure that we handle Content-Length == 0.
payload = FakePayload(
"\r\n".join(
[
"--boundary",
'Content-Disposition: form-data; name="name"',
"",
"value",
"--boundary--",
]
)
)
request = WSGIRequest(
{
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": "multipart/form-data; boundary=boundary",
"CONTENT_LENGTH": 0,
"wsgi.input": payload,
}
)
self.assertEqual(request.POST, {})
def test_POST_binary_only(self):
payload = b"\r\n\x01\x00\x00\x00ab\x00\x00\xcd\xcc,@"
environ = {
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": "application/octet-stream",
"CONTENT_LENGTH": len(payload),
"wsgi.input": BytesIO(payload),
}
request = WSGIRequest(environ)
self.assertEqual(request.POST, {})
self.assertEqual(request.FILES, {})
self.assertEqual(request.body, payload)
# Same test without specifying content-type
environ.update({"CONTENT_TYPE": "", "wsgi.input": BytesIO(payload)})
request = WSGIRequest(environ)
self.assertEqual(request.POST, {})
self.assertEqual(request.FILES, {})
self.assertEqual(request.body, payload)
def test_read_by_lines(self):
payload = FakePayload("name=value")
request = WSGIRequest(
{
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": "application/x-www-form-urlencoded",
"CONTENT_LENGTH": len(payload),
"wsgi.input": payload,
}
)
self.assertEqual(list(request), [b"name=value"])
def test_POST_after_body_read(self):
"""
POST should be populated even if body is read first
"""
payload = FakePayload("name=value")
request = WSGIRequest(
{
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": "application/x-www-form-urlencoded",
"CONTENT_LENGTH": len(payload),
"wsgi.input": payload,
}
)
request.body # evaluate
self.assertEqual(request.POST, {"name": ["value"]})
def test_POST_after_body_read_and_stream_read(self):
"""
POST should be populated even if body is read first, and then
the stream is read second.
"""
payload = FakePayload("name=value")
request = WSGIRequest(
{
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": "application/x-www-form-urlencoded",
"CONTENT_LENGTH": len(payload),
"wsgi.input": payload,
}
)
request.body # evaluate
self.assertEqual(request.read(1), b"n")
self.assertEqual(request.POST, {"name": ["value"]})
def test_POST_after_body_read_and_stream_read_multipart(self):
"""
POST should be populated even if body is read first, and then
the stream is read second. Using multipart/form-data instead of urlencoded.
"""
payload = FakePayload(
"\r\n".join(
[
"--boundary",
'Content-Disposition: form-data; name="name"',
"",
"value",
"--boundary--" "",
]
)
)
request = WSGIRequest(
{
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": "multipart/form-data; boundary=boundary",
"CONTENT_LENGTH": len(payload),
"wsgi.input": payload,
}
)
request.body # evaluate
# Consume enough data to mess up the parsing:
self.assertEqual(request.read(13), b"--boundary\r\nC")
self.assertEqual(request.POST, {"name": ["value"]})
def test_POST_immutable_for_multipart(self):
"""
MultiPartParser.parse() leaves request.POST immutable.
"""
payload = FakePayload(
"\r\n".join(
[
"--boundary",
'Content-Disposition: form-data; name="name"',
"",
"value",
"--boundary--",
]
)
)
request = WSGIRequest(
{
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": "multipart/form-data; boundary=boundary",
"CONTENT_LENGTH": len(payload),
"wsgi.input": payload,
}
)
self.assertFalse(request.POST._mutable)
def test_multipart_without_boundary(self):
request = WSGIRequest(
{
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": "multipart/form-data;",
"CONTENT_LENGTH": 0,
"wsgi.input": FakePayload(),
}
)
with self.assertRaisesMessage(
MultiPartParserError, "Invalid boundary in multipart: None"
):
request.POST
def test_multipart_non_ascii_content_type(self):
request = WSGIRequest(
{
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": "multipart/form-data; boundary = \xe0",
"CONTENT_LENGTH": 0,
"wsgi.input": FakePayload(),
}
)
msg = (
"Invalid non-ASCII Content-Type in multipart: multipart/form-data; "
"boundary = à"
)
with self.assertRaisesMessage(MultiPartParserError, msg):
request.POST
def test_POST_connection_error(self):
"""
If wsgi.input.read() raises an exception while trying to read() the
POST, the exception is identifiable (not a generic OSError).
"""
class ExplodingBytesIO(BytesIO):
def read(self, len=0):
raise OSError("kaboom!")
payload = b"name=value"
request = WSGIRequest(
{
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": "application/x-www-form-urlencoded",
"CONTENT_LENGTH": len(payload),
"wsgi.input": ExplodingBytesIO(payload),
}
)
with self.assertRaises(UnreadablePostError):
request.body
def test_set_encoding_clears_POST(self):
payload = FakePayload("name=Hello Günter")
request = WSGIRequest(
{
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": "application/x-www-form-urlencoded",
"CONTENT_LENGTH": len(payload),
"wsgi.input": payload,
}
)
self.assertEqual(request.POST, {"name": ["Hello Günter"]})
request.encoding = "iso-8859-16"
self.assertEqual(request.POST, {"name": ["Hello GĂŒnter"]})
def test_set_encoding_clears_GET(self):
request = WSGIRequest(
{
"REQUEST_METHOD": "GET",
"wsgi.input": "",
"QUERY_STRING": "name=Hello%20G%C3%BCnter",
}
)
self.assertEqual(request.GET, {"name": ["Hello Günter"]})
request.encoding = "iso-8859-16"
self.assertEqual(request.GET, {"name": ["Hello G\u0102\u0152nter"]})
def test_FILES_connection_error(self):
"""
If wsgi.input.read() raises an exception while trying to read() the
FILES, the exception is identifiable (not a generic OSError).
"""
class ExplodingBytesIO(BytesIO):
def read(self, len=0):
raise OSError("kaboom!")
payload = b"x"
request = WSGIRequest(
{
"REQUEST_METHOD": "POST",
"CONTENT_TYPE": "multipart/form-data; boundary=foo_",
"CONTENT_LENGTH": len(payload),
"wsgi.input": ExplodingBytesIO(payload),
}
)
with self.assertRaises(UnreadablePostError):
request.FILES
def test_pickling_request(self):
request = HttpRequest()
request.method = "GET"
request.path = "/testpath/"
request.META = {
"QUERY_STRING": ";some=query&+query=string",
"SERVER_NAME": "example.com",
"SERVER_PORT": 80,
}
request.COOKIES = {"post-key": "post-value"}
dump = pickle.dumps(request)
request_from_pickle = pickle.loads(dump)
self.assertEqual(repr(request), repr(request_from_pickle))
class HostValidationTests(SimpleTestCase):
poisoned_hosts = [
"[email protected]",
"example.com:[email protected]",
"example.com:[email protected]:80",
"example.com:80/badpath",
"example.com: recovermypassword.com",
]
@override_settings(
USE_X_FORWARDED_HOST=False,
ALLOWED_HOSTS=[
"forward.com",
"example.com",
"internal.com",
"12.34.56.78",
"[2001:19f0:feee::dead:beef:cafe]",
"xn--4ca9at.com",
".multitenant.com",
"INSENSITIVE.com",
"[::ffff:169.254.169.254]",
],
)
def test_http_get_host(self):
# Check if X_FORWARDED_HOST is provided.
request = HttpRequest()
request.META = {
"HTTP_X_FORWARDED_HOST": "forward.com",
"HTTP_HOST": "example.com",
"SERVER_NAME": "internal.com",
"SERVER_PORT": 80,
}
# X_FORWARDED_HOST is ignored.
self.assertEqual(request.get_host(), "example.com")
# Check if X_FORWARDED_HOST isn't provided.
request = HttpRequest()
request.META = {
"HTTP_HOST": "example.com",
"SERVER_NAME": "internal.com",
"SERVER_PORT": 80,
}
self.assertEqual(request.get_host(), "example.com")
# Check if HTTP_HOST isn't provided.
request = HttpRequest()
request.META = {
"SERVER_NAME": "internal.com",
"SERVER_PORT": 80,
}
self.assertEqual(request.get_host(), "internal.com")
# Check if HTTP_HOST isn't provided, and we're on a nonstandard port
request = HttpRequest()
request.META = {
"SERVER_NAME": "internal.com",
"SERVER_PORT": 8042,
}
self.assertEqual(request.get_host(), "internal.com:8042")
legit_hosts = [
"example.com",
"example.com:80",
"12.34.56.78",
"12.34.56.78:443",
"[2001:19f0:feee::dead:beef:cafe]",
"[2001:19f0:feee::dead:beef:cafe]:8080",
"xn--4ca9at.com", # Punycode for öäü.com
"anything.multitenant.com",
"multitenant.com",
"insensitive.com",
"example.com.",
"example.com.:80",
"[::ffff:169.254.169.254]",
]
for host in legit_hosts:
request = HttpRequest()
request.META = {
"HTTP_HOST": host,
}
request.get_host()
# Poisoned host headers are rejected as suspicious
for host in chain(self.poisoned_hosts, ["other.com", "example.com.."]):
with self.assertRaises(DisallowedHost):
request = HttpRequest()
request.META = {
"HTTP_HOST": host,
}
request.get_host()
@override_settings(USE_X_FORWARDED_HOST=True, ALLOWED_HOSTS=["*"])
def test_http_get_host_with_x_forwarded_host(self):
# Check if X_FORWARDED_HOST is provided.
request = HttpRequest()
request.META = {
"HTTP_X_FORWARDED_HOST": "forward.com",
"HTTP_HOST": "example.com",
"SERVER_NAME": "internal.com",
"SERVER_PORT": 80,
}
# X_FORWARDED_HOST is obeyed.
self.assertEqual(request.get_host(), "forward.com")
# Check if X_FORWARDED_HOST isn't provided.
request = HttpRequest()
request.META = {
"HTTP_HOST": "example.com",
"SERVER_NAME": "internal.com",
"SERVER_PORT": 80,
}
self.assertEqual(request.get_host(), "example.com")
# Check if HTTP_HOST isn't provided.
request = HttpRequest()
request.META = {
"SERVER_NAME": "internal.com",
"SERVER_PORT": 80,
}
self.assertEqual(request.get_host(), "internal.com")
# Check if HTTP_HOST isn't provided, and we're on a nonstandard port
request = HttpRequest()
request.META = {
"SERVER_NAME": "internal.com",
"SERVER_PORT": 8042,
}
self.assertEqual(request.get_host(), "internal.com:8042")
# Poisoned host headers are rejected as suspicious
legit_hosts = [
"example.com",
"example.com:80",
"12.34.56.78",
"12.34.56.78:443",
"[2001:19f0:feee::dead:beef:cafe]",
"[2001:19f0:feee::dead:beef:cafe]:8080",
"xn--4ca9at.com", # Punycode for öäü.com
]
for host in legit_hosts:
request = HttpRequest()
request.META = {
"HTTP_HOST": host,
}
request.get_host()
for host in self.poisoned_hosts:
with self.assertRaises(DisallowedHost):
request = HttpRequest()
request.META = {
"HTTP_HOST": host,
}
request.get_host()
@override_settings(USE_X_FORWARDED_PORT=False)
def test_get_port(self):
request = HttpRequest()
request.META = {
"SERVER_PORT": "8080",
"HTTP_X_FORWARDED_PORT": "80",
}
# Shouldn't use the X-Forwarded-Port header
self.assertEqual(request.get_port(), "8080")
request = HttpRequest()
request.META = {
"SERVER_PORT": "8080",
}
self.assertEqual(request.get_port(), "8080")
@override_settings(USE_X_FORWARDED_PORT=True)
def test_get_port_with_x_forwarded_port(self):
request = HttpRequest()
request.META = {
"SERVER_PORT": "8080",
"HTTP_X_FORWARDED_PORT": "80",
}
# Should use the X-Forwarded-Port header
self.assertEqual(request.get_port(), "80")
request = HttpRequest()
request.META = {
"SERVER_PORT": "8080",
}
self.assertEqual(request.get_port(), "8080")
@override_settings(DEBUG=True, ALLOWED_HOSTS=[])
def test_host_validation_in_debug_mode(self):
"""
If ALLOWED_HOSTS is empty and DEBUG is True, variants of localhost are
allowed.
"""
valid_hosts = ["localhost", "subdomain.localhost", "127.0.0.1", "[::1]"]
for host in valid_hosts:
request = HttpRequest()
request.META = {"HTTP_HOST": host}
self.assertEqual(request.get_host(), host)
# Other hostnames raise a DisallowedHost.
with self.assertRaises(DisallowedHost):
request = HttpRequest()
request.META = {"HTTP_HOST": "example.com"}
request.get_host()
@override_settings(ALLOWED_HOSTS=[])
def test_get_host_suggestion_of_allowed_host(self):
"""
get_host() makes helpful suggestions if a valid-looking host is not in
ALLOWED_HOSTS.
"""
msg_invalid_host = "Invalid HTTP_HOST header: %r."
msg_suggestion = msg_invalid_host + " You may need to add %r to ALLOWED_HOSTS."
msg_suggestion2 = (
msg_invalid_host
+ " The domain name provided is not valid according to RFC 1034/1035"
)
for host in [ # Valid-looking hosts
"example.com",
"12.34.56.78",
"[2001:19f0:feee::dead:beef:cafe]",
"xn--4ca9at.com", # Punycode for öäü.com
]:
request = HttpRequest()
request.META = {"HTTP_HOST": host}
with self.assertRaisesMessage(
DisallowedHost, msg_suggestion % (host, host)
):
request.get_host()
for domain, port in [ # Valid-looking hosts with a port number
("example.com", 80),
("12.34.56.78", 443),
("[2001:19f0:feee::dead:beef:cafe]", 8080),
]:
host = "%s:%s" % (domain, port)
request = HttpRequest()
request.META = {"HTTP_HOST": host}
with self.assertRaisesMessage(
DisallowedHost, msg_suggestion % (host, domain)
):
request.get_host()
for host in self.poisoned_hosts:
request = HttpRequest()
request.META = {"HTTP_HOST": host}
with self.assertRaisesMessage(DisallowedHost, msg_invalid_host % host):
request.get_host()
request = HttpRequest()
request.META = {"HTTP_HOST": "invalid_hostname.com"}
with self.assertRaisesMessage(
DisallowedHost, msg_suggestion2 % "invalid_hostname.com"
):
request.get_host()
def test_split_domain_port_removes_trailing_dot(self):
domain, port = split_domain_port("example.com.:8080")
self.assertEqual(domain, "example.com")
self.assertEqual(port, "8080")
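    # split_domain_port sketch (illustrative; the first case is the test
    # above, the others are assumed behavior for a missing port and for a
    # bracketed IPv6 literal):
    #
    #   split_domain_port("example.com.:8080")  -> ("example.com", "8080")
    #   split_domain_port("example.com")        -> ("example.com", "")
    #   split_domain_port("[::1]:80")           -> ("[::1]", "80")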
class BuildAbsoluteURITests(SimpleTestCase):
factory = RequestFactory()
def test_absolute_url(self):
request = HttpRequest()
url = "https://www.example.com/asdf"
self.assertEqual(request.build_absolute_uri(location=url), url)
def test_host_retrieval(self):
request = HttpRequest()
request.get_host = lambda: "www.example.com"
request.path = ""
self.assertEqual(
request.build_absolute_uri(location="/path/with:colons"),
"http://www.example.com/path/with:colons",
)
def test_request_path_begins_with_two_slashes(self):
# //// creates a request with a path beginning with //
request = self.factory.get("////absolute-uri")
tests = (
# location isn't provided
(None, "http://testserver//absolute-uri"),
# An absolute URL
("http://example.com/?foo=bar", "http://example.com/?foo=bar"),
# A schema-relative URL
("//example.com/?foo=bar", "http://example.com/?foo=bar"),
# Relative URLs
("/foo/bar/", "http://testserver/foo/bar/"),
("/foo/./bar/", "http://testserver/foo/bar/"),
("/foo/../bar/", "http://testserver/bar/"),
("///foo/bar/", "http://testserver/foo/bar/"),
)
for location, expected_url in tests:
with self.subTest(location=location):
self.assertEqual(
request.build_absolute_uri(location=location), expected_url
)
class RequestHeadersTests(SimpleTestCase):
ENVIRON = {
# Non-headers are ignored.
"PATH_INFO": "/somepath/",
"REQUEST_METHOD": "get",
"wsgi.input": BytesIO(b""),
"SERVER_NAME": "internal.com",
"SERVER_PORT": 80,
# These non-HTTP prefixed headers are included.
"CONTENT_TYPE": "text/html",
"CONTENT_LENGTH": "100",
# All HTTP-prefixed headers are included.
"HTTP_ACCEPT": "*",
"HTTP_HOST": "example.com",
"HTTP_USER_AGENT": "python-requests/1.2.0",
}
def test_base_request_headers(self):
request = HttpRequest()
request.META = self.ENVIRON
self.assertEqual(
dict(request.headers),
{
"Content-Type": "text/html",
"Content-Length": "100",
"Accept": "*",
"Host": "example.com",
"User-Agent": "python-requests/1.2.0",
},
)
def test_wsgi_request_headers(self):
request = WSGIRequest(self.ENVIRON)
self.assertEqual(
dict(request.headers),
{
"Content-Type": "text/html",
"Content-Length": "100",
"Accept": "*",
"Host": "example.com",
"User-Agent": "python-requests/1.2.0",
},
)
def test_wsgi_request_headers_getitem(self):
request = WSGIRequest(self.ENVIRON)
self.assertEqual(request.headers["User-Agent"], "python-requests/1.2.0")
self.assertEqual(request.headers["user-agent"], "python-requests/1.2.0")
self.assertEqual(request.headers["user_agent"], "python-requests/1.2.0")
self.assertEqual(request.headers["Content-Type"], "text/html")
self.assertEqual(request.headers["Content-Length"], "100")
def test_wsgi_request_headers_get(self):
request = WSGIRequest(self.ENVIRON)
self.assertEqual(request.headers.get("User-Agent"), "python-requests/1.2.0")
self.assertEqual(request.headers.get("user-agent"), "python-requests/1.2.0")
self.assertEqual(request.headers.get("Content-Type"), "text/html")
self.assertEqual(request.headers.get("Content-Length"), "100")
class HttpHeadersTests(SimpleTestCase):
def test_basic(self):
environ = {
"CONTENT_TYPE": "text/html",
"CONTENT_LENGTH": "100",
"HTTP_HOST": "example.com",
}
headers = HttpHeaders(environ)
self.assertEqual(sorted(headers), ["Content-Length", "Content-Type", "Host"])
self.assertEqual(
headers,
{
"Content-Type": "text/html",
"Content-Length": "100",
"Host": "example.com",
},
)
def test_parse_header_name(self):
tests = (
("PATH_INFO", None),
("HTTP_ACCEPT", "Accept"),
("HTTP_USER_AGENT", "User-Agent"),
("HTTP_X_FORWARDED_PROTO", "X-Forwarded-Proto"),
("CONTENT_TYPE", "Content-Type"),
("CONTENT_LENGTH", "Content-Length"),
)
for header, expected in tests:
with self.subTest(header=header):
self.assertEqual(HttpHeaders.parse_header_name(header), expected)
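    # The mapping tested above is the WSGI convention in reverse: drop the
    # HTTP_ prefix, turn underscores into hyphens, and title-case each word;
    # CONTENT_TYPE/CONTENT_LENGTH are special-cased, and anything else
    # without an HTTP_ prefix is not a header (hence None):
    #
    #   HttpHeaders.parse_header_name("HTTP_X_FORWARDED_PROTO") -> "X-Forwarded-Proto"
    #   HttpHeaders.parse_header_name("PATH_INFO")              -> None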
|
56081fcbf476615c1bcc180c1f2b6d1dc32d5c8e360dbe471a7798e334ea1002 | import operator
from django.db import DatabaseError, NotSupportedError, connection
from django.db.models import Exists, F, IntegerField, OuterRef, Subquery, Value
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.test.utils import CaptureQueriesContext
from .models import Author, Celebrity, ExtraInfo, Number, ReservedName
@skipUnlessDBFeature("supports_select_union")
class QuerySetSetOperationTests(TestCase):
@classmethod
def setUpTestData(cls):
Number.objects.bulk_create(Number(num=i, other_num=10 - i) for i in range(10))
def assertNumbersEqual(self, queryset, expected_numbers, ordered=True):
self.assertQuerySetEqual(
queryset, expected_numbers, operator.attrgetter("num"), ordered
)
def test_simple_union(self):
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=8)
qs3 = Number.objects.filter(num=5)
self.assertNumbersEqual(qs1.union(qs2, qs3), [0, 1, 5, 8, 9], ordered=False)
@skipUnlessDBFeature("supports_select_intersection")
def test_simple_intersection(self):
qs1 = Number.objects.filter(num__lte=5)
qs2 = Number.objects.filter(num__gte=5)
qs3 = Number.objects.filter(num__gte=4, num__lte=6)
self.assertNumbersEqual(qs1.intersection(qs2, qs3), [5], ordered=False)
@skipUnlessDBFeature("supports_select_intersection")
def test_intersection_with_values(self):
ReservedName.objects.create(name="a", order=2)
qs1 = ReservedName.objects.all()
reserved_name = qs1.intersection(qs1).values("name", "order", "id").get()
self.assertEqual(reserved_name["name"], "a")
self.assertEqual(reserved_name["order"], 2)
reserved_name = qs1.intersection(qs1).values_list("name", "order", "id").get()
self.assertEqual(reserved_name[:2], ("a", 2))
@skipUnlessDBFeature("supports_select_difference")
def test_simple_difference(self):
qs1 = Number.objects.filter(num__lte=5)
qs2 = Number.objects.filter(num__lte=4)
self.assertNumbersEqual(qs1.difference(qs2), [5], ordered=False)
def test_union_distinct(self):
qs1 = Number.objects.all()
qs2 = Number.objects.all()
self.assertEqual(len(list(qs1.union(qs2, all=True))), 20)
self.assertEqual(len(list(qs1.union(qs2))), 10)
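    # Roughly, qs1.union(qs2) compiles to SELECT ... UNION SELECT ...
    # (duplicates removed), while all=True emits UNION ALL and keeps them,
    # hence the 10 vs. 20 rows above. (Illustrative; the exact SQL varies
    # by backend.)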
def test_union_none(self):
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=8)
qs3 = qs1.union(qs2)
self.assertSequenceEqual(qs3.none(), [])
self.assertNumbersEqual(qs3, [0, 1, 8, 9], ordered=False)
def test_union_none_slice(self):
qs1 = Number.objects.filter(num__lte=0)
qs2 = Number.objects.none()
qs3 = qs1.union(qs2)
self.assertNumbersEqual(qs3[:1], [0])
def test_union_empty_filter_slice(self):
qs1 = Number.objects.filter(num__lte=0)
qs2 = Number.objects.filter(pk__in=[])
qs3 = qs1.union(qs2)
self.assertNumbersEqual(qs3[:1], [0])
@skipUnlessDBFeature("supports_slicing_ordering_in_compound")
def test_union_slice_compound_empty(self):
qs1 = Number.objects.filter(num__lte=0)[:1]
qs2 = Number.objects.none()
qs3 = qs1.union(qs2)
self.assertNumbersEqual(qs3[:1], [0])
@skipUnlessDBFeature("supports_slicing_ordering_in_compound")
def test_union_combined_slice_compound_empty(self):
qs1 = Number.objects.filter(num__lte=2)[:3]
qs2 = Number.objects.none()
qs3 = qs1.union(qs2)
self.assertNumbersEqual(qs3.order_by("num")[2:3], [2])
def test_union_order_with_null_first_last(self):
Number.objects.filter(other_num=5).update(other_num=None)
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=2)
qs3 = qs1.union(qs2)
self.assertSequenceEqual(
qs3.order_by(
F("other_num").asc(nulls_first=True),
).values_list("other_num", flat=True),
[None, 1, 2, 3, 4, 6, 7, 8, 9, 10],
)
self.assertSequenceEqual(
qs3.order_by(
F("other_num").asc(nulls_last=True),
).values_list("other_num", flat=True),
[1, 2, 3, 4, 6, 7, 8, 9, 10, None],
)
@skipUnlessDBFeature("supports_select_intersection")
def test_intersection_with_empty_qs(self):
qs1 = Number.objects.all()
qs2 = Number.objects.none()
qs3 = Number.objects.filter(pk__in=[])
self.assertEqual(len(qs1.intersection(qs2)), 0)
self.assertEqual(len(qs1.intersection(qs3)), 0)
self.assertEqual(len(qs2.intersection(qs1)), 0)
self.assertEqual(len(qs3.intersection(qs1)), 0)
self.assertEqual(len(qs2.intersection(qs2)), 0)
self.assertEqual(len(qs3.intersection(qs3)), 0)
@skipUnlessDBFeature("supports_select_difference")
def test_difference_with_empty_qs(self):
qs1 = Number.objects.all()
qs2 = Number.objects.none()
qs3 = Number.objects.filter(pk__in=[])
self.assertEqual(len(qs1.difference(qs2)), 10)
self.assertEqual(len(qs1.difference(qs3)), 10)
self.assertEqual(len(qs2.difference(qs1)), 0)
self.assertEqual(len(qs3.difference(qs1)), 0)
self.assertEqual(len(qs2.difference(qs2)), 0)
self.assertEqual(len(qs3.difference(qs3)), 0)
@skipUnlessDBFeature("supports_select_difference")
def test_difference_with_values(self):
ReservedName.objects.create(name="a", order=2)
qs1 = ReservedName.objects.all()
qs2 = ReservedName.objects.none()
reserved_name = qs1.difference(qs2).values("name", "order", "id").get()
self.assertEqual(reserved_name["name"], "a")
self.assertEqual(reserved_name["order"], 2)
reserved_name = qs1.difference(qs2).values_list("name", "order", "id").get()
self.assertEqual(reserved_name[:2], ("a", 2))
def test_union_with_empty_qs(self):
qs1 = Number.objects.all()
qs2 = Number.objects.none()
qs3 = Number.objects.filter(pk__in=[])
self.assertEqual(len(qs1.union(qs2)), 10)
self.assertEqual(len(qs2.union(qs1)), 10)
self.assertEqual(len(qs1.union(qs3)), 10)
self.assertEqual(len(qs3.union(qs1)), 10)
self.assertEqual(len(qs2.union(qs1, qs1, qs1)), 10)
self.assertEqual(len(qs2.union(qs1, qs1, all=True)), 20)
self.assertEqual(len(qs2.union(qs2)), 0)
self.assertEqual(len(qs3.union(qs3)), 0)
def test_empty_qs_union_with_ordered_qs(self):
qs1 = Number.objects.order_by("num")
qs2 = Number.objects.none().union(qs1).order_by("num")
self.assertEqual(list(qs1), list(qs2))
def test_limits(self):
qs1 = Number.objects.all()
qs2 = Number.objects.all()
self.assertEqual(len(list(qs1.union(qs2)[:2])), 2)
def test_ordering(self):
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=2, num__lte=3)
self.assertNumbersEqual(qs1.union(qs2).order_by("-num"), [3, 2, 1, 0])
def test_ordering_by_alias(self):
qs1 = Number.objects.filter(num__lte=1).values(alias=F("num"))
qs2 = Number.objects.filter(num__gte=2, num__lte=3).values(alias=F("num"))
self.assertQuerySetEqual(
qs1.union(qs2).order_by("-alias"),
[3, 2, 1, 0],
operator.itemgetter("alias"),
)
def test_ordering_by_f_expression(self):
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=2, num__lte=3)
self.assertNumbersEqual(qs1.union(qs2).order_by(F("num").desc()), [3, 2, 1, 0])
def test_ordering_by_f_expression_and_alias(self):
qs1 = Number.objects.filter(num__lte=1).values(alias=F("other_num"))
qs2 = Number.objects.filter(num__gte=2, num__lte=3).values(alias=F("other_num"))
self.assertQuerySetEqual(
qs1.union(qs2).order_by(F("alias").desc()),
[10, 9, 8, 7],
operator.itemgetter("alias"),
)
Number.objects.create(num=-1)
self.assertQuerySetEqual(
qs1.union(qs2).order_by(F("alias").desc(nulls_last=True)),
[10, 9, 8, 7, None],
operator.itemgetter("alias"),
)
def test_union_with_values(self):
ReservedName.objects.create(name="a", order=2)
qs1 = ReservedName.objects.all()
reserved_name = qs1.union(qs1).values("name", "order", "id").get()
self.assertEqual(reserved_name["name"], "a")
self.assertEqual(reserved_name["order"], 2)
reserved_name = qs1.union(qs1).values_list("name", "order", "id").get()
self.assertEqual(reserved_name[:2], ("a", 2))
# List of columns can be changed.
reserved_name = qs1.union(qs1).values_list("order").get()
self.assertEqual(reserved_name, (2,))
def test_union_with_two_annotated_values_list(self):
qs1 = (
Number.objects.filter(num=1)
.annotate(
count=Value(0, IntegerField()),
)
.values_list("num", "count")
)
qs2 = (
Number.objects.filter(num=2)
.values("pk")
.annotate(
count=F("num"),
)
.annotate(
num=Value(1, IntegerField()),
)
.values_list("num", "count")
)
self.assertCountEqual(qs1.union(qs2), [(1, 0), (2, 1)])
def test_union_with_extra_and_values_list(self):
qs1 = (
Number.objects.filter(num=1)
.extra(
select={"count": 0},
)
.values_list("num", "count")
)
qs2 = Number.objects.filter(num=2).extra(select={"count": 1})
self.assertCountEqual(qs1.union(qs2), [(1, 0), (2, 1)])
def test_union_with_values_list_on_annotated_and_unannotated(self):
ReservedName.objects.create(name="rn1", order=1)
qs1 = Number.objects.annotate(
has_reserved_name=Exists(ReservedName.objects.filter(order=OuterRef("num")))
).filter(has_reserved_name=True)
qs2 = Number.objects.filter(num=9)
self.assertCountEqual(qs1.union(qs2).values_list("num", flat=True), [1, 9])
def test_union_with_values_list_and_order(self):
ReservedName.objects.bulk_create(
[
ReservedName(name="rn1", order=7),
ReservedName(name="rn2", order=5),
ReservedName(name="rn0", order=6),
ReservedName(name="rn9", order=-1),
]
)
qs1 = ReservedName.objects.filter(order__gte=6)
qs2 = ReservedName.objects.filter(order__lte=5)
union_qs = qs1.union(qs2)
for qs, expected_result in (
# Order by a single column.
(union_qs.order_by("-pk").values_list("order", flat=True), [-1, 6, 5, 7]),
(union_qs.order_by("pk").values_list("order", flat=True), [7, 5, 6, -1]),
(union_qs.values_list("order", flat=True).order_by("-pk"), [-1, 6, 5, 7]),
(union_qs.values_list("order", flat=True).order_by("pk"), [7, 5, 6, -1]),
# Order by multiple columns.
(
union_qs.order_by("-name", "pk").values_list("order", flat=True),
[-1, 5, 7, 6],
),
(
union_qs.values_list("order", flat=True).order_by("-name", "pk"),
[-1, 5, 7, 6],
),
):
with self.subTest(qs=qs):
self.assertEqual(list(qs), expected_result)
def test_union_with_values_list_and_order_on_annotation(self):
qs1 = Number.objects.annotate(
annotation=Value(-1),
multiplier=F("annotation"),
).filter(num__gte=6)
qs2 = Number.objects.annotate(
annotation=Value(2),
multiplier=F("annotation"),
).filter(num__lte=5)
self.assertSequenceEqual(
qs1.union(qs2).order_by("annotation", "num").values_list("num", flat=True),
[6, 7, 8, 9, 0, 1, 2, 3, 4, 5],
)
self.assertQuerySetEqual(
qs1.union(qs2)
.order_by(
F("annotation") * F("multiplier"),
"num",
)
.values("num"),
[6, 7, 8, 9, 0, 1, 2, 3, 4, 5],
operator.itemgetter("num"),
)
def test_union_with_select_related_and_order(self):
e1 = ExtraInfo.objects.create(value=7, info="e1")
a1 = Author.objects.create(name="a1", num=1, extra=e1)
a2 = Author.objects.create(name="a2", num=3, extra=e1)
Author.objects.create(name="a3", num=2, extra=e1)
base_qs = Author.objects.select_related("extra").order_by()
qs1 = base_qs.filter(name="a1")
qs2 = base_qs.filter(name="a2")
self.assertSequenceEqual(qs1.union(qs2).order_by("pk"), [a1, a2])
@skipUnlessDBFeature("supports_slicing_ordering_in_compound")
def test_union_with_select_related_and_first(self):
e1 = ExtraInfo.objects.create(value=7, info="e1")
a1 = Author.objects.create(name="a1", num=1, extra=e1)
Author.objects.create(name="a2", num=3, extra=e1)
base_qs = Author.objects.select_related("extra")
qs1 = base_qs.filter(name="a1")
qs2 = base_qs.filter(name="a2")
self.assertEqual(qs1.union(qs2).first(), a1)
def test_union_with_first(self):
e1 = ExtraInfo.objects.create(value=7, info="e1")
a1 = Author.objects.create(name="a1", num=1, extra=e1)
base_qs = Author.objects.order_by()
qs1 = base_qs.filter(name="a1")
qs2 = base_qs.filter(name="a2")
self.assertEqual(qs1.union(qs2).first(), a1)
def test_union_multiple_models_with_values_list_and_order(self):
reserved_name = ReservedName.objects.create(name="rn1", order=0)
qs1 = Celebrity.objects.all()
qs2 = ReservedName.objects.all()
self.assertSequenceEqual(
qs1.union(qs2).order_by("name").values_list("pk", flat=True),
[reserved_name.pk],
)
def test_union_multiple_models_with_values_list_and_order_by_extra_select(self):
reserved_name = ReservedName.objects.create(name="rn1", order=0)
qs1 = Celebrity.objects.extra(select={"extra_name": "name"})
qs2 = ReservedName.objects.extra(select={"extra_name": "name"})
self.assertSequenceEqual(
qs1.union(qs2).order_by("extra_name").values_list("pk", flat=True),
[reserved_name.pk],
)
def test_union_in_subquery(self):
ReservedName.objects.bulk_create(
[
ReservedName(name="rn1", order=8),
ReservedName(name="rn2", order=1),
ReservedName(name="rn3", order=5),
]
)
qs1 = Number.objects.filter(num__gt=7, num=OuterRef("order"))
qs2 = Number.objects.filter(num__lt=2, num=OuterRef("order"))
self.assertCountEqual(
ReservedName.objects.annotate(
number=Subquery(qs1.union(qs2).values("num")),
)
.filter(number__isnull=False)
.values_list("order", flat=True),
[8, 1],
)
def test_union_in_subquery_related_outerref(self):
e1 = ExtraInfo.objects.create(value=7, info="e3")
e2 = ExtraInfo.objects.create(value=5, info="e2")
e3 = ExtraInfo.objects.create(value=1, info="e1")
Author.objects.bulk_create(
[
Author(name="a1", num=1, extra=e1),
Author(name="a2", num=3, extra=e2),
Author(name="a3", num=2, extra=e3),
]
)
qs1 = ExtraInfo.objects.order_by().filter(value=OuterRef("num"))
qs2 = ExtraInfo.objects.order_by().filter(value__lt=OuterRef("extra__value"))
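        # qs2 filters on OuterRef("extra__value"), a field reached through the
        # outer Author queryset's "extra" relation.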
qs = (
Author.objects.annotate(
info=Subquery(qs1.union(qs2).values("info")[:1]),
)
.filter(info__isnull=False)
.values_list("name", flat=True)
)
self.assertCountEqual(qs, ["a1", "a2"])
# Combined queries don't mutate.
self.assertCountEqual(qs, ["a1", "a2"])
@skipUnlessDBFeature("supports_slicing_ordering_in_compound")
def test_union_in_with_ordering(self):
qs1 = Number.objects.filter(num__gt=7).order_by("num")
qs2 = Number.objects.filter(num__lt=2).order_by("num")
self.assertNumbersEqual(
Number.objects.exclude(id__in=qs1.union(qs2).values("id")),
[2, 3, 4, 5, 6, 7],
ordered=False,
)
@skipUnlessDBFeature(
"supports_slicing_ordering_in_compound", "allow_sliced_subqueries_with_in"
)
def test_union_in_with_ordering_and_slice(self):
qs1 = Number.objects.filter(num__gt=7).order_by("num")[:1]
qs2 = Number.objects.filter(num__lt=2).order_by("-num")[:1]
self.assertNumbersEqual(
Number.objects.exclude(id__in=qs1.union(qs2).values("id")),
[0, 2, 3, 4, 5, 6, 7, 9],
ordered=False,
)
def test_count_union(self):
qs1 = Number.objects.filter(num__lte=1).values("num")
qs2 = Number.objects.filter(num__gte=2, num__lte=3).values("num")
self.assertEqual(qs1.union(qs2).count(), 4)
def test_count_union_empty_result(self):
qs = Number.objects.filter(pk__in=[])
self.assertEqual(qs.union(qs).count(), 0)
@skipUnlessDBFeature("supports_select_difference")
def test_count_difference(self):
qs1 = Number.objects.filter(num__lt=10)
qs2 = Number.objects.filter(num__lt=9)
self.assertEqual(qs1.difference(qs2).count(), 1)
@skipUnlessDBFeature("supports_select_intersection")
def test_count_intersection(self):
qs1 = Number.objects.filter(num__gte=5)
qs2 = Number.objects.filter(num__lte=5)
self.assertEqual(qs1.intersection(qs2).count(), 1)
def test_exists_union(self):
qs1 = Number.objects.filter(num__gte=5)
qs2 = Number.objects.filter(num__lte=5)
with CaptureQueriesContext(connection) as context:
self.assertIs(qs1.union(qs2).exists(), True)
captured_queries = context.captured_queries
self.assertEqual(len(captured_queries), 1)
captured_sql = captured_queries[0]["sql"]
self.assertNotIn(
connection.ops.quote_name(Number._meta.pk.column),
captured_sql,
)
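        # When the backend supports slicing/ordering in compound statements,
        # LIMIT 1 is applied to each union branch as well as the outer query
        # (3 occurrences); otherwise only the outer query is limited.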
self.assertEqual(
captured_sql.count(connection.ops.limit_offset_sql(None, 1)),
3 if connection.features.supports_slicing_ordering_in_compound else 1,
)
def test_exists_union_empty_result(self):
qs = Number.objects.filter(pk__in=[])
self.assertIs(qs.union(qs).exists(), False)
@skipUnlessDBFeature("supports_select_intersection")
def test_exists_intersection(self):
qs1 = Number.objects.filter(num__gt=5)
qs2 = Number.objects.filter(num__lt=5)
self.assertIs(qs1.intersection(qs1).exists(), True)
self.assertIs(qs1.intersection(qs2).exists(), False)
@skipUnlessDBFeature("supports_select_difference")
def test_exists_difference(self):
qs1 = Number.objects.filter(num__gte=5)
qs2 = Number.objects.filter(num__gte=3)
self.assertIs(qs1.difference(qs2).exists(), False)
self.assertIs(qs2.difference(qs1).exists(), True)
def test_get_union(self):
qs = Number.objects.filter(num=2)
self.assertEqual(qs.union(qs).get().num, 2)
@skipUnlessDBFeature("supports_select_difference")
def test_get_difference(self):
qs1 = Number.objects.all()
qs2 = Number.objects.exclude(num=2)
self.assertEqual(qs1.difference(qs2).get().num, 2)
@skipUnlessDBFeature("supports_select_intersection")
def test_get_intersection(self):
qs1 = Number.objects.all()
qs2 = Number.objects.filter(num=2)
self.assertEqual(qs1.intersection(qs2).get().num, 2)
@skipUnlessDBFeature("supports_slicing_ordering_in_compound")
def test_ordering_subqueries(self):
qs1 = Number.objects.order_by("num")[:2]
qs2 = Number.objects.order_by("-num")[:2]
self.assertNumbersEqual(qs1.union(qs2).order_by("-num")[:4], [9, 8, 1, 0])
@skipIfDBFeature("supports_slicing_ordering_in_compound")
def test_unsupported_ordering_slicing_raises_db_error(self):
qs1 = Number.objects.all()
qs2 = Number.objects.all()
qs3 = Number.objects.all()
msg = "LIMIT/OFFSET not allowed in subqueries of compound statements"
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2[:10]))
msg = "ORDER BY not allowed in subqueries of compound statements"
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.order_by("id").union(qs2))
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by("id").union(qs3))
@skipIfDBFeature("supports_select_intersection")
def test_unsupported_intersection_raises_db_error(self):
qs1 = Number.objects.all()
qs2 = Number.objects.all()
msg = "intersection is not supported on this database backend"
with self.assertRaisesMessage(NotSupportedError, msg):
list(qs1.intersection(qs2))
def test_combining_multiple_models(self):
ReservedName.objects.create(name="99 little bugs", order=99)
qs1 = Number.objects.filter(num=1).values_list("num", flat=True)
qs2 = ReservedName.objects.values_list("order")
self.assertEqual(list(qs1.union(qs2).order_by("num")), [1, 99])
def test_order_raises_on_non_selected_column(self):
qs1 = (
Number.objects.filter()
.annotate(
annotation=Value(1, IntegerField()),
)
.values("annotation", num2=F("num"))
)
qs2 = Number.objects.filter().values("id", "num")
# Should not raise
list(qs1.union(qs2).order_by("annotation"))
list(qs1.union(qs2).order_by("num2"))
msg = "ORDER BY term does not match any column in the result set"
# 'id' is not part of the select
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by("id"))
        # 'num' was re-aliased to 'num2'.
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by("num"))
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by(F("num")))
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by(F("num").desc()))
        # With the querysets swapped, 'num' is among the selected columns again:
list(qs2.union(qs1).order_by("num"))
@skipUnlessDBFeature("supports_select_difference", "supports_select_intersection")
def test_qs_with_subcompound_qs(self):
qs1 = Number.objects.all()
qs2 = Number.objects.intersection(Number.objects.filter(num__gt=1))
self.assertEqual(qs1.difference(qs2).count(), 2)
def test_order_by_same_type(self):
qs = Number.objects.all()
union = qs.union(qs)
numbers = list(range(10))
self.assertNumbersEqual(union.order_by("num"), numbers)
self.assertNumbersEqual(union.order_by("other_num"), reversed(numbers))
def test_unsupported_operations_on_combined_qs(self):
qs = Number.objects.all()
msg = "Calling QuerySet.%s() after %s() is not supported."
combinators = ["union"]
if connection.features.supports_select_difference:
combinators.append("difference")
if connection.features.supports_select_intersection:
combinators.append("intersection")
for combinator in combinators:
for operation in (
"alias",
"annotate",
"defer",
"delete",
"distinct",
"exclude",
"extra",
"filter",
"only",
"prefetch_related",
"select_related",
"update",
):
with self.subTest(combinator=combinator, operation=operation):
with self.assertRaisesMessage(
NotSupportedError,
msg % (operation, combinator),
):
getattr(getattr(qs, combinator)(qs), operation)()
with self.assertRaisesMessage(
NotSupportedError,
msg % ("contains", combinator),
):
obj = Number.objects.first()
getattr(qs, combinator)(qs).contains(obj)
def test_get_with_filters_unsupported_on_combined_qs(self):
qs = Number.objects.all()
msg = "Calling QuerySet.get(...) with filters after %s() is not supported."
combinators = ["union"]
if connection.features.supports_select_difference:
combinators.append("difference")
if connection.features.supports_select_intersection:
combinators.append("intersection")
for combinator in combinators:
with self.subTest(combinator=combinator):
with self.assertRaisesMessage(NotSupportedError, msg % combinator):
getattr(qs, combinator)(qs).get(num=2)
def test_operator_on_combined_qs_error(self):
qs = Number.objects.all()
msg = "Cannot use %s operator with combined queryset."
combinators = ["union"]
if connection.features.supports_select_difference:
combinators.append("difference")
if connection.features.supports_select_intersection:
combinators.append("intersection")
operators = [
("|", operator.or_),
("&", operator.and_),
("^", operator.xor),
]
for combinator in combinators:
combined_qs = getattr(qs, combinator)(qs)
for operator_, operator_func in operators:
with self.subTest(combinator=combinator):
with self.assertRaisesMessage(TypeError, msg % operator_):
operator_func(qs, combined_qs)
with self.assertRaisesMessage(TypeError, msg % operator_):
operator_func(combined_qs, qs)
|
8bd8df6f53a502599f7193818154b2722b8af5a076a7e8aab8d82fb97c073dbb | """
Testing using the Test Client
The test client is a class that can act like a simple
browser for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
``Client`` objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the ``Client`` instance.
This is not intended as a replacement for Twill, Selenium, or
other browser automation frameworks - it is here to allow
testing against the contexts and templates produced by a view,
rather than the HTML rendered to the end-user.
"""
import itertools
import pickle
import tempfile
from unittest import mock
from django.contrib.auth.models import User
from django.core import mail
from django.http import HttpResponse, HttpResponseNotAllowed
from django.test import (
AsyncRequestFactory,
Client,
RequestFactory,
SimpleTestCase,
TestCase,
modify_settings,
override_settings,
)
from django.urls import reverse_lazy
from django.utils.decorators import async_only_middleware
from django.views.generic import RedirectView
from .views import TwoArgException, get_view, post_view, trace_view
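# Middleware used by the resolver-match tests: it points request.urlconf at an
# alternative URLconf module so per-request URLconf overrides are exercised.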
def middleware_urlconf(get_response):
def middleware(request):
request.urlconf = "test_client.urls_middleware_urlconf"
return get_response(request)
return middleware
@async_only_middleware
def async_middleware_urlconf(get_response):
async def middleware(request):
request.urlconf = "test_client.urls_middleware_urlconf"
return await get_response(request)
return middleware
@override_settings(ROOT_URLCONF="test_client.urls")
class ClientTest(TestCase):
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create_user(username="testclient", password="password")
cls.u2 = User.objects.create_user(
username="inactive", password="password", is_active=False
)
def test_get_view(self):
"GET a view"
# The data is ignored, but let's check it doesn't crash the system
# anyway.
data = {"var": "\xf2"}
response = self.client.get("/get_view/", data)
# Check some response details
self.assertContains(response, "This is a test")
self.assertEqual(response.context["var"], "\xf2")
self.assertEqual(response.templates[0].name, "GET Template")
def test_pickling_response(self):
tests = ["/cbv_view/", "/get_view/"]
for url in tests:
with self.subTest(url=url):
response = self.client.get(url)
dump = pickle.dumps(response)
response_from_pickle = pickle.loads(dump)
self.assertEqual(repr(response), repr(response_from_pickle))
async def test_pickling_response_async(self):
response = await self.async_client.get("/async_get_view/")
dump = pickle.dumps(response)
response_from_pickle = pickle.loads(dump)
self.assertEqual(repr(response), repr(response_from_pickle))
def test_query_string_encoding(self):
# WSGI requires latin-1 encoded strings.
response = self.client.get("/get_view/?var=1\ufffd")
self.assertEqual(response.context["var"], "1\ufffd")
def test_get_data_none(self):
msg = (
"Cannot encode None for key 'value' in a query string. Did you "
"mean to pass an empty string or omit the value?"
)
with self.assertRaisesMessage(TypeError, msg):
self.client.get("/get_view/", {"value": None})
def test_get_post_view(self):
"GET a view that normally expects POSTs"
response = self.client.get("/post_view/", {})
# Check some response details
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, "Empty GET Template")
self.assertTemplateUsed(response, "Empty GET Template")
self.assertTemplateNotUsed(response, "Empty POST Template")
def test_empty_post(self):
"POST an empty dictionary to a view"
response = self.client.post("/post_view/", {})
# Check some response details
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, "Empty POST Template")
self.assertTemplateNotUsed(response, "Empty GET Template")
self.assertTemplateUsed(response, "Empty POST Template")
def test_post(self):
"POST some data to a view"
post_data = {"value": 37}
response = self.client.post("/post_view/", post_data)
# Check some response details
self.assertContains(response, "Data received")
self.assertEqual(response.context["data"], "37")
self.assertEqual(response.templates[0].name, "POST Template")
def test_post_data_none(self):
msg = (
"Cannot encode None for key 'value' as POST data. Did you mean "
"to pass an empty string or omit the value?"
)
with self.assertRaisesMessage(TypeError, msg):
self.client.post("/post_view/", {"value": None})
def test_json_serialization(self):
"""The test client serializes JSON data."""
methods = ("post", "put", "patch", "delete")
tests = (
({"value": 37}, {"value": 37}),
([37, True], [37, True]),
((37, False), [37, False]),
)
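        # Tuples are serialized as JSON arrays, so (37, False) round-trips as
        # the list [37, False].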
for method in methods:
with self.subTest(method=method):
for data, expected in tests:
with self.subTest(data):
client_method = getattr(self.client, method)
method_name = method.upper()
response = client_method(
"/json_view/", data, content_type="application/json"
)
self.assertContains(response, "Viewing %s page." % method_name)
self.assertEqual(response.context["data"], expected)
def test_json_encoder_argument(self):
"""The test Client accepts a json_encoder."""
mock_encoder = mock.MagicMock()
mock_encoding = mock.MagicMock()
mock_encoder.return_value = mock_encoding
mock_encoding.encode.return_value = '{"value": 37}'
client = self.client_class(json_encoder=mock_encoder)
# Vendored tree JSON content types are accepted.
client.post(
"/json_view/", {"value": 37}, content_type="application/vnd.api+json"
)
self.assertTrue(mock_encoder.called)
self.assertTrue(mock_encoding.encode.called)
def test_put(self):
response = self.client.put("/put_view/", {"foo": "bar"})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, "PUT Template")
self.assertEqual(response.context["data"], "{'foo': 'bar'}")
self.assertEqual(response.context["Content-Length"], "14")
def test_trace(self):
"""TRACE a view"""
response = self.client.trace("/trace_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["method"], "TRACE")
self.assertEqual(response.templates[0].name, "TRACE Template")
def test_response_headers(self):
"Check the value of HTTP headers returned in a response"
response = self.client.get("/header_view/")
self.assertEqual(response.headers["X-DJANGO-TEST"], "Slartibartfast")
def test_response_attached_request(self):
"""
The returned response has a ``request`` attribute with the originating
environ dict and a ``wsgi_request`` with the originating WSGIRequest.
"""
response = self.client.get("/header_view/")
self.assertTrue(hasattr(response, "request"))
self.assertTrue(hasattr(response, "wsgi_request"))
for key, value in response.request.items():
self.assertIn(key, response.wsgi_request.environ)
self.assertEqual(response.wsgi_request.environ[key], value)
def test_response_resolver_match(self):
"""
The response contains a ResolverMatch instance.
"""
response = self.client.get("/header_view/")
self.assertTrue(hasattr(response, "resolver_match"))
def test_response_resolver_match_redirect_follow(self):
"""
The response ResolverMatch instance contains the correct
information when following redirects.
"""
response = self.client.get("/redirect_view/", follow=True)
self.assertEqual(response.resolver_match.url_name, "get_view")
def test_response_resolver_match_regular_view(self):
"""
The response ResolverMatch instance contains the correct
information when accessing a regular view.
"""
response = self.client.get("/get_view/")
self.assertEqual(response.resolver_match.url_name, "get_view")
def test_response_resolver_match_class_based_view(self):
"""
The response ResolverMatch instance can be used to access the CBV view
class.
"""
response = self.client.get("/accounts/")
self.assertIs(response.resolver_match.func.view_class, RedirectView)
@modify_settings(MIDDLEWARE={"prepend": "test_client.tests.middleware_urlconf"})
def test_response_resolver_match_middleware_urlconf(self):
response = self.client.get("/middleware_urlconf_view/")
self.assertEqual(response.resolver_match.url_name, "middleware_urlconf_view")
def test_raw_post(self):
"POST raw data (with a content type) to a view"
test_doc = """<?xml version="1.0" encoding="utf-8"?>
<library><book><title>Blink</title><author>Malcolm Gladwell</author></book>
</library>
"""
response = self.client.post(
"/raw_post_view/", test_doc, content_type="text/xml"
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.templates[0].name, "Book template")
self.assertEqual(response.content, b"Blink - Malcolm Gladwell")
def test_insecure(self):
"GET a URL through http"
response = self.client.get("/secure_view/", secure=False)
self.assertFalse(response.test_was_secure_request)
self.assertEqual(response.test_server_port, "80")
def test_secure(self):
"GET a URL through https"
response = self.client.get("/secure_view/", secure=True)
self.assertTrue(response.test_was_secure_request)
self.assertEqual(response.test_server_port, "443")
def test_redirect(self):
"GET a URL that redirects elsewhere"
response = self.client.get("/redirect_view/")
self.assertRedirects(response, "/get_view/")
def test_redirect_with_query(self):
"GET a URL that redirects with given GET parameters"
response = self.client.get("/redirect_view/", {"var": "value"})
self.assertRedirects(response, "/get_view/?var=value")
def test_redirect_with_query_ordering(self):
"""assertRedirects() ignores the order of query string parameters."""
response = self.client.get("/redirect_view/", {"var": "value", "foo": "bar"})
self.assertRedirects(response, "/get_view/?var=value&foo=bar")
self.assertRedirects(response, "/get_view/?foo=bar&var=value")
def test_permanent_redirect(self):
"GET a URL that redirects permanently elsewhere"
response = self.client.get("/permanent_redirect_view/")
self.assertRedirects(response, "/get_view/", status_code=301)
def test_temporary_redirect(self):
"GET a URL that does a non-permanent redirect"
response = self.client.get("/temporary_redirect_view/")
self.assertRedirects(response, "/get_view/", status_code=302)
def test_redirect_to_strange_location(self):
"GET a URL that redirects to a non-200 page"
response = self.client.get("/double_redirect_view/")
        # The response was a 302, and the attempt to get the redirection
        # location returned a 301.
self.assertRedirects(
response, "/permanent_redirect_view/", target_status_code=301
)
def test_follow_redirect(self):
"A URL that redirects can be followed to termination."
response = self.client.get("/double_redirect_view/", follow=True)
self.assertRedirects(
response, "/get_view/", status_code=302, target_status_code=200
)
self.assertEqual(len(response.redirect_chain), 2)
def test_follow_relative_redirect(self):
"A URL with a relative redirect can be followed."
response = self.client.get("/accounts/", follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.request["PATH_INFO"], "/accounts/login/")
def test_follow_relative_redirect_no_trailing_slash(self):
"A URL with a relative redirect with no trailing slash can be followed."
response = self.client.get("/accounts/no_trailing_slash", follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.request["PATH_INFO"], "/accounts/login/")
def test_redirect_to_querystring_only(self):
"""A URL that consists of a querystring only can be followed"""
response = self.client.post("/post_then_get_view/", follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.request["PATH_INFO"], "/post_then_get_view/")
self.assertEqual(response.content, b"The value of success is true.")
def test_follow_307_and_308_redirect(self):
"""
A 307 or 308 redirect preserves the request method after the redirect.
"""
methods = ("get", "post", "head", "options", "put", "patch", "delete", "trace")
codes = (307, 308)
for method, code in itertools.product(methods, codes):
with self.subTest(method=method, code=code):
req_method = getattr(self.client, method)
response = req_method(
"/redirect_view_%s/" % code, data={"value": "test"}, follow=True
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.request["PATH_INFO"], "/post_view/")
self.assertEqual(response.request["REQUEST_METHOD"], method.upper())
def test_follow_307_and_308_preserves_query_string(self):
methods = ("post", "options", "put", "patch", "delete", "trace")
codes = (307, 308)
for method, code in itertools.product(methods, codes):
with self.subTest(method=method, code=code):
req_method = getattr(self.client, method)
response = req_method(
"/redirect_view_%s_query_string/" % code,
data={"value": "test"},
follow=True,
)
self.assertRedirects(
response, "/post_view/?hello=world", status_code=code
)
self.assertEqual(response.request["QUERY_STRING"], "hello=world")
def test_follow_307_and_308_get_head_query_string(self):
methods = ("get", "head")
codes = (307, 308)
for method, code in itertools.product(methods, codes):
with self.subTest(method=method, code=code):
req_method = getattr(self.client, method)
response = req_method(
"/redirect_view_%s_query_string/" % code,
data={"value": "test"},
follow=True,
)
self.assertRedirects(
response, "/post_view/?hello=world", status_code=code
)
self.assertEqual(response.request["QUERY_STRING"], "value=test")
def test_follow_307_and_308_preserves_post_data(self):
for code in (307, 308):
with self.subTest(code=code):
response = self.client.post(
"/redirect_view_%s/" % code, data={"value": "test"}, follow=True
)
self.assertContains(response, "test is the value")
def test_follow_307_and_308_preserves_put_body(self):
for code in (307, 308):
with self.subTest(code=code):
response = self.client.put(
"/redirect_view_%s/?to=/put_view/" % code, data="a=b", follow=True
)
self.assertContains(response, "a=b is the body")
def test_follow_307_and_308_preserves_get_params(self):
data = {"var": 30, "to": "/get_view/"}
for code in (307, 308):
with self.subTest(code=code):
response = self.client.get(
"/redirect_view_%s/" % code, data=data, follow=True
)
self.assertContains(response, "30 is the value")
def test_redirect_http(self):
"""GET a URL that redirects to an HTTP URI."""
response = self.client.get("/http_redirect_view/", follow=True)
self.assertFalse(response.test_was_secure_request)
def test_redirect_https(self):
"""GET a URL that redirects to an HTTPS URI."""
response = self.client.get("/https_redirect_view/", follow=True)
self.assertTrue(response.test_was_secure_request)
def test_notfound_response(self):
"GET a URL that responds as '404:Not Found'"
response = self.client.get("/bad_view/")
self.assertContains(response, "MAGIC", status_code=404)
def test_valid_form(self):
"POST valid data to a form"
post_data = {
"text": "Hello World",
"email": "[email protected]",
"value": 37,
"single": "b",
"multi": ("b", "c", "e"),
}
response = self.client.post("/form_view/", post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Valid POST Template")
def test_valid_form_with_hints(self):
"GET a form, providing hints in the GET data"
hints = {"text": "Hello World", "multi": ("b", "c", "e")}
response = self.client.get("/form_view/", data=hints)
        # The multi-value data was encoded correctly, so no choice errors appear.
self.assertContains(response, "Select a valid choice.", 0)
self.assertTemplateUsed(response, "Form GET Template")
def test_incomplete_data_form(self):
"POST incomplete data to a form"
post_data = {"text": "Hello World", "value": 37}
response = self.client.post("/form_view/", post_data)
self.assertContains(response, "This field is required.", 3)
self.assertTemplateUsed(response, "Invalid POST Template")
form = response.context["form"]
self.assertFormError(form, "email", "This field is required.")
self.assertFormError(form, "single", "This field is required.")
self.assertFormError(form, "multi", "This field is required.")
def test_form_error(self):
"POST erroneous data to a form"
post_data = {
"text": "Hello World",
"email": "not an email address",
"value": 37,
"single": "b",
"multi": ("b", "c", "e"),
}
response = self.client.post("/form_view/", post_data)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "Invalid POST Template")
self.assertFormError(
response.context["form"], "email", "Enter a valid email address."
)
def test_valid_form_with_template(self):
"POST valid data to a form using multiple templates"
post_data = {
"text": "Hello World",
"email": "[email protected]",
"value": 37,
"single": "b",
"multi": ("b", "c", "e"),
}
response = self.client.post("/form_view_with_template/", post_data)
self.assertContains(response, "POST data OK")
self.assertTemplateUsed(response, "form_view.html")
self.assertTemplateUsed(response, "base.html")
self.assertTemplateNotUsed(response, "Valid POST Template")
def test_incomplete_data_form_with_template(self):
"POST incomplete data to a form using multiple templates"
post_data = {"text": "Hello World", "value": 37}
response = self.client.post("/form_view_with_template/", post_data)
self.assertContains(response, "POST data has errors")
self.assertTemplateUsed(response, "form_view.html")
self.assertTemplateUsed(response, "base.html")
self.assertTemplateNotUsed(response, "Invalid POST Template")
form = response.context["form"]
self.assertFormError(form, "email", "This field is required.")
self.assertFormError(form, "single", "This field is required.")
self.assertFormError(form, "multi", "This field is required.")
def test_form_error_with_template(self):
"POST erroneous data to a form using multiple templates"
post_data = {
"text": "Hello World",
"email": "not an email address",
"value": 37,
"single": "b",
"multi": ("b", "c", "e"),
}
response = self.client.post("/form_view_with_template/", post_data)
self.assertContains(response, "POST data has errors")
self.assertTemplateUsed(response, "form_view.html")
self.assertTemplateUsed(response, "base.html")
self.assertTemplateNotUsed(response, "Invalid POST Template")
self.assertFormError(
response.context["form"], "email", "Enter a valid email address."
)
def test_unknown_page(self):
"GET an invalid URL"
response = self.client.get("/unknown_view/")
# The response was a 404
self.assertEqual(response.status_code, 404)
def test_url_parameters(self):
"Make sure that URL ;-parameters are not stripped."
response = self.client.get("/unknown_view/;some-parameter")
# The path in the response includes it (ignore that it's a 404)
self.assertEqual(response.request["PATH_INFO"], "/unknown_view/;some-parameter")
def test_view_with_login(self):
"Request a page that is protected with @login_required"
# Get the page without logging in. Should result in 302.
response = self.client.get("/login_protected_view/")
self.assertRedirects(response, "/accounts/login/?next=/login_protected_view/")
# Log in
login = self.client.login(username="testclient", password="password")
self.assertTrue(login, "Could not log in")
# Request a page that requires a login
response = self.client.get("/login_protected_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["user"].username, "testclient")
@override_settings(
INSTALLED_APPS=["django.contrib.auth"],
SESSION_ENGINE="django.contrib.sessions.backends.file",
)
def test_view_with_login_when_sessions_app_is_not_installed(self):
self.test_view_with_login()
def test_view_with_force_login(self):
"Request a page that is protected with @login_required"
# Get the page without logging in. Should result in 302.
response = self.client.get("/login_protected_view/")
self.assertRedirects(response, "/accounts/login/?next=/login_protected_view/")
# Log in
self.client.force_login(self.u1)
# Request a page that requires a login
response = self.client.get("/login_protected_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["user"].username, "testclient")
def test_view_with_method_login(self):
"Request a page that is protected with a @login_required method"
# Get the page without logging in. Should result in 302.
response = self.client.get("/login_protected_method_view/")
self.assertRedirects(
response, "/accounts/login/?next=/login_protected_method_view/"
)
# Log in
login = self.client.login(username="testclient", password="password")
self.assertTrue(login, "Could not log in")
# Request a page that requires a login
response = self.client.get("/login_protected_method_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["user"].username, "testclient")
def test_view_with_method_force_login(self):
"Request a page that is protected with a @login_required method"
# Get the page without logging in. Should result in 302.
response = self.client.get("/login_protected_method_view/")
self.assertRedirects(
response, "/accounts/login/?next=/login_protected_method_view/"
)
# Log in
self.client.force_login(self.u1)
# Request a page that requires a login
response = self.client.get("/login_protected_method_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["user"].username, "testclient")
def test_view_with_login_and_custom_redirect(self):
"""
Request a page that is protected with
@login_required(redirect_field_name='redirect_to')
"""
# Get the page without logging in. Should result in 302.
response = self.client.get("/login_protected_view_custom_redirect/")
self.assertRedirects(
response,
"/accounts/login/?redirect_to=/login_protected_view_custom_redirect/",
)
# Log in
login = self.client.login(username="testclient", password="password")
self.assertTrue(login, "Could not log in")
# Request a page that requires a login
response = self.client.get("/login_protected_view_custom_redirect/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["user"].username, "testclient")
def test_view_with_force_login_and_custom_redirect(self):
"""
Request a page that is protected with
@login_required(redirect_field_name='redirect_to')
"""
# Get the page without logging in. Should result in 302.
response = self.client.get("/login_protected_view_custom_redirect/")
self.assertRedirects(
response,
"/accounts/login/?redirect_to=/login_protected_view_custom_redirect/",
)
# Log in
self.client.force_login(self.u1)
# Request a page that requires a login
response = self.client.get("/login_protected_view_custom_redirect/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["user"].username, "testclient")
def test_view_with_bad_login(self):
"Request a page that is protected with @login, but use bad credentials"
login = self.client.login(username="otheruser", password="nopassword")
self.assertFalse(login)
def test_view_with_inactive_login(self):
"""
        An inactive user may log in if the authentication backend allows it.
"""
credentials = {"username": "inactive", "password": "password"}
self.assertFalse(self.client.login(**credentials))
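        # AllowAllUsersModelBackend skips the is_active check, so the same
        # credentials succeed once it is enabled.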
with self.settings(
AUTHENTICATION_BACKENDS=[
"django.contrib.auth.backends.AllowAllUsersModelBackend"
]
):
self.assertTrue(self.client.login(**credentials))
@override_settings(
AUTHENTICATION_BACKENDS=[
"django.contrib.auth.backends.ModelBackend",
"django.contrib.auth.backends.AllowAllUsersModelBackend",
]
)
def test_view_with_inactive_force_login(self):
"Request a page that is protected with @login, but use an inactive login"
# Get the page without logging in. Should result in 302.
response = self.client.get("/login_protected_view/")
self.assertRedirects(response, "/accounts/login/?next=/login_protected_view/")
# Log in
self.client.force_login(
self.u2, backend="django.contrib.auth.backends.AllowAllUsersModelBackend"
)
# Request a page that requires a login
response = self.client.get("/login_protected_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["user"].username, "inactive")
def test_logout(self):
"Request a logout after logging in"
# Log in
self.client.login(username="testclient", password="password")
# Request a page that requires a login
response = self.client.get("/login_protected_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["user"].username, "testclient")
# Log out
self.client.logout()
# Request a page that requires a login
response = self.client.get("/login_protected_view/")
self.assertRedirects(response, "/accounts/login/?next=/login_protected_view/")
def test_logout_with_force_login(self):
"Request a logout after logging in"
# Log in
self.client.force_login(self.u1)
# Request a page that requires a login
response = self.client.get("/login_protected_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["user"].username, "testclient")
# Log out
self.client.logout()
# Request a page that requires a login
response = self.client.get("/login_protected_view/")
self.assertRedirects(response, "/accounts/login/?next=/login_protected_view/")
@override_settings(
AUTHENTICATION_BACKENDS=[
"django.contrib.auth.backends.ModelBackend",
"test_client.auth_backends.TestClientBackend",
],
)
def test_force_login_with_backend(self):
"""
Request a page that is protected with @login_required when using
force_login() and passing a backend.
"""
# Get the page without logging in. Should result in 302.
response = self.client.get("/login_protected_view/")
self.assertRedirects(response, "/accounts/login/?next=/login_protected_view/")
# Log in
self.client.force_login(
self.u1, backend="test_client.auth_backends.TestClientBackend"
)
self.assertEqual(self.u1.backend, "test_client.auth_backends.TestClientBackend")
# Request a page that requires a login
response = self.client.get("/login_protected_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["user"].username, "testclient")
@override_settings(
AUTHENTICATION_BACKENDS=[
"django.contrib.auth.backends.ModelBackend",
"test_client.auth_backends.TestClientBackend",
],
)
def test_force_login_without_backend(self):
"""
force_login() without passing a backend and with multiple backends
configured should automatically use the first backend.
"""
self.client.force_login(self.u1)
response = self.client.get("/login_protected_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["user"].username, "testclient")
self.assertEqual(self.u1.backend, "django.contrib.auth.backends.ModelBackend")
@override_settings(
AUTHENTICATION_BACKENDS=[
"test_client.auth_backends.BackendWithoutGetUserMethod",
"django.contrib.auth.backends.ModelBackend",
]
)
def test_force_login_with_backend_missing_get_user(self):
"""
force_login() skips auth backends without a get_user() method.
"""
self.client.force_login(self.u1)
self.assertEqual(self.u1.backend, "django.contrib.auth.backends.ModelBackend")
@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.signed_cookies")
def test_logout_cookie_sessions(self):
self.test_logout()
def test_view_with_permissions(self):
"Request a page that is protected with @permission_required"
# Get the page without logging in. Should result in 302.
response = self.client.get("/permission_protected_view/")
self.assertRedirects(
response, "/accounts/login/?next=/permission_protected_view/"
)
# Log in
login = self.client.login(username="testclient", password="password")
self.assertTrue(login, "Could not log in")
# Log in with wrong permissions. Should result in 302.
response = self.client.get("/permission_protected_view/")
self.assertRedirects(
response, "/accounts/login/?next=/permission_protected_view/"
)
# TODO: Log in with right permissions and request the page again
def test_view_with_permissions_exception(self):
"""
Request a page that is protected with @permission_required but raises
an exception.
"""
# Get the page without logging in. Should result in 403.
response = self.client.get("/permission_protected_view_exception/")
self.assertEqual(response.status_code, 403)
# Log in
login = self.client.login(username="testclient", password="password")
self.assertTrue(login, "Could not log in")
# Log in with wrong permissions. Should result in 403.
response = self.client.get("/permission_protected_view_exception/")
self.assertEqual(response.status_code, 403)
def test_view_with_method_permissions(self):
"Request a page that is protected with a @permission_required method"
# Get the page without logging in. Should result in 302.
response = self.client.get("/permission_protected_method_view/")
self.assertRedirects(
response, "/accounts/login/?next=/permission_protected_method_view/"
)
# Log in
login = self.client.login(username="testclient", password="password")
self.assertTrue(login, "Could not log in")
# Log in with wrong permissions. Should result in 302.
response = self.client.get("/permission_protected_method_view/")
self.assertRedirects(
response, "/accounts/login/?next=/permission_protected_method_view/"
)
# TODO: Log in with right permissions and request the page again
def test_external_redirect(self):
response = self.client.get("/django_project_redirect/")
self.assertRedirects(
response, "https://www.djangoproject.com/", fetch_redirect_response=False
)
def test_external_redirect_without_trailing_slash(self):
"""
Client._handle_redirects() with an empty path.
"""
response = self.client.get("/no_trailing_slash_external_redirect/", follow=True)
self.assertRedirects(response, "https://testserver")
def test_external_redirect_with_fetch_error_msg(self):
"""
assertRedirects without fetch_redirect_response=False raises
a relevant ValueError rather than a non-descript AssertionError.
"""
response = self.client.get("/django_project_redirect/")
msg = (
"The test client is unable to fetch remote URLs (got "
"https://www.djangoproject.com/). If the host is served by Django, "
"add 'www.djangoproject.com' to ALLOWED_HOSTS. "
"Otherwise, use assertRedirects(..., fetch_redirect_response=False)."
)
with self.assertRaisesMessage(ValueError, msg):
self.assertRedirects(response, "https://www.djangoproject.com/")
def test_session_modifying_view(self):
"Request a page that modifies the session"
# Session value isn't set initially
with self.assertRaises(KeyError):
self.client.session["tobacconist"]
self.client.post("/session_view/")
# The session was modified
self.assertEqual(self.client.session["tobacconist"], "hovercraft")
@override_settings(
INSTALLED_APPS=[],
SESSION_ENGINE="django.contrib.sessions.backends.file",
)
def test_sessions_app_is_not_installed(self):
self.test_session_modifying_view()
@override_settings(
INSTALLED_APPS=[],
SESSION_ENGINE="django.contrib.sessions.backends.nonexistent",
)
def test_session_engine_is_invalid(self):
with self.assertRaisesMessage(ImportError, "nonexistent"):
self.test_session_modifying_view()
def test_view_with_exception(self):
"Request a page that is known to throw an error"
with self.assertRaises(KeyError):
self.client.get("/broken_view/")
def test_exc_info(self):
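        # With raise_request_exception=False, the exception is stored on the
        # response as exc_info instead of being re-raised.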
client = Client(raise_request_exception=False)
response = client.get("/broken_view/")
self.assertEqual(response.status_code, 500)
exc_type, exc_value, exc_traceback = response.exc_info
self.assertIs(exc_type, KeyError)
self.assertIsInstance(exc_value, KeyError)
self.assertEqual(str(exc_value), "'Oops! Looks like you wrote some bad code.'")
self.assertIsNotNone(exc_traceback)
def test_exc_info_none(self):
response = self.client.get("/get_view/")
self.assertIsNone(response.exc_info)
def test_mail_sending(self):
"Mail is redirected to a dummy outbox during test setup"
response = self.client.get("/mail_sending_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, "Test message")
self.assertEqual(mail.outbox[0].body, "This is a test email")
self.assertEqual(mail.outbox[0].from_email, "[email protected]")
self.assertEqual(mail.outbox[0].to[0], "[email protected]")
self.assertEqual(mail.outbox[0].to[1], "[email protected]")
def test_reverse_lazy_decodes(self):
"reverse_lazy() works in the test client"
data = {"var": "data"}
response = self.client.get(reverse_lazy("get_view"), data)
# Check some response details
self.assertContains(response, "This is a test")
def test_relative_redirect(self):
response = self.client.get("/accounts/")
self.assertRedirects(response, "/accounts/login/")
def test_relative_redirect_no_trailing_slash(self):
response = self.client.get("/accounts/no_trailing_slash")
self.assertRedirects(response, "/accounts/login/")
def test_mass_mail_sending(self):
"Mass mail is redirected to a dummy outbox during test setup"
response = self.client.get("/mass_mail_sending_view/")
self.assertEqual(response.status_code, 200)
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[0].subject, "First Test message")
self.assertEqual(mail.outbox[0].body, "This is the first test email")
self.assertEqual(mail.outbox[0].from_email, "[email protected]")
self.assertEqual(mail.outbox[0].to[0], "[email protected]")
self.assertEqual(mail.outbox[0].to[1], "[email protected]")
self.assertEqual(mail.outbox[1].subject, "Second Test message")
self.assertEqual(mail.outbox[1].body, "This is the second test email")
self.assertEqual(mail.outbox[1].from_email, "[email protected]")
self.assertEqual(mail.outbox[1].to[0], "[email protected]")
self.assertEqual(mail.outbox[1].to[1], "[email protected]")
def test_exception_following_nested_client_request(self):
"""
A nested test client request shouldn't clobber exception signals from
the outer client request.
"""
with self.assertRaisesMessage(Exception, "exception message"):
self.client.get("/nesting_exception_view/")
def test_response_raises_multi_arg_exception(self):
"""A request may raise an exception with more than one required arg."""
with self.assertRaises(TwoArgException) as cm:
self.client.get("/two_arg_exception/")
self.assertEqual(cm.exception.args, ("one", "two"))
def test_uploading_temp_file(self):
with tempfile.TemporaryFile() as test_file:
response = self.client.post("/upload_view/", data={"temp_file": test_file})
self.assertEqual(response.content, b"temp_file")
def test_uploading_named_temp_file(self):
with tempfile.NamedTemporaryFile() as test_file:
response = self.client.post(
"/upload_view/",
data={"named_temp_file": test_file},
)
self.assertEqual(response.content, b"named_temp_file")
@override_settings(
MIDDLEWARE=["django.middleware.csrf.CsrfViewMiddleware"],
ROOT_URLCONF="test_client.urls",
)
class CSRFEnabledClientTests(SimpleTestCase):
def test_csrf_enabled_client(self):
"A client can be instantiated with CSRF checks enabled"
csrf_client = Client(enforce_csrf_checks=True)
# The normal client allows the post
response = self.client.post("/post_view/", {})
self.assertEqual(response.status_code, 200)
# The CSRF-enabled client rejects it
response = csrf_client.post("/post_view/", {})
self.assertEqual(response.status_code, 403)
class CustomTestClient(Client):
i_am_customized = "Yes"
class CustomTestClientTest(SimpleTestCase):
client_class = CustomTestClient
def test_custom_test_client(self):
"""A test case can specify a custom class for self.client."""
self.assertIs(hasattr(self.client, "i_am_customized"), True)
def _generic_view(request):
return HttpResponse(status=200)
@override_settings(ROOT_URLCONF="test_client.urls")
class RequestFactoryTest(SimpleTestCase):
"""Tests for the request factory."""
# A mapping between names of HTTP/1.1 methods and their test views.
http_methods_and_views = (
("get", get_view),
("post", post_view),
("put", _generic_view),
("patch", _generic_view),
("delete", _generic_view),
("head", _generic_view),
("options", _generic_view),
("trace", trace_view),
)
request_factory = RequestFactory()
def test_request_factory(self):
"""The request factory implements all the HTTP/1.1 methods."""
for method_name, view in self.http_methods_and_views:
method = getattr(self.request_factory, method_name)
request = method("/somewhere/")
response = view(request)
self.assertEqual(response.status_code, 200)
def test_get_request_from_factory(self):
"""
The request factory returns a templated response for a GET request.
"""
request = self.request_factory.get("/somewhere/")
response = get_view(request)
self.assertContains(response, "This is a test")
def test_trace_request_from_factory(self):
"""The request factory returns an echo response for a TRACE request."""
url_path = "/somewhere/"
request = self.request_factory.trace(url_path)
response = trace_view(request)
protocol = request.META["SERVER_PROTOCOL"]
echoed_request_line = "TRACE {} {}".format(url_path, protocol)
self.assertContains(response, echoed_request_line)
def test_request_factory_default_headers(self):
request = RequestFactory(
HTTP_AUTHORIZATION="Bearer faketoken",
HTTP_X_ANOTHER_HEADER="some other value",
).get("/somewhere/")
self.assertEqual(request.headers["authorization"], "Bearer faketoken")
self.assertIn("HTTP_AUTHORIZATION", request.META)
self.assertEqual(request.headers["x-another-header"], "some other value")
self.assertIn("HTTP_X_ANOTHER_HEADER", request.META)
request = RequestFactory(
headers={
"Authorization": "Bearer faketoken",
"X-Another-Header": "some other value",
}
).get("/somewhere/")
self.assertEqual(request.headers["authorization"], "Bearer faketoken")
self.assertIn("HTTP_AUTHORIZATION", request.META)
self.assertEqual(request.headers["x-another-header"], "some other value")
self.assertIn("HTTP_X_ANOTHER_HEADER", request.META)
def test_request_factory_sets_headers(self):
for method_name, view in self.http_methods_and_views:
method = getattr(self.request_factory, method_name)
request = method(
"/somewhere/",
HTTP_AUTHORIZATION="Bearer faketoken",
HTTP_X_ANOTHER_HEADER="some other value",
)
self.assertEqual(request.headers["authorization"], "Bearer faketoken")
self.assertIn("HTTP_AUTHORIZATION", request.META)
self.assertEqual(request.headers["x-another-header"], "some other value")
self.assertIn("HTTP_X_ANOTHER_HEADER", request.META)
request = method(
"/somewhere/",
headers={
"Authorization": "Bearer faketoken",
"X-Another-Header": "some other value",
},
)
self.assertEqual(request.headers["authorization"], "Bearer faketoken")
self.assertIn("HTTP_AUTHORIZATION", request.META)
self.assertEqual(request.headers["x-another-header"], "some other value")
self.assertIn("HTTP_X_ANOTHER_HEADER", request.META)
@override_settings(ROOT_URLCONF="test_client.urls")
class AsyncClientTest(TestCase):
async def test_response_resolver_match(self):
response = await self.async_client.get("/async_get_view/")
self.assertTrue(hasattr(response, "resolver_match"))
self.assertEqual(response.resolver_match.url_name, "async_get_view")
@modify_settings(
MIDDLEWARE={"prepend": "test_client.tests.async_middleware_urlconf"},
)
async def test_response_resolver_match_middleware_urlconf(self):
response = await self.async_client.get("/middleware_urlconf_view/")
self.assertEqual(response.resolver_match.url_name, "middleware_urlconf_view")
async def test_follow_parameter_not_implemented(self):
msg = "AsyncClient request methods do not accept the follow parameter."
tests = (
"get",
"post",
"put",
"patch",
"delete",
"head",
"options",
"trace",
)
for method_name in tests:
with self.subTest(method=method_name):
method = getattr(self.async_client, method_name)
with self.assertRaisesMessage(NotImplementedError, msg):
await method("/redirect_view/", follow=True)
async def test_get_data(self):
response = await self.async_client.get("/get_view/", {"var": "val"})
self.assertContains(response, "This is a test. val is the value.")
async def test_post_data(self):
response = await self.async_client.post("/post_view/", {"value": 37})
self.assertContains(response, "Data received: 37 is the value.")
async def test_body_read_on_get_data(self):
response = await self.async_client.get("/post_view/")
self.assertContains(response, "Viewing GET page.")
@override_settings(ROOT_URLCONF="test_client.urls")
class AsyncRequestFactoryTest(SimpleTestCase):
request_factory = AsyncRequestFactory()
async def test_request_factory(self):
tests = (
"get",
"post",
"put",
"patch",
"delete",
"head",
"options",
"trace",
)
for method_name in tests:
with self.subTest(method=method_name):
async def async_generic_view(request):
if request.method.lower() != method_name:
return HttpResponseNotAllowed(method_name)
return HttpResponse(status=200)
method = getattr(self.request_factory, method_name)
request = method("/somewhere/")
response = await async_generic_view(request)
self.assertEqual(response.status_code, 200)
async def test_request_factory_data(self):
async def async_generic_view(request):
return HttpResponse(status=200, content=request.body)
request = self.request_factory.post(
"/somewhere/",
data={"example": "data"},
content_type="application/json",
)
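        # len('{"example": "data"}') == 19 once the data dict is serialized.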
self.assertEqual(request.headers["content-length"], "19")
self.assertEqual(request.headers["content-type"], "application/json")
response = await async_generic_view(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'{"example": "data"}')
async def test_request_limited_read(self):
tests = ["GET", "POST"]
for method in tests:
with self.subTest(method=method):
request = self.request_factory.generic(
method,
"/somewhere",
)
self.assertEqual(request.read(200), b"")
def test_request_factory_sets_headers(self):
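        # Unlike the WSGI RequestFactory above, the async factory accepts
        # extra headers without the HTTP_ prefix.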
request = self.request_factory.get(
"/somewhere/",
AUTHORIZATION="Bearer faketoken",
X_ANOTHER_HEADER="some other value",
)
self.assertEqual(request.headers["authorization"], "Bearer faketoken")
self.assertIn("HTTP_AUTHORIZATION", request.META)
self.assertEqual(request.headers["x-another-header"], "some other value")
self.assertIn("HTTP_X_ANOTHER_HEADER", request.META)
request = self.request_factory.get(
"/somewhere/",
headers={
"Authorization": "Bearer faketoken",
"X-Another-Header": "some other value",
},
)
self.assertEqual(request.headers["authorization"], "Bearer faketoken")
self.assertIn("HTTP_AUTHORIZATION", request.META)
self.assertEqual(request.headers["x-another-header"], "some other value")
self.assertIn("HTTP_X_ANOTHER_HEADER", request.META)
def test_request_factory_query_string(self):
request = self.request_factory.get("/somewhere/", {"example": "data"})
self.assertNotIn("Query-String", request.headers)
self.assertEqual(request.GET["example"], "data")
|
bb7b36f7aaee8b7a6001e581f63c4beb2151a1d22a65997c48081472354806c4 | import json
from urllib.parse import urlencode
from xml.dom.minidom import parseString
from django.contrib.auth.decorators import login_required, permission_required
from django.core import mail
from django.core.exceptions import ValidationError
from django.forms import fields
from django.forms.forms import Form
from django.http import (
HttpResponse,
HttpResponseBadRequest,
HttpResponseNotAllowed,
HttpResponseNotFound,
HttpResponseRedirect,
)
from django.shortcuts import render
from django.template import Context, Template
from django.test import Client
from django.utils.decorators import method_decorator
from django.views.generic import TemplateView
def get_view(request):
"A simple view that expects a GET request, and returns a rendered template"
t = Template("This is a test. {{ var }} is the value.", name="GET Template")
c = Context({"var": request.GET.get("var", 42)})
return HttpResponse(t.render(c))
async def async_get_view(request):
return HttpResponse(b"GET content.")
def trace_view(request):
"""
A simple view that expects a TRACE request and echoes its status line.
    TRACE requests must not include an entity; the view returns a 400 response
    if one is present.
"""
if request.method.upper() != "TRACE":
return HttpResponseNotAllowed("TRACE")
elif request.body:
return HttpResponseBadRequest("TRACE requests MUST NOT include an entity")
else:
protocol = request.META["SERVER_PROTOCOL"]
t = Template(
"{{ method }} {{ uri }} {{ version }}",
name="TRACE Template",
)
c = Context(
{
"method": request.method,
"uri": request.path,
"version": protocol,
}
)
return HttpResponse(t.render(c))
def put_view(request):
if request.method == "PUT":
t = Template("Data received: {{ data }} is the body.", name="PUT Template")
c = Context(
{
"Content-Length": request.META["CONTENT_LENGTH"],
"data": request.body.decode(),
}
)
else:
t = Template("Viewing GET page.", name="Empty GET Template")
c = Context()
return HttpResponse(t.render(c))
def post_view(request):
"""A view that expects a POST, and returns a different template depending
on whether any POST data is available
"""
if request.method == "POST":
if request.POST:
t = Template(
"Data received: {{ data }} is the value.", name="POST Template"
)
c = Context({"data": request.POST["value"]})
else:
t = Template("Viewing POST page.", name="Empty POST Template")
c = Context()
else:
t = Template("Viewing GET page.", name="Empty GET Template")
# Used by test_body_read_on_get_data.
request.read(200)
c = Context()
return HttpResponse(t.render(c))
def post_then_get_view(request):
"""
A view that expects a POST request, returns a redirect response
to itself providing only a ?success=true querystring,
the value of this querystring is then rendered upon GET.
"""
if request.method == "POST":
return HttpResponseRedirect("?success=true")
t = Template("The value of success is {{ value }}.", name="GET Template")
c = Context({"value": request.GET.get("success", "false")})
return HttpResponse(t.render(c))
def json_view(request):
"""
    A view that expects a request with an 'application/json' content type and
    JSON data, which is deserialized and included in the context.
"""
if request.META.get("CONTENT_TYPE") != "application/json":
return HttpResponse()
t = Template("Viewing {} page. With data {{ data }}.".format(request.method))
data = json.loads(request.body.decode("utf-8"))
c = Context({"data": data})
return HttpResponse(t.render(c))
def view_with_header(request):
"A view that has a custom header"
response = HttpResponse()
response.headers["X-DJANGO-TEST"] = "Slartibartfast"
return response
def raw_post_view(request):
"""A view which expects raw XML to be posted and returns content extracted
from the XML"""
if request.method == "POST":
root = parseString(request.body)
first_book = root.firstChild.firstChild
title, author = [n.firstChild.nodeValue for n in first_book.childNodes]
t = Template("{{ title }} - {{ author }}", name="Book template")
c = Context({"title": title, "author": author})
else:
t = Template("GET request.", name="Book GET template")
c = Context()
return HttpResponse(t.render(c))
def redirect_view(request):
"A view that redirects all requests to the GET view"
if request.GET:
query = "?" + urlencode(request.GET, True)
else:
query = ""
return HttpResponseRedirect("/get_view/" + query)
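# urlencode(request.GET, True) passes doseq=True so that multi-valued query
# parameters survive the redirect intact.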
def method_saving_307_redirect_query_string_view(request):
return HttpResponseRedirect("/post_view/?hello=world", status=307)
def method_saving_308_redirect_query_string_view(request):
return HttpResponseRedirect("/post_view/?hello=world", status=308)
def _post_view_redirect(request, status_code):
"""Redirect to /post_view/ using the status code."""
redirect_to = request.GET.get("to", "/post_view/")
return HttpResponseRedirect(redirect_to, status=status_code)
def method_saving_307_redirect_view(request):
return _post_view_redirect(request, 307)
def method_saving_308_redirect_view(request):
return _post_view_redirect(request, 308)
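# Unlike 301/302, the 307 and 308 status codes require clients to replay the
# original method and body on redirect, which is what these views verify.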
def view_with_secure(request):
"A view that indicates if the request was secure"
response = HttpResponse()
response.test_was_secure_request = request.is_secure()
response.test_server_port = request.META.get("SERVER_PORT", 80)
return response
def double_redirect_view(request):
"A view that redirects all requests to a redirection view"
return HttpResponseRedirect("/permanent_redirect_view/")
def bad_view(request):
"A view that returns a 404 with some error content"
return HttpResponseNotFound("Not found!. This page contains some MAGIC content")
TestChoices = (
("a", "First Choice"),
("b", "Second Choice"),
("c", "Third Choice"),
("d", "Fourth Choice"),
("e", "Fifth Choice"),
)
class TestForm(Form):
text = fields.CharField()
email = fields.EmailField()
value = fields.IntegerField()
single = fields.ChoiceField(choices=TestChoices)
multi = fields.MultipleChoiceField(choices=TestChoices)
def clean(self):
cleaned_data = self.cleaned_data
if cleaned_data.get("text") == "Raise non-field error":
raise ValidationError("Non-field error.")
return cleaned_data
def form_view(request):
"A view that tests a simple form"
if request.method == "POST":
form = TestForm(request.POST)
if form.is_valid():
t = Template("Valid POST data.", name="Valid POST Template")
c = Context()
else:
t = Template(
"Invalid POST data. {{ form.errors }}", name="Invalid POST Template"
)
c = Context({"form": form})
else:
form = TestForm(request.GET)
t = Template("Viewing base form. {{ form }}.", name="Form GET Template")
c = Context({"form": form})
return HttpResponse(t.render(c))
def form_view_with_template(request):
"A view that tests a simple form"
if request.method == "POST":
form = TestForm(request.POST)
if form.is_valid():
message = "POST data OK"
else:
message = "POST data has errors"
else:
form = TestForm()
message = "GET form page"
return render(
request,
"form_view.html",
{
"form": form,
"message": message,
},
)
@login_required
def login_protected_view(request):
"A simple view that is login protected."
t = Template(
"This is a login protected test. Username is {{ user.username }}.",
name="Login Template",
)
c = Context({"user": request.user})
return HttpResponse(t.render(c))
@login_required(redirect_field_name="redirect_to")
def login_protected_view_changed_redirect(request):
"A simple view that is login protected with a custom redirect field set"
t = Template(
"This is a login protected test. Username is {{ user.username }}.",
name="Login Template",
)
c = Context({"user": request.user})
return HttpResponse(t.render(c))
def _permission_protected_view(request):
"A simple view that is permission protected."
t = Template(
"This is a permission protected test. "
"Username is {{ user.username }}. "
"Permissions are {{ user.get_all_permissions }}.",
name="Permissions Template",
)
c = Context({"user": request.user})
return HttpResponse(t.render(c))
permission_protected_view = permission_required("permission_not_granted")(
_permission_protected_view
)
permission_protected_view_exception = permission_required(
"permission_not_granted", raise_exception=True
)(_permission_protected_view)
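# With raise_exception=True the decorator raises PermissionDenied (rendered
# as a 403) instead of redirecting to the login page.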
class _ViewManager:
@method_decorator(login_required)
def login_protected_view(self, request):
t = Template(
"This is a login protected test using a method. "
"Username is {{ user.username }}.",
name="Login Method Template",
)
c = Context({"user": request.user})
return HttpResponse(t.render(c))
@method_decorator(permission_required("permission_not_granted"))
def permission_protected_view(self, request):
t = Template(
"This is a permission protected test using a method. "
"Username is {{ user.username }}. "
"Permissions are {{ user.get_all_permissions }}.",
name="Permissions Template",
)
c = Context({"user": request.user})
return HttpResponse(t.render(c))
_view_manager = _ViewManager()
login_protected_method_view = _view_manager.login_protected_view
permission_protected_method_view = _view_manager.permission_protected_view
def session_view(request):
"A view that modifies the session"
request.session["tobacconist"] = "hovercraft"
t = Template(
"This is a view that modifies the session.",
name="Session Modifying View Template",
)
c = Context()
return HttpResponse(t.render(c))
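# The test client persists this session across requests; tests can inspect it
# via self.client.session["tobacconist"].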
def broken_view(request):
"""A view which just raises an exception, simulating a broken view."""
raise KeyError("Oops! Looks like you wrote some bad code.")
def mail_sending_view(request):
mail.EmailMessage(
"Test message",
"This is a test email",
"[email protected]",
["[email protected]", "[email protected]"],
).send()
return HttpResponse("Mail sent")
def mass_mail_sending_view(request):
m1 = mail.EmailMessage(
"First Test message",
"This is the first test email",
"[email protected]",
["[email protected]", "[email protected]"],
)
m2 = mail.EmailMessage(
"Second Test message",
"This is the second test email",
"[email protected]",
["[email protected]", "[email protected]"],
)
c = mail.get_connection()
c.send_messages([m1, m2])
return HttpResponse("Mail sent")
def nesting_exception_view(request):
"""
A view that uses a nested client to call another view and then raises an
exception.
"""
client = Client()
client.get("/get_view/")
raise Exception("exception message")
def django_project_redirect(request):
return HttpResponseRedirect("https://www.djangoproject.com/")
def no_trailing_slash_external_redirect(request):
"""
RFC 3986 Section 6.2.3: Empty path should be normalized to "/".
Use https://testserver, rather than an external domain, in order to allow
use of follow=True, triggering Client._handle_redirects().
"""
return HttpResponseRedirect("https://testserver")
def index_view(request):
"""Target for no_trailing_slash_external_redirect with follow=True."""
return HttpResponse("Hello world")
def upload_view(request):
"""Prints keys of request.FILES to the response."""
return HttpResponse(", ".join(request.FILES))
class TwoArgException(Exception):
def __init__(self, one, two):
pass
def two_arg_exception(request):
raise TwoArgException("one", "two")
class CBView(TemplateView):
template_name = "base.html"
|
5284aa4752509c37ba027e2a91f68b386c6e9fd98f22a4e13cc8177f3e38a99b | from datetime import datetime
from django.test import SimpleTestCase, override_settings
FULL_RESPONSE = "Test conditional get response"
LAST_MODIFIED = datetime(2007, 10, 21, 23, 21, 47)
LAST_MODIFIED_STR = "Sun, 21 Oct 2007 23:21:47 GMT"
LAST_MODIFIED_NEWER_STR = "Mon, 18 Oct 2010 16:56:23 GMT"
LAST_MODIFIED_INVALID_STR = "Mon, 32 Oct 2010 16:56:23 GMT"
EXPIRED_LAST_MODIFIED_STR = "Sat, 20 Oct 2007 23:21:47 GMT"
ETAG = '"b4246ffc4f62314ca13147c9d4f76974"'
WEAK_ETAG = 'W/"b4246ffc4f62314ca13147c9d4f76974"' # weak match to ETAG
EXPIRED_ETAG = '"7fae4cd4b0f81e7d2914700043aa8ed6"'
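# RFC 9110: the W/ prefix marks a weak ETag, which matches under the weak
# comparison used by If-None-Match but never under the strong comparison
# used by If-Match.
# A minimal sketch (not wired into conditional_processing.urls) of the kind
# of view under test: the condition() decorator derives the 304/412 handling
# from etag/last_modified callables before the view body runs.
def _example_conditional_view(request):
    from django.http import HttpResponse
    from django.views.decorators.http import condition
    @condition(
        etag_func=lambda req, *args, **kwargs: ETAG,
        last_modified_func=lambda req, *args, **kwargs: LAST_MODIFIED,
    )
    def index(req):
        return HttpResponse(FULL_RESPONSE)
    return index(request)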
@override_settings(ROOT_URLCONF="conditional_processing.urls")
class ConditionalGet(SimpleTestCase):
def assertFullResponse(self, response, check_last_modified=True, check_etag=True):
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, FULL_RESPONSE.encode())
if response.request["REQUEST_METHOD"] in ("GET", "HEAD"):
if check_last_modified:
self.assertEqual(response.headers["Last-Modified"], LAST_MODIFIED_STR)
if check_etag:
self.assertEqual(response.headers["ETag"], ETAG)
else:
self.assertNotIn("Last-Modified", response.headers)
self.assertNotIn("ETag", response.headers)
def assertNotModified(self, response):
self.assertEqual(response.status_code, 304)
self.assertEqual(response.content, b"")
def test_without_conditions(self):
response = self.client.get("/condition/")
self.assertFullResponse(response)
def test_if_modified_since(self):
self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = LAST_MODIFIED_STR
response = self.client.get("/condition/")
self.assertNotModified(response)
response = self.client.put("/condition/")
self.assertFullResponse(response)
self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = LAST_MODIFIED_NEWER_STR
response = self.client.get("/condition/")
self.assertNotModified(response)
response = self.client.put("/condition/")
self.assertFullResponse(response)
self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = LAST_MODIFIED_INVALID_STR
response = self.client.get("/condition/")
self.assertFullResponse(response)
self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = EXPIRED_LAST_MODIFIED_STR
response = self.client.get("/condition/")
self.assertFullResponse(response)
def test_if_unmodified_since(self):
self.client.defaults["HTTP_IF_UNMODIFIED_SINCE"] = LAST_MODIFIED_STR
response = self.client.get("/condition/")
self.assertFullResponse(response)
self.client.defaults["HTTP_IF_UNMODIFIED_SINCE"] = LAST_MODIFIED_NEWER_STR
response = self.client.get("/condition/")
self.assertFullResponse(response)
self.client.defaults["HTTP_IF_UNMODIFIED_SINCE"] = LAST_MODIFIED_INVALID_STR
response = self.client.get("/condition/")
self.assertFullResponse(response)
self.client.defaults["HTTP_IF_UNMODIFIED_SINCE"] = EXPIRED_LAST_MODIFIED_STR
response = self.client.get("/condition/")
self.assertEqual(response.status_code, 412)
def test_if_none_match(self):
self.client.defaults["HTTP_IF_NONE_MATCH"] = ETAG
response = self.client.get("/condition/")
self.assertNotModified(response)
response = self.client.put("/condition/")
self.assertEqual(response.status_code, 412)
self.client.defaults["HTTP_IF_NONE_MATCH"] = EXPIRED_ETAG
response = self.client.get("/condition/")
self.assertFullResponse(response)
# Several etags in If-None-Match is a bit exotic but why not?
self.client.defaults["HTTP_IF_NONE_MATCH"] = "%s, %s" % (ETAG, EXPIRED_ETAG)
response = self.client.get("/condition/")
self.assertNotModified(response)
def test_weak_if_none_match(self):
"""
If-None-Match comparisons use weak matching, so weak and strong ETags
with the same value result in a 304 response.
"""
self.client.defaults["HTTP_IF_NONE_MATCH"] = ETAG
response = self.client.get("/condition/weak_etag/")
self.assertNotModified(response)
response = self.client.put("/condition/weak_etag/")
self.assertEqual(response.status_code, 412)
self.client.defaults["HTTP_IF_NONE_MATCH"] = WEAK_ETAG
response = self.client.get("/condition/weak_etag/")
self.assertNotModified(response)
response = self.client.put("/condition/weak_etag/")
self.assertEqual(response.status_code, 412)
response = self.client.get("/condition/")
self.assertNotModified(response)
response = self.client.put("/condition/")
self.assertEqual(response.status_code, 412)
def test_all_if_none_match(self):
self.client.defaults["HTTP_IF_NONE_MATCH"] = "*"
response = self.client.get("/condition/")
self.assertNotModified(response)
response = self.client.put("/condition/")
self.assertEqual(response.status_code, 412)
response = self.client.get("/condition/no_etag/")
self.assertFullResponse(response, check_last_modified=False, check_etag=False)
def test_if_match(self):
self.client.defaults["HTTP_IF_MATCH"] = ETAG
response = self.client.put("/condition/")
self.assertFullResponse(response)
self.client.defaults["HTTP_IF_MATCH"] = EXPIRED_ETAG
response = self.client.put("/condition/")
self.assertEqual(response.status_code, 412)
def test_weak_if_match(self):
"""
        If-Match comparisons use strong matching, so any comparison involving
        a weak ETag returns a 412 response.
"""
self.client.defaults["HTTP_IF_MATCH"] = ETAG
response = self.client.get("/condition/weak_etag/")
self.assertEqual(response.status_code, 412)
self.client.defaults["HTTP_IF_MATCH"] = WEAK_ETAG
response = self.client.get("/condition/weak_etag/")
self.assertEqual(response.status_code, 412)
response = self.client.get("/condition/")
self.assertEqual(response.status_code, 412)
def test_all_if_match(self):
self.client.defaults["HTTP_IF_MATCH"] = "*"
response = self.client.get("/condition/")
self.assertFullResponse(response)
response = self.client.get("/condition/no_etag/")
self.assertEqual(response.status_code, 412)
def test_both_headers(self):
# See RFC 9110 Section 13.2.2.
self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = LAST_MODIFIED_STR
self.client.defaults["HTTP_IF_NONE_MATCH"] = ETAG
response = self.client.get("/condition/")
self.assertNotModified(response)
self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = EXPIRED_LAST_MODIFIED_STR
self.client.defaults["HTTP_IF_NONE_MATCH"] = ETAG
response = self.client.get("/condition/")
self.assertNotModified(response)
self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = LAST_MODIFIED_STR
self.client.defaults["HTTP_IF_NONE_MATCH"] = EXPIRED_ETAG
response = self.client.get("/condition/")
self.assertFullResponse(response)
self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = EXPIRED_LAST_MODIFIED_STR
self.client.defaults["HTTP_IF_NONE_MATCH"] = EXPIRED_ETAG
response = self.client.get("/condition/")
self.assertFullResponse(response)
def test_both_headers_2(self):
self.client.defaults["HTTP_IF_UNMODIFIED_SINCE"] = LAST_MODIFIED_STR
self.client.defaults["HTTP_IF_MATCH"] = ETAG
response = self.client.get("/condition/")
self.assertFullResponse(response)
self.client.defaults["HTTP_IF_UNMODIFIED_SINCE"] = EXPIRED_LAST_MODIFIED_STR
self.client.defaults["HTTP_IF_MATCH"] = ETAG
response = self.client.get("/condition/")
self.assertFullResponse(response)
self.client.defaults["HTTP_IF_UNMODIFIED_SINCE"] = EXPIRED_LAST_MODIFIED_STR
self.client.defaults["HTTP_IF_MATCH"] = EXPIRED_ETAG
response = self.client.get("/condition/")
self.assertEqual(response.status_code, 412)
self.client.defaults["HTTP_IF_UNMODIFIED_SINCE"] = LAST_MODIFIED_STR
self.client.defaults["HTTP_IF_MATCH"] = EXPIRED_ETAG
response = self.client.get("/condition/")
self.assertEqual(response.status_code, 412)
def test_single_condition_1(self):
self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = LAST_MODIFIED_STR
response = self.client.get("/condition/last_modified/")
self.assertNotModified(response)
response = self.client.get("/condition/etag/")
self.assertFullResponse(response, check_last_modified=False)
def test_single_condition_2(self):
self.client.defaults["HTTP_IF_NONE_MATCH"] = ETAG
response = self.client.get("/condition/etag/")
self.assertNotModified(response)
response = self.client.get("/condition/last_modified/")
self.assertFullResponse(response, check_etag=False)
def test_single_condition_3(self):
self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = EXPIRED_LAST_MODIFIED_STR
response = self.client.get("/condition/last_modified/")
self.assertFullResponse(response, check_etag=False)
def test_single_condition_4(self):
self.client.defaults["HTTP_IF_NONE_MATCH"] = EXPIRED_ETAG
response = self.client.get("/condition/etag/")
self.assertFullResponse(response, check_last_modified=False)
def test_single_condition_5(self):
self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = LAST_MODIFIED_STR
response = self.client.get("/condition/last_modified2/")
self.assertNotModified(response)
response = self.client.get("/condition/etag2/")
self.assertFullResponse(response, check_last_modified=False)
def test_single_condition_6(self):
self.client.defaults["HTTP_IF_NONE_MATCH"] = ETAG
response = self.client.get("/condition/etag2/")
self.assertNotModified(response)
response = self.client.get("/condition/last_modified2/")
self.assertFullResponse(response, check_etag=False)
def test_single_condition_7(self):
self.client.defaults["HTTP_IF_UNMODIFIED_SINCE"] = EXPIRED_LAST_MODIFIED_STR
response = self.client.get("/condition/last_modified/")
self.assertEqual(response.status_code, 412)
response = self.client.get("/condition/etag/")
self.assertEqual(response.status_code, 412)
def test_single_condition_8(self):
self.client.defaults["HTTP_IF_UNMODIFIED_SINCE"] = LAST_MODIFIED_STR
response = self.client.get("/condition/last_modified/")
self.assertFullResponse(response, check_etag=False)
def test_single_condition_9(self):
self.client.defaults["HTTP_IF_UNMODIFIED_SINCE"] = EXPIRED_LAST_MODIFIED_STR
response = self.client.get("/condition/last_modified2/")
self.assertEqual(response.status_code, 412)
response = self.client.get("/condition/etag2/")
self.assertEqual(response.status_code, 412)
def test_single_condition_head(self):
self.client.defaults["HTTP_IF_MODIFIED_SINCE"] = LAST_MODIFIED_STR
response = self.client.head("/condition/")
self.assertNotModified(response)
def test_unquoted(self):
"""
The same quoted ETag should be set on the header regardless of whether
etag_func() in condition() returns a quoted or an unquoted ETag.
"""
response_quoted = self.client.get("/condition/etag/")
response_unquoted = self.client.get("/condition/unquoted_etag/")
self.assertEqual(response_quoted["ETag"], response_unquoted["ETag"])
    # It's possible that the matching algorithm could use the wrong value even
    # if the ETag header is set correctly (as tested by test_unquoted()), so
    # check that the unquoted value is matched.
def test_unquoted_if_none_match(self):
self.client.defaults["HTTP_IF_NONE_MATCH"] = ETAG
response = self.client.get("/condition/unquoted_etag/")
self.assertNotModified(response)
response = self.client.put("/condition/unquoted_etag/")
self.assertEqual(response.status_code, 412)
self.client.defaults["HTTP_IF_NONE_MATCH"] = EXPIRED_ETAG
response = self.client.get("/condition/unquoted_etag/")
self.assertFullResponse(response, check_last_modified=False)
def test_invalid_etag(self):
self.client.defaults["HTTP_IF_NONE_MATCH"] = '"""'
response = self.client.get("/condition/etag/")
self.assertFullResponse(response, check_last_modified=False)
|
c6e36dddefaec5238091b92e31023e48a73a558517cea6eedb73dcabd53ef799 | from math import ceil
from operator import attrgetter
from django.core.exceptions import FieldDoesNotExist
from django.db import (
IntegrityError,
NotSupportedError,
OperationalError,
ProgrammingError,
connection,
)
from django.db.models import FileField, Value
from django.db.models.functions import Lower
from django.test import (
TestCase,
override_settings,
skipIfDBFeature,
skipUnlessDBFeature,
)
from .models import (
BigAutoFieldModel,
Country,
NoFields,
NullableFields,
Pizzeria,
ProxyCountry,
ProxyMultiCountry,
ProxyMultiProxyCountry,
ProxyProxyCountry,
RelatedModel,
Restaurant,
SmallAutoFieldModel,
State,
TwoFields,
UpsertConflict,
)
class BulkCreateTests(TestCase):
def setUp(self):
self.data = [
Country(name="United States of America", iso_two_letter="US"),
Country(name="The Netherlands", iso_two_letter="NL"),
Country(name="Germany", iso_two_letter="DE"),
Country(name="Czech Republic", iso_two_letter="CZ"),
]
def test_simple(self):
created = Country.objects.bulk_create(self.data)
self.assertEqual(created, self.data)
self.assertQuerySetEqual(
Country.objects.order_by("-name"),
[
"United States of America",
"The Netherlands",
"Germany",
"Czech Republic",
],
attrgetter("name"),
)
created = Country.objects.bulk_create([])
self.assertEqual(created, [])
self.assertEqual(Country.objects.count(), 4)
@skipUnlessDBFeature("has_bulk_insert")
def test_efficiency(self):
with self.assertNumQueries(1):
Country.objects.bulk_create(self.data)
@skipUnlessDBFeature("has_bulk_insert")
def test_long_non_ascii_text(self):
"""
Inserting non-ASCII values with a length in the range 2001 to 4000
characters, i.e. 4002 to 8000 bytes, must be set as a CLOB on Oracle
(#22144).
"""
Country.objects.bulk_create([Country(description="Ж" * 3000)])
self.assertEqual(Country.objects.count(), 1)
@skipUnlessDBFeature("has_bulk_insert")
def test_long_and_short_text(self):
Country.objects.bulk_create(
[
Country(description="a" * 4001, iso_two_letter="A"),
Country(description="a", iso_two_letter="B"),
Country(description="Ж" * 2001, iso_two_letter="C"),
Country(description="Ж", iso_two_letter="D"),
]
)
self.assertEqual(Country.objects.count(), 4)
def test_multi_table_inheritance_unsupported(self):
expected_message = "Can't bulk create a multi-table inherited model"
with self.assertRaisesMessage(ValueError, expected_message):
Pizzeria.objects.bulk_create(
[
Pizzeria(name="The Art of Pizza"),
]
)
with self.assertRaisesMessage(ValueError, expected_message):
ProxyMultiCountry.objects.bulk_create(
[
ProxyMultiCountry(name="Fillory", iso_two_letter="FL"),
]
)
with self.assertRaisesMessage(ValueError, expected_message):
ProxyMultiProxyCountry.objects.bulk_create(
[
ProxyMultiProxyCountry(name="Fillory", iso_two_letter="FL"),
]
)
def test_proxy_inheritance_supported(self):
ProxyCountry.objects.bulk_create(
[
ProxyCountry(name="Qwghlm", iso_two_letter="QW"),
Country(name="Tortall", iso_two_letter="TA"),
]
)
self.assertQuerySetEqual(
ProxyCountry.objects.all(),
{"Qwghlm", "Tortall"},
attrgetter("name"),
ordered=False,
)
ProxyProxyCountry.objects.bulk_create(
[
ProxyProxyCountry(name="Netherlands", iso_two_letter="NT"),
]
)
self.assertQuerySetEqual(
ProxyProxyCountry.objects.all(),
{
"Qwghlm",
"Tortall",
"Netherlands",
},
attrgetter("name"),
ordered=False,
)
def test_non_auto_increment_pk(self):
State.objects.bulk_create(
[State(two_letter_code=s) for s in ["IL", "NY", "CA", "ME"]]
)
self.assertQuerySetEqual(
State.objects.order_by("two_letter_code"),
[
"CA",
"IL",
"ME",
"NY",
],
attrgetter("two_letter_code"),
)
@skipUnlessDBFeature("has_bulk_insert")
def test_non_auto_increment_pk_efficiency(self):
with self.assertNumQueries(1):
State.objects.bulk_create(
[State(two_letter_code=s) for s in ["IL", "NY", "CA", "ME"]]
)
self.assertQuerySetEqual(
State.objects.order_by("two_letter_code"),
[
"CA",
"IL",
"ME",
"NY",
],
attrgetter("two_letter_code"),
)
@skipIfDBFeature("allows_auto_pk_0")
def test_zero_as_autoval(self):
"""
        Zero as id for AutoField should raise an exception on MySQL, because
        MySQL does not allow zero as an automatic primary key value unless the
        NO_AUTO_VALUE_ON_ZERO SQL mode is enabled.
"""
valid_country = Country(name="Germany", iso_two_letter="DE")
invalid_country = Country(id=0, name="Poland", iso_two_letter="PL")
msg = "The database backend does not accept 0 as a value for AutoField."
with self.assertRaisesMessage(ValueError, msg):
Country.objects.bulk_create([valid_country, invalid_country])
def test_batch_same_vals(self):
# SQLite had a problem where all the same-valued models were
# collapsed to one insert.
Restaurant.objects.bulk_create([Restaurant(name="foo") for i in range(0, 2)])
self.assertEqual(Restaurant.objects.count(), 2)
def test_large_batch(self):
TwoFields.objects.bulk_create(
[TwoFields(f1=i, f2=i + 1) for i in range(0, 1001)]
)
self.assertEqual(TwoFields.objects.count(), 1001)
self.assertEqual(
TwoFields.objects.filter(f1__gte=450, f1__lte=550).count(), 101
)
self.assertEqual(TwoFields.objects.filter(f2__gte=901).count(), 101)
@skipUnlessDBFeature("has_bulk_insert")
def test_large_single_field_batch(self):
# SQLite had a problem with more than 500 UNIONed selects in single
# query.
Restaurant.objects.bulk_create([Restaurant() for i in range(0, 501)])
@skipUnlessDBFeature("has_bulk_insert")
def test_large_batch_efficiency(self):
with override_settings(DEBUG=True):
connection.queries_log.clear()
TwoFields.objects.bulk_create(
[TwoFields(f1=i, f2=i + 1) for i in range(0, 1001)]
)
self.assertLess(len(connection.queries), 10)
def test_large_batch_mixed(self):
"""
Test inserting a large batch with objects having primary key set
mixed together with objects without PK set.
"""
TwoFields.objects.bulk_create(
[
TwoFields(id=i if i % 2 == 0 else None, f1=i, f2=i + 1)
for i in range(100000, 101000)
]
)
self.assertEqual(TwoFields.objects.count(), 1000)
        # We can't assume much about the IDs created, except that the ones
        # created above must exist.
id_range = range(100000, 101000, 2)
self.assertEqual(TwoFields.objects.filter(id__in=id_range).count(), 500)
self.assertEqual(TwoFields.objects.exclude(id__in=id_range).count(), 500)
@skipUnlessDBFeature("has_bulk_insert")
def test_large_batch_mixed_efficiency(self):
"""
Test inserting a large batch with objects having primary key set
mixed together with objects without PK set.
"""
with override_settings(DEBUG=True):
connection.queries_log.clear()
TwoFields.objects.bulk_create(
[
TwoFields(id=i if i % 2 == 0 else None, f1=i, f2=i + 1)
for i in range(100000, 101000)
]
)
self.assertLess(len(connection.queries), 10)
def test_explicit_batch_size(self):
objs = [TwoFields(f1=i, f2=i) for i in range(0, 4)]
num_objs = len(objs)
TwoFields.objects.bulk_create(objs, batch_size=1)
self.assertEqual(TwoFields.objects.count(), num_objs)
TwoFields.objects.all().delete()
TwoFields.objects.bulk_create(objs, batch_size=2)
self.assertEqual(TwoFields.objects.count(), num_objs)
TwoFields.objects.all().delete()
TwoFields.objects.bulk_create(objs, batch_size=3)
self.assertEqual(TwoFields.objects.count(), num_objs)
TwoFields.objects.all().delete()
TwoFields.objects.bulk_create(objs, batch_size=num_objs)
self.assertEqual(TwoFields.objects.count(), num_objs)
def test_empty_model(self):
NoFields.objects.bulk_create([NoFields() for i in range(2)])
self.assertEqual(NoFields.objects.count(), 2)
@skipUnlessDBFeature("has_bulk_insert")
def test_explicit_batch_size_efficiency(self):
objs = [TwoFields(f1=i, f2=i) for i in range(0, 100)]
with self.assertNumQueries(2):
TwoFields.objects.bulk_create(objs, 50)
TwoFields.objects.all().delete()
with self.assertNumQueries(1):
TwoFields.objects.bulk_create(objs, len(objs))
@skipUnlessDBFeature("has_bulk_insert")
def test_explicit_batch_size_respects_max_batch_size(self):
objs = [Country(name=f"Country {i}") for i in range(1000)]
fields = ["name", "iso_two_letter", "description"]
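        # bulk_batch_size() reports how many rows fit in a single INSERT given
        # the backend's parameter limits; bulk_create() clamps any larger
        # batch_size down to it.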
max_batch_size = max(connection.ops.bulk_batch_size(fields, objs), 1)
with self.assertNumQueries(ceil(len(objs) / max_batch_size)):
Country.objects.bulk_create(objs, batch_size=max_batch_size + 1)
@skipUnlessDBFeature("has_bulk_insert")
def test_bulk_insert_expressions(self):
Restaurant.objects.bulk_create(
[
Restaurant(name="Sam's Shake Shack"),
Restaurant(name=Lower(Value("Betty's Beetroot Bar"))),
]
)
bbb = Restaurant.objects.filter(name="betty's beetroot bar")
self.assertEqual(bbb.count(), 1)
@skipUnlessDBFeature("has_bulk_insert")
def test_bulk_insert_nullable_fields(self):
fk_to_auto_fields = {
"auto_field": NoFields.objects.create(),
"small_auto_field": SmallAutoFieldModel.objects.create(),
"big_auto_field": BigAutoFieldModel.objects.create(),
}
# NULL can be mixed with other values in nullable fields
nullable_fields = [
field for field in NullableFields._meta.get_fields() if field.name != "id"
]
NullableFields.objects.bulk_create(
[
NullableFields(**{**fk_to_auto_fields, field.name: None})
for field in nullable_fields
]
)
self.assertEqual(NullableFields.objects.count(), len(nullable_fields))
for field in nullable_fields:
with self.subTest(field=field):
field_value = "" if isinstance(field, FileField) else None
self.assertEqual(
NullableFields.objects.filter(**{field.name: field_value}).count(),
1,
)
@skipUnlessDBFeature("can_return_rows_from_bulk_insert")
def test_set_pk_and_insert_single_item(self):
with self.assertNumQueries(1):
countries = Country.objects.bulk_create([self.data[0]])
self.assertEqual(len(countries), 1)
self.assertEqual(Country.objects.get(pk=countries[0].pk), countries[0])
@skipUnlessDBFeature("can_return_rows_from_bulk_insert")
def test_set_pk_and_query_efficiency(self):
with self.assertNumQueries(1):
countries = Country.objects.bulk_create(self.data)
self.assertEqual(len(countries), 4)
self.assertEqual(Country.objects.get(pk=countries[0].pk), countries[0])
self.assertEqual(Country.objects.get(pk=countries[1].pk), countries[1])
self.assertEqual(Country.objects.get(pk=countries[2].pk), countries[2])
self.assertEqual(Country.objects.get(pk=countries[3].pk), countries[3])
@skipUnlessDBFeature("can_return_rows_from_bulk_insert")
def test_set_state(self):
country_nl = Country(name="Netherlands", iso_two_letter="NL")
country_be = Country(name="Belgium", iso_two_letter="BE")
Country.objects.bulk_create([country_nl])
country_be.save()
        # Objects saved via bulk_create() and save() should have equal state.
self.assertEqual(country_nl._state.adding, country_be._state.adding)
self.assertEqual(country_nl._state.db, country_be._state.db)
def test_set_state_with_pk_specified(self):
state_ca = State(two_letter_code="CA")
state_ny = State(two_letter_code="NY")
State.objects.bulk_create([state_ca])
state_ny.save()
        # Objects saved via bulk_create() and save() should have equal state.
self.assertEqual(state_ca._state.adding, state_ny._state.adding)
self.assertEqual(state_ca._state.db, state_ny._state.db)
@skipIfDBFeature("supports_ignore_conflicts")
def test_ignore_conflicts_value_error(self):
message = "This database backend does not support ignoring conflicts."
with self.assertRaisesMessage(NotSupportedError, message):
TwoFields.objects.bulk_create(self.data, ignore_conflicts=True)
@skipUnlessDBFeature("supports_ignore_conflicts")
def test_ignore_conflicts_ignore(self):
data = [
TwoFields(f1=1, f2=1),
TwoFields(f1=2, f2=2),
TwoFields(f1=3, f2=3),
]
TwoFields.objects.bulk_create(data)
self.assertEqual(TwoFields.objects.count(), 3)
# With ignore_conflicts=True, conflicts are ignored.
conflicting_objects = [
TwoFields(f1=2, f2=2),
TwoFields(f1=3, f2=3),
]
TwoFields.objects.bulk_create([conflicting_objects[0]], ignore_conflicts=True)
TwoFields.objects.bulk_create(conflicting_objects, ignore_conflicts=True)
self.assertEqual(TwoFields.objects.count(), 3)
self.assertIsNone(conflicting_objects[0].pk)
self.assertIsNone(conflicting_objects[1].pk)
# New objects are created and conflicts are ignored.
new_object = TwoFields(f1=4, f2=4)
TwoFields.objects.bulk_create(
conflicting_objects + [new_object], ignore_conflicts=True
)
self.assertEqual(TwoFields.objects.count(), 4)
self.assertIsNone(new_object.pk)
        # Without ignore_conflicts=True, the conflicting rows raise an
        # IntegrityError.
with self.assertRaises(IntegrityError):
TwoFields.objects.bulk_create(conflicting_objects)
def test_nullable_fk_after_parent(self):
parent = NoFields()
child = NullableFields(auto_field=parent, integer_field=88)
parent.save()
NullableFields.objects.bulk_create([child])
child = NullableFields.objects.get(integer_field=88)
self.assertEqual(child.auto_field, parent)
@skipUnlessDBFeature("can_return_rows_from_bulk_insert")
def test_nullable_fk_after_parent_bulk_create(self):
parent = NoFields()
child = NullableFields(auto_field=parent, integer_field=88)
NoFields.objects.bulk_create([parent])
NullableFields.objects.bulk_create([child])
child = NullableFields.objects.get(integer_field=88)
self.assertEqual(child.auto_field, parent)
def test_unsaved_parent(self):
parent = NoFields()
msg = (
"bulk_create() prohibited to prevent data loss due to unsaved "
"related object 'auto_field'."
)
with self.assertRaisesMessage(ValueError, msg):
NullableFields.objects.bulk_create([NullableFields(auto_field=parent)])
def test_invalid_batch_size_exception(self):
msg = "Batch size must be a positive integer."
with self.assertRaisesMessage(ValueError, msg):
Country.objects.bulk_create([], batch_size=-1)
@skipIfDBFeature("supports_update_conflicts")
def test_update_conflicts_unsupported(self):
msg = "This database backend does not support updating conflicts."
with self.assertRaisesMessage(NotSupportedError, msg):
Country.objects.bulk_create(self.data, update_conflicts=True)
@skipUnlessDBFeature("supports_ignore_conflicts", "supports_update_conflicts")
def test_ignore_update_conflicts_exclusive(self):
msg = "ignore_conflicts and update_conflicts are mutually exclusive"
with self.assertRaisesMessage(ValueError, msg):
Country.objects.bulk_create(
self.data,
ignore_conflicts=True,
update_conflicts=True,
)
@skipUnlessDBFeature("supports_update_conflicts")
def test_update_conflicts_no_update_fields(self):
msg = (
"Fields that will be updated when a row insertion fails on "
"conflicts must be provided."
)
with self.assertRaisesMessage(ValueError, msg):
Country.objects.bulk_create(self.data, update_conflicts=True)
@skipUnlessDBFeature("supports_update_conflicts")
@skipIfDBFeature("supports_update_conflicts_with_target")
def test_update_conflicts_unique_field_unsupported(self):
msg = (
"This database backend does not support updating conflicts with "
"specifying unique fields that can trigger the upsert."
)
with self.assertRaisesMessage(NotSupportedError, msg):
TwoFields.objects.bulk_create(
[TwoFields(f1=1, f2=1), TwoFields(f1=2, f2=2)],
update_conflicts=True,
update_fields=["f2"],
unique_fields=["f1"],
)
@skipUnlessDBFeature("supports_update_conflicts")
def test_update_conflicts_nonexistent_update_fields(self):
unique_fields = None
if connection.features.supports_update_conflicts_with_target:
unique_fields = ["f1"]
msg = "TwoFields has no field named 'nonexistent'"
with self.assertRaisesMessage(FieldDoesNotExist, msg):
TwoFields.objects.bulk_create(
[TwoFields(f1=1, f2=1), TwoFields(f1=2, f2=2)],
update_conflicts=True,
update_fields=["nonexistent"],
unique_fields=unique_fields,
)
@skipUnlessDBFeature(
"supports_update_conflicts",
"supports_update_conflicts_with_target",
)
def test_update_conflicts_unique_fields_required(self):
msg = "Unique fields that can trigger the upsert must be provided."
with self.assertRaisesMessage(ValueError, msg):
TwoFields.objects.bulk_create(
[TwoFields(f1=1, f2=1), TwoFields(f1=2, f2=2)],
update_conflicts=True,
update_fields=["f1"],
)
@skipUnlessDBFeature(
"supports_update_conflicts",
"supports_update_conflicts_with_target",
)
def test_update_conflicts_invalid_update_fields(self):
msg = "bulk_create() can only be used with concrete fields in update_fields."
# Reverse one-to-one relationship.
with self.assertRaisesMessage(ValueError, msg):
Country.objects.bulk_create(
self.data,
update_conflicts=True,
update_fields=["relatedmodel"],
unique_fields=["pk"],
)
# Many-to-many relationship.
with self.assertRaisesMessage(ValueError, msg):
RelatedModel.objects.bulk_create(
[RelatedModel(country=self.data[0])],
update_conflicts=True,
update_fields=["big_auto_fields"],
unique_fields=["country"],
)
@skipUnlessDBFeature(
"supports_update_conflicts",
"supports_update_conflicts_with_target",
)
def test_update_conflicts_pk_in_update_fields(self):
msg = "bulk_create() cannot be used with primary keys in update_fields."
with self.assertRaisesMessage(ValueError, msg):
BigAutoFieldModel.objects.bulk_create(
[BigAutoFieldModel()],
update_conflicts=True,
update_fields=["id"],
unique_fields=["id"],
)
@skipUnlessDBFeature(
"supports_update_conflicts",
"supports_update_conflicts_with_target",
)
def test_update_conflicts_invalid_unique_fields(self):
msg = "bulk_create() can only be used with concrete fields in unique_fields."
# Reverse one-to-one relationship.
with self.assertRaisesMessage(ValueError, msg):
Country.objects.bulk_create(
self.data,
update_conflicts=True,
update_fields=["name"],
unique_fields=["relatedmodel"],
)
# Many-to-many relationship.
with self.assertRaisesMessage(ValueError, msg):
RelatedModel.objects.bulk_create(
[RelatedModel(country=self.data[0])],
update_conflicts=True,
update_fields=["name"],
unique_fields=["big_auto_fields"],
)
def _test_update_conflicts_two_fields(self, unique_fields):
TwoFields.objects.bulk_create(
[
TwoFields(f1=1, f2=1, name="a"),
TwoFields(f1=2, f2=2, name="b"),
]
)
self.assertEqual(TwoFields.objects.count(), 2)
conflicting_objects = [
TwoFields(f1=1, f2=1, name="c"),
TwoFields(f1=2, f2=2, name="d"),
]
TwoFields.objects.bulk_create(
conflicting_objects,
update_conflicts=True,
unique_fields=unique_fields,
update_fields=["name"],
)
self.assertEqual(TwoFields.objects.count(), 2)
self.assertCountEqual(
TwoFields.objects.values("f1", "f2", "name"),
[
{"f1": 1, "f2": 1, "name": "c"},
{"f1": 2, "f2": 2, "name": "d"},
],
)
@skipUnlessDBFeature(
"supports_update_conflicts", "supports_update_conflicts_with_target"
)
def test_update_conflicts_two_fields_unique_fields_first(self):
self._test_update_conflicts_two_fields(["f1"])
@skipUnlessDBFeature(
"supports_update_conflicts", "supports_update_conflicts_with_target"
)
def test_update_conflicts_two_fields_unique_fields_second(self):
self._test_update_conflicts_two_fields(["f2"])
@skipUnlessDBFeature(
"supports_update_conflicts", "supports_update_conflicts_with_target"
)
def test_update_conflicts_unique_fields_pk(self):
TwoFields.objects.bulk_create(
[
TwoFields(f1=1, f2=1, name="a"),
TwoFields(f1=2, f2=2, name="b"),
]
)
self.assertEqual(TwoFields.objects.count(), 2)
obj1 = TwoFields.objects.get(f1=1)
obj2 = TwoFields.objects.get(f1=2)
conflicting_objects = [
TwoFields(pk=obj1.pk, f1=3, f2=3, name="c"),
TwoFields(pk=obj2.pk, f1=4, f2=4, name="d"),
]
TwoFields.objects.bulk_create(
conflicting_objects,
update_conflicts=True,
unique_fields=["pk"],
update_fields=["name"],
)
self.assertEqual(TwoFields.objects.count(), 2)
self.assertCountEqual(
TwoFields.objects.values("f1", "f2", "name"),
[
{"f1": 1, "f2": 1, "name": "c"},
{"f1": 2, "f2": 2, "name": "d"},
],
)
@skipUnlessDBFeature(
"supports_update_conflicts", "supports_update_conflicts_with_target"
)
def test_update_conflicts_two_fields_unique_fields_both(self):
with self.assertRaises((OperationalError, ProgrammingError)):
self._test_update_conflicts_two_fields(["f1", "f2"])
@skipUnlessDBFeature("supports_update_conflicts")
@skipIfDBFeature("supports_update_conflicts_with_target")
def test_update_conflicts_two_fields_no_unique_fields(self):
self._test_update_conflicts_two_fields([])
def _test_update_conflicts_unique_two_fields(self, unique_fields):
Country.objects.bulk_create(self.data)
self.assertEqual(Country.objects.count(), 4)
new_data = [
# Conflicting countries.
Country(
name="Germany",
iso_two_letter="DE",
description=("Germany is a country in Central Europe."),
),
Country(
name="Czech Republic",
iso_two_letter="CZ",
description=(
"The Czech Republic is a landlocked country in Central Europe."
),
),
# New countries.
Country(name="Australia", iso_two_letter="AU"),
Country(
name="Japan",
iso_two_letter="JP",
description=("Japan is an island country in East Asia."),
),
]
Country.objects.bulk_create(
new_data,
update_conflicts=True,
update_fields=["description"],
unique_fields=unique_fields,
)
self.assertEqual(Country.objects.count(), 6)
self.assertCountEqual(
Country.objects.values("iso_two_letter", "description"),
[
{"iso_two_letter": "US", "description": ""},
{"iso_two_letter": "NL", "description": ""},
{
"iso_two_letter": "DE",
"description": ("Germany is a country in Central Europe."),
},
{
"iso_two_letter": "CZ",
"description": (
"The Czech Republic is a landlocked country in Central Europe."
),
},
{"iso_two_letter": "AU", "description": ""},
{
"iso_two_letter": "JP",
"description": ("Japan is an island country in East Asia."),
},
],
)
@skipUnlessDBFeature(
"supports_update_conflicts", "supports_update_conflicts_with_target"
)
def test_update_conflicts_unique_two_fields_unique_fields_both(self):
self._test_update_conflicts_unique_two_fields(["iso_two_letter", "name"])
@skipUnlessDBFeature(
"supports_update_conflicts", "supports_update_conflicts_with_target"
)
def test_update_conflicts_unique_two_fields_unique_fields_one(self):
with self.assertRaises((OperationalError, ProgrammingError)):
self._test_update_conflicts_unique_two_fields(["iso_two_letter"])
@skipUnlessDBFeature("supports_update_conflicts")
@skipIfDBFeature("supports_update_conflicts_with_target")
def test_update_conflicts_unique_two_fields_unique_no_unique_fields(self):
self._test_update_conflicts_unique_two_fields([])
def _test_update_conflicts(self, unique_fields):
UpsertConflict.objects.bulk_create(
[
UpsertConflict(number=1, rank=1, name="John"),
UpsertConflict(number=2, rank=2, name="Mary"),
UpsertConflict(number=3, rank=3, name="Hannah"),
]
)
self.assertEqual(UpsertConflict.objects.count(), 3)
conflicting_objects = [
UpsertConflict(number=1, rank=4, name="Steve"),
UpsertConflict(number=2, rank=2, name="Olivia"),
UpsertConflict(number=3, rank=1, name="Hannah"),
]
UpsertConflict.objects.bulk_create(
conflicting_objects,
update_conflicts=True,
update_fields=["name", "rank"],
unique_fields=unique_fields,
)
self.assertEqual(UpsertConflict.objects.count(), 3)
self.assertCountEqual(
UpsertConflict.objects.values("number", "rank", "name"),
[
{"number": 1, "rank": 4, "name": "Steve"},
{"number": 2, "rank": 2, "name": "Olivia"},
{"number": 3, "rank": 1, "name": "Hannah"},
],
)
UpsertConflict.objects.bulk_create(
conflicting_objects + [UpsertConflict(number=4, rank=4, name="Mark")],
update_conflicts=True,
update_fields=["name", "rank"],
unique_fields=unique_fields,
)
self.assertEqual(UpsertConflict.objects.count(), 4)
self.assertCountEqual(
UpsertConflict.objects.values("number", "rank", "name"),
[
{"number": 1, "rank": 4, "name": "Steve"},
{"number": 2, "rank": 2, "name": "Olivia"},
{"number": 3, "rank": 1, "name": "Hannah"},
{"number": 4, "rank": 4, "name": "Mark"},
],
)
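    # The two tests below cover both upsert flavors: backends with
    # supports_update_conflicts_with_target (e.g. PostgreSQL's
    # ON CONFLICT ... DO UPDATE) require unique_fields as the conflict target,
    # while MySQL's ON DUPLICATE KEY UPDATE takes no target.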
@skipUnlessDBFeature(
"supports_update_conflicts", "supports_update_conflicts_with_target"
)
def test_update_conflicts_unique_fields(self):
self._test_update_conflicts(unique_fields=["number"])
@skipUnlessDBFeature("supports_update_conflicts")
@skipIfDBFeature("supports_update_conflicts_with_target")
def test_update_conflicts_no_unique_fields(self):
self._test_update_conflicts([])
|
ba0625cc568d818cf634b10a215feef0c6219d6016543945affa041d928e8d5b | import datetime
from decimal import Decimal
from unittest import mock
from django.core.exceptions import FieldError
from django.db import NotSupportedError, connection
from django.db.models import (
Avg,
Case,
Count,
F,
IntegerField,
Max,
Min,
OuterRef,
Q,
RowRange,
Subquery,
Sum,
Value,
ValueRange,
When,
Window,
WindowFrame,
)
from django.db.models.fields.json import KeyTextTransform, KeyTransform
from django.db.models.functions import (
Cast,
CumeDist,
DenseRank,
ExtractYear,
FirstValue,
Lag,
LastValue,
Lead,
NthValue,
Ntile,
PercentRank,
Rank,
RowNumber,
Upper,
)
from django.db.models.lookups import Exact
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from django.test.utils import CaptureQueriesContext
from .models import Classification, Detail, Employee, PastEmployeeDepartment
@skipUnlessDBFeature("supports_over_clause")
class WindowFunctionTests(TestCase):
@classmethod
def setUpTestData(cls):
classification = Classification.objects.create()
Employee.objects.bulk_create(
[
Employee(
name=e[0],
salary=e[1],
department=e[2],
hire_date=e[3],
age=e[4],
bonus=Decimal(e[1]) / 400,
classification=classification,
)
for e in [
("Jones", 45000, "Accounting", datetime.datetime(2005, 11, 1), 20),
(
"Williams",
37000,
"Accounting",
datetime.datetime(2009, 6, 1),
20,
),
("Jenson", 45000, "Accounting", datetime.datetime(2008, 4, 1), 20),
("Adams", 50000, "Accounting", datetime.datetime(2013, 7, 1), 50),
("Smith", 55000, "Sales", datetime.datetime(2007, 6, 1), 30),
("Brown", 53000, "Sales", datetime.datetime(2009, 9, 1), 30),
("Johnson", 40000, "Marketing", datetime.datetime(2012, 3, 1), 30),
("Smith", 38000, "Marketing", datetime.datetime(2009, 10, 1), 20),
("Wilkinson", 60000, "IT", datetime.datetime(2011, 3, 1), 40),
("Moore", 34000, "IT", datetime.datetime(2013, 8, 1), 40),
("Miller", 100000, "Management", datetime.datetime(2005, 6, 1), 40),
("Johnson", 80000, "Management", datetime.datetime(2005, 7, 1), 50),
]
]
)
employees = list(Employee.objects.order_by("pk"))
PastEmployeeDepartment.objects.bulk_create(
[
PastEmployeeDepartment(employee=employees[6], department="Sales"),
PastEmployeeDepartment(employee=employees[10], department="IT"),
]
)
def test_dense_rank(self):
tests = [
ExtractYear(F("hire_date")).asc(),
F("hire_date__year").asc(),
"hire_date__year",
]
for order_by in tests:
with self.subTest(order_by=order_by):
qs = Employee.objects.annotate(
rank=Window(expression=DenseRank(), order_by=order_by),
)
self.assertQuerySetEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 1),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 1),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 1),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 2),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 3),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 4),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 4),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 4),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 5),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 6),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 7),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 7),
],
lambda entry: (
entry.name,
entry.salary,
entry.department,
entry.hire_date,
entry.rank,
),
ordered=False,
)
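    # DenseRank (above) leaves no gaps after ties (1, 1, 1, 2, ...), unlike
    # Rank (test_rank below), which skips to 4 after the three-way tie.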
def test_department_salary(self):
qs = Employee.objects.annotate(
department_sum=Window(
expression=Sum("salary"),
partition_by=F("department"),
order_by=[F("hire_date").asc()],
)
).order_by("department", "department_sum")
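        # Sum with an ORDER BY inside the partition is a running total: the
        # default frame spans from the start of the partition to the current
        # row and its ordering peers.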
self.assertQuerySetEqual(
qs,
[
("Jones", "Accounting", 45000, 45000),
("Jenson", "Accounting", 45000, 90000),
("Williams", "Accounting", 37000, 127000),
("Adams", "Accounting", 50000, 177000),
("Wilkinson", "IT", 60000, 60000),
("Moore", "IT", 34000, 94000),
("Miller", "Management", 100000, 100000),
("Johnson", "Management", 80000, 180000),
("Smith", "Marketing", 38000, 38000),
("Johnson", "Marketing", 40000, 78000),
("Smith", "Sales", 55000, 55000),
("Brown", "Sales", 53000, 108000),
],
lambda entry: (
entry.name,
entry.department,
entry.salary,
entry.department_sum,
),
)
def test_rank(self):
"""
        Rank the employees based on the year they were hired. Since there
are multiple employees hired in different years, this will contain
gaps.
"""
qs = Employee.objects.annotate(
rank=Window(
expression=Rank(),
order_by=F("hire_date__year").asc(),
)
)
self.assertQuerySetEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 1),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 1),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 1),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 4),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 5),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 6),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 6),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 6),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 9),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 10),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 11),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 11),
],
lambda entry: (
entry.name,
entry.salary,
entry.department,
entry.hire_date,
entry.rank,
),
ordered=False,
)
def test_row_number(self):
"""
The row number window function computes the number based on the order
        in which the tuples were inserted. Some backends, such as Oracle,
        require an ordering clause in the Window expression.
"""
qs = Employee.objects.annotate(
row_number=Window(
expression=RowNumber(),
order_by=F("pk").asc(),
)
).order_by("pk")
self.assertQuerySetEqual(
qs,
[
("Jones", "Accounting", 1),
("Williams", "Accounting", 2),
("Jenson", "Accounting", 3),
("Adams", "Accounting", 4),
("Smith", "Sales", 5),
("Brown", "Sales", 6),
("Johnson", "Marketing", 7),
("Smith", "Marketing", 8),
("Wilkinson", "IT", 9),
("Moore", "IT", 10),
("Miller", "Management", 11),
("Johnson", "Management", 12),
],
lambda entry: (entry.name, entry.department, entry.row_number),
)
def test_row_number_no_ordering(self):
"""
The row number window function computes the number based on the order
in which the tuples were inserted.
"""
# Add a default ordering for consistent results across databases.
qs = Employee.objects.annotate(
row_number=Window(
expression=RowNumber(),
)
).order_by("pk")
self.assertQuerySetEqual(
qs,
[
("Jones", "Accounting", 1),
("Williams", "Accounting", 2),
("Jenson", "Accounting", 3),
("Adams", "Accounting", 4),
("Smith", "Sales", 5),
("Brown", "Sales", 6),
("Johnson", "Marketing", 7),
("Smith", "Marketing", 8),
("Wilkinson", "IT", 9),
("Moore", "IT", 10),
("Miller", "Management", 11),
("Johnson", "Management", 12),
],
lambda entry: (entry.name, entry.department, entry.row_number),
)
def test_avg_salary_department(self):
qs = Employee.objects.annotate(
avg_salary=Window(
expression=Avg("salary"),
order_by=F("department").asc(),
partition_by="department",
)
).order_by("department", "-salary", "name")
self.assertQuerySetEqual(
qs,
[
("Adams", 50000, "Accounting", 44250.00),
("Jenson", 45000, "Accounting", 44250.00),
("Jones", 45000, "Accounting", 44250.00),
("Williams", 37000, "Accounting", 44250.00),
("Wilkinson", 60000, "IT", 47000.00),
("Moore", 34000, "IT", 47000.00),
("Miller", 100000, "Management", 90000.00),
("Johnson", 80000, "Management", 90000.00),
("Johnson", 40000, "Marketing", 39000.00),
("Smith", 38000, "Marketing", 39000.00),
("Smith", 55000, "Sales", 54000.00),
("Brown", 53000, "Sales", 54000.00),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.avg_salary,
),
)
def test_lag(self):
"""
Compute the difference between an employee's salary and the next
highest salary in the employee's department. Return None if the
employee has the lowest salary.
"""
qs = Employee.objects.annotate(
lag=Window(
expression=Lag(expression="salary", offset=1),
partition_by=F("department"),
order_by=[F("salary").asc(), F("name").asc()],
)
).order_by("department", F("salary").asc(), F("name").asc())
self.assertQuerySetEqual(
qs,
[
("Williams", 37000, "Accounting", None),
("Jenson", 45000, "Accounting", 37000),
("Jones", 45000, "Accounting", 45000),
("Adams", 50000, "Accounting", 45000),
("Moore", 34000, "IT", None),
("Wilkinson", 60000, "IT", 34000),
("Johnson", 80000, "Management", None),
("Miller", 100000, "Management", 80000),
("Smith", 38000, "Marketing", None),
("Johnson", 40000, "Marketing", 38000),
("Brown", 53000, "Sales", None),
("Smith", 55000, "Sales", 53000),
],
transform=lambda row: (row.name, row.salary, row.department, row.lag),
)
def test_lag_decimalfield(self):
qs = Employee.objects.annotate(
lag=Window(
expression=Lag(expression="bonus", offset=1),
partition_by=F("department"),
order_by=[F("bonus").asc(), F("name").asc()],
)
).order_by("department", F("bonus").asc(), F("name").asc())
self.assertQuerySetEqual(
qs,
[
("Williams", 92.5, "Accounting", None),
("Jenson", 112.5, "Accounting", 92.5),
("Jones", 112.5, "Accounting", 112.5),
("Adams", 125, "Accounting", 112.5),
("Moore", 85, "IT", None),
("Wilkinson", 150, "IT", 85),
("Johnson", 200, "Management", None),
("Miller", 250, "Management", 200),
("Smith", 95, "Marketing", None),
("Johnson", 100, "Marketing", 95),
("Brown", 132.5, "Sales", None),
("Smith", 137.5, "Sales", 132.5),
],
transform=lambda row: (row.name, row.bonus, row.department, row.lag),
)
def test_first_value(self):
qs = Employee.objects.annotate(
first_value=Window(
expression=FirstValue("salary"),
partition_by=F("department"),
order_by=F("hire_date").asc(),
)
).order_by("department", "hire_date")
self.assertQuerySetEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 45000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 45000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 45000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 45000),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 60000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 60000),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 100000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 100000),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 38000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 38000),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 55000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 55000),
],
lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.first_value,
),
)
def test_last_value(self):
qs = Employee.objects.annotate(
last_value=Window(
expression=LastValue("hire_date"),
partition_by=F("department"),
order_by=F("hire_date").asc(),
)
)
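        # With an ORDER BY and the default frame ending at the current row,
        # LastValue returns each row's own hire_date rather than the
        # partition-wide latest date.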
self.assertQuerySetEqual(
qs,
[
(
"Adams",
"Accounting",
datetime.date(2013, 7, 1),
50000,
datetime.date(2013, 7, 1),
),
(
"Jenson",
"Accounting",
datetime.date(2008, 4, 1),
45000,
datetime.date(2008, 4, 1),
),
(
"Jones",
"Accounting",
datetime.date(2005, 11, 1),
45000,
datetime.date(2005, 11, 1),
),
(
"Williams",
"Accounting",
datetime.date(2009, 6, 1),
37000,
datetime.date(2009, 6, 1),
),
(
"Moore",
"IT",
datetime.date(2013, 8, 1),
34000,
datetime.date(2013, 8, 1),
),
(
"Wilkinson",
"IT",
datetime.date(2011, 3, 1),
60000,
datetime.date(2011, 3, 1),
),
(
"Miller",
"Management",
datetime.date(2005, 6, 1),
100000,
datetime.date(2005, 6, 1),
),
(
"Johnson",
"Management",
datetime.date(2005, 7, 1),
80000,
datetime.date(2005, 7, 1),
),
(
"Johnson",
"Marketing",
datetime.date(2012, 3, 1),
40000,
datetime.date(2012, 3, 1),
),
(
"Smith",
"Marketing",
datetime.date(2009, 10, 1),
38000,
datetime.date(2009, 10, 1),
),
(
"Brown",
"Sales",
datetime.date(2009, 9, 1),
53000,
datetime.date(2009, 9, 1),
),
(
"Smith",
"Sales",
datetime.date(2007, 6, 1),
55000,
datetime.date(2007, 6, 1),
),
],
transform=lambda row: (
row.name,
row.department,
row.hire_date,
row.salary,
row.last_value,
),
ordered=False,
)
def test_function_list_of_values(self):
qs = (
Employee.objects.annotate(
lead=Window(
expression=Lead(expression="salary"),
order_by=[F("hire_date").asc(), F("name").desc()],
partition_by="department",
)
)
.values_list("name", "salary", "department", "hire_date", "lead")
.order_by("department", F("hire_date").asc(), F("name").desc())
)
self.assertNotIn("GROUP BY", str(qs.query))
self.assertSequenceEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 45000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 37000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 50000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), None),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 34000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), None),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 80000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), None),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 40000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), None),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 53000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), None),
],
)
def test_min_department(self):
"""An alternative way to specify a query for FirstValue."""
qs = Employee.objects.annotate(
min_salary=Window(
expression=Min("salary"),
partition_by=F("department"),
order_by=[F("salary").asc(), F("name").asc()],
)
).order_by("department", "salary", "name")
self.assertQuerySetEqual(
qs,
[
("Williams", "Accounting", 37000, 37000),
("Jenson", "Accounting", 45000, 37000),
("Jones", "Accounting", 45000, 37000),
("Adams", "Accounting", 50000, 37000),
("Moore", "IT", 34000, 34000),
("Wilkinson", "IT", 60000, 34000),
("Johnson", "Management", 80000, 80000),
("Miller", "Management", 100000, 80000),
("Smith", "Marketing", 38000, 38000),
("Johnson", "Marketing", 40000, 38000),
("Brown", "Sales", 53000, 53000),
("Smith", "Sales", 55000, 53000),
],
lambda row: (row.name, row.department, row.salary, row.min_salary),
)
def test_max_per_year(self):
"""
Find the maximum salary awarded in the same year as the
employee was hired, regardless of the department.
"""
qs = Employee.objects.annotate(
max_salary_year=Window(
expression=Max("salary"),
order_by=ExtractYear("hire_date").asc(),
partition_by=ExtractYear("hire_date"),
)
).order_by(ExtractYear("hire_date"), "salary")
self.assertQuerySetEqual(
qs,
[
("Jones", "Accounting", 45000, 2005, 100000),
("Johnson", "Management", 80000, 2005, 100000),
("Miller", "Management", 100000, 2005, 100000),
("Smith", "Sales", 55000, 2007, 55000),
("Jenson", "Accounting", 45000, 2008, 45000),
("Williams", "Accounting", 37000, 2009, 53000),
("Smith", "Marketing", 38000, 2009, 53000),
("Brown", "Sales", 53000, 2009, 53000),
("Wilkinson", "IT", 60000, 2011, 60000),
("Johnson", "Marketing", 40000, 2012, 40000),
("Moore", "IT", 34000, 2013, 50000),
("Adams", "Accounting", 50000, 2013, 50000),
],
lambda row: (
row.name,
row.department,
row.salary,
row.hire_date.year,
row.max_salary_year,
),
)
def test_cume_dist(self):
"""
Compute the cumulative distribution for the employees based on the
salary in increasing order. Equal to rank/total number of rows (12).
"""
qs = Employee.objects.annotate(
cume_dist=Window(
expression=CumeDist(),
order_by=F("salary").asc(),
)
).order_by("salary", "name")
# Round result of cume_dist because Oracle uses greater precision.
self.assertQuerySetEqual(
qs,
[
("Moore", "IT", 34000, 0.0833333333),
("Williams", "Accounting", 37000, 0.1666666667),
("Smith", "Marketing", 38000, 0.25),
("Johnson", "Marketing", 40000, 0.3333333333),
("Jenson", "Accounting", 45000, 0.5),
("Jones", "Accounting", 45000, 0.5),
("Adams", "Accounting", 50000, 0.5833333333),
("Brown", "Sales", 53000, 0.6666666667),
("Smith", "Sales", 55000, 0.75),
("Wilkinson", "IT", 60000, 0.8333333333),
("Johnson", "Management", 80000, 0.9166666667),
("Miller", "Management", 100000, 1),
],
lambda row: (
row.name,
row.department,
row.salary,
round(row.cume_dist, 10),
),
)
def test_nthvalue(self):
qs = Employee.objects.annotate(
nth_value=Window(
expression=NthValue(expression="salary", nth=2),
order_by=[F("hire_date").asc(), F("name").desc()],
partition_by=F("department"),
)
).order_by("department", "hire_date", "name")
self.assertQuerySetEqual(
qs,
[
("Jones", "Accounting", datetime.date(2005, 11, 1), 45000, None),
("Jenson", "Accounting", datetime.date(2008, 4, 1), 45000, 45000),
("Williams", "Accounting", datetime.date(2009, 6, 1), 37000, 45000),
("Adams", "Accounting", datetime.date(2013, 7, 1), 50000, 45000),
("Wilkinson", "IT", datetime.date(2011, 3, 1), 60000, None),
("Moore", "IT", datetime.date(2013, 8, 1), 34000, 34000),
("Miller", "Management", datetime.date(2005, 6, 1), 100000, None),
("Johnson", "Management", datetime.date(2005, 7, 1), 80000, 80000),
("Smith", "Marketing", datetime.date(2009, 10, 1), 38000, None),
("Johnson", "Marketing", datetime.date(2012, 3, 1), 40000, 40000),
("Smith", "Sales", datetime.date(2007, 6, 1), 55000, None),
("Brown", "Sales", datetime.date(2009, 9, 1), 53000, 53000),
],
lambda row: (
row.name,
row.department,
row.hire_date,
row.salary,
row.nth_value,
),
)
def test_lead(self):
"""
Determine what the next person hired in the same department makes.
Because the dataset is ambiguous, the name is also part of the
ordering clause. No default is provided, so None/NULL should be
returned.
"""
qs = Employee.objects.annotate(
lead=Window(
expression=Lead(expression="salary"),
order_by=[F("hire_date").asc(), F("name").desc()],
partition_by="department",
)
).order_by("department", F("hire_date").asc(), F("name").desc())
self.assertQuerySetEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 45000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 37000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 50000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), None),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 34000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), None),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 80000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), None),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 40000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), None),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 53000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), None),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.lead,
),
)
def test_lead_offset(self):
"""
Determine what the person hired after someone makes. Due to
ambiguity, the name is also included in the ordering.
"""
qs = Employee.objects.annotate(
lead=Window(
expression=Lead("salary", offset=2),
partition_by="department",
order_by=F("hire_date").asc(),
)
)
self.assertQuerySetEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 37000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 50000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), None),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), None),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), None),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), None),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), None),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), None),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), None),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), None),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), None),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), None),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.lead,
),
ordered=False,
)
@skipUnlessDBFeature("supports_default_in_lead_lag")
def test_lead_default(self):
qs = Employee.objects.annotate(
lead_default=Window(
expression=Lead(expression="salary", offset=5, default=60000),
partition_by=F("department"),
order_by=F("department").asc(),
)
)
self.assertEqual(
list(qs.values_list("lead_default", flat=True).distinct()), [60000]
)
def test_ntile(self):
"""
        Compute the group for each employee across the entire company, based
        on salary. There are twelve employees, so they divide evenly into four
        groups.
"""
qs = Employee.objects.annotate(
ntile=Window(
expression=Ntile(num_buckets=4),
order_by="-salary",
)
).order_by("ntile", "-salary", "name")
self.assertQuerySetEqual(
qs,
[
("Miller", "Management", 100000, 1),
("Johnson", "Management", 80000, 1),
("Wilkinson", "IT", 60000, 1),
("Smith", "Sales", 55000, 2),
("Brown", "Sales", 53000, 2),
("Adams", "Accounting", 50000, 2),
("Jenson", "Accounting", 45000, 3),
("Jones", "Accounting", 45000, 3),
("Johnson", "Marketing", 40000, 3),
("Smith", "Marketing", 38000, 4),
("Williams", "Accounting", 37000, 4),
("Moore", "IT", 34000, 4),
],
lambda x: (x.name, x.department, x.salary, x.ntile),
)
def test_percent_rank(self):
"""
Calculate the percentage rank of the employees across the entire
company based on salary and name (in case of ambiguity).
"""
qs = Employee.objects.annotate(
percent_rank=Window(
expression=PercentRank(),
order_by=[F("salary").asc(), F("name").asc()],
)
).order_by("percent_rank")
# Round to account for precision differences among databases.
self.assertQuerySetEqual(
qs,
[
("Moore", "IT", 34000, 0.0),
("Williams", "Accounting", 37000, 0.0909090909),
("Smith", "Marketing", 38000, 0.1818181818),
("Johnson", "Marketing", 40000, 0.2727272727),
("Jenson", "Accounting", 45000, 0.3636363636),
("Jones", "Accounting", 45000, 0.4545454545),
("Adams", "Accounting", 50000, 0.5454545455),
("Brown", "Sales", 53000, 0.6363636364),
("Smith", "Sales", 55000, 0.7272727273),
("Wilkinson", "IT", 60000, 0.8181818182),
("Johnson", "Management", 80000, 0.9090909091),
("Miller", "Management", 100000, 1.0),
],
transform=lambda row: (
row.name,
row.department,
row.salary,
round(row.percent_rank, 10),
),
)
def test_nth_returns_null(self):
"""
Find the nth row of the data set. None is returned since there are
fewer than 20 rows in the test data.
"""
qs = Employee.objects.annotate(
nth_value=Window(
expression=NthValue("salary", nth=20), order_by=F("salary").asc()
)
)
self.assertEqual(
list(qs.values_list("nth_value", flat=True).distinct()), [None]
)
def test_multiple_partitioning(self):
"""
Find the maximum salary for each department for people hired in the
same year.
"""
qs = Employee.objects.annotate(
max=Window(
expression=Max("salary"),
partition_by=[F("department"), F("hire_date__year")],
)
).order_by("department", "hire_date", "name")
self.assertQuerySetEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 45000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 45000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 37000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 50000),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 60000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 34000),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 100000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 100000),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 38000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 40000),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 55000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 53000),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.max,
),
)
def test_multiple_ordering(self):
"""
        Accumulate the salaries over the departments based on hire_date.
        If two people were hired on the same date in the same department, the
        name in the ordering clause breaks the tie, so they still receive
        distinct running totals.
"""
qs = Employee.objects.annotate(
sum=Window(
expression=Sum("salary"),
partition_by="department",
order_by=[F("hire_date").asc(), F("name").asc()],
)
).order_by("department", "sum")
self.assertQuerySetEqual(
qs,
[
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 45000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 90000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 127000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 177000),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 60000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 94000),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 100000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 180000),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 38000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 78000),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 55000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 108000),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.sum,
),
)
def test_related_ordering_with_count(self):
qs = Employee.objects.annotate(
department_sum=Window(
expression=Sum("salary"),
partition_by=F("department"),
order_by=["classification__code"],
)
)
self.assertEqual(qs.count(), 12)
def test_filter(self):
qs = Employee.objects.annotate(
department_salary_rank=Window(
Rank(), partition_by="department", order_by="-salary"
),
department_avg_age_diff=(
Window(Avg("age"), partition_by="department") - F("age")
),
).order_by("department", "name")
# Direct window reference.
self.assertQuerySetEqual(
qs.filter(department_salary_rank=1),
["Adams", "Wilkinson", "Miller", "Johnson", "Smith"],
lambda employee: employee.name,
)
# Through a combined expression containing a window.
self.assertQuerySetEqual(
qs.filter(department_avg_age_diff__gt=0),
["Jenson", "Jones", "Williams", "Miller", "Smith"],
lambda employee: employee.name,
)
# Intersection of multiple windows.
self.assertQuerySetEqual(
qs.filter(department_salary_rank=1, department_avg_age_diff__gt=0),
["Miller"],
lambda employee: employee.name,
)
# Union of multiple windows.
self.assertQuerySetEqual(
qs.filter(Q(department_salary_rank=1) | Q(department_avg_age_diff__gt=0)),
[
"Adams",
"Jenson",
"Jones",
"Williams",
"Wilkinson",
"Miller",
"Johnson",
"Smith",
"Smith",
],
lambda employee: employee.name,
)
def test_filter_conditional_annotation(self):
qs = (
Employee.objects.annotate(
rank=Window(Rank(), partition_by="department", order_by="-salary"),
case_first_rank=Case(
When(rank=1, then=True),
default=False,
),
q_first_rank=Q(rank=1),
)
.order_by("name")
.values_list("name", flat=True)
)
for annotation in ["case_first_rank", "q_first_rank"]:
with self.subTest(annotation=annotation):
self.assertSequenceEqual(
qs.filter(**{annotation: True}),
["Adams", "Johnson", "Miller", "Smith", "Wilkinson"],
)
def test_filter_conditional_expression(self):
qs = (
Employee.objects.filter(
Exact(Window(Rank(), partition_by="department", order_by="-salary"), 1)
)
.order_by("name")
.values_list("name", flat=True)
)
self.assertSequenceEqual(
qs, ["Adams", "Johnson", "Miller", "Smith", "Wilkinson"]
)
def test_filter_column_ref_rhs(self):
qs = (
Employee.objects.annotate(
max_dept_salary=Window(Max("salary"), partition_by="department")
)
.filter(max_dept_salary=F("salary"))
.order_by("name")
.values_list("name", flat=True)
)
self.assertSequenceEqual(
qs, ["Adams", "Johnson", "Miller", "Smith", "Wilkinson"]
)
def test_filter_values(self):
qs = (
Employee.objects.annotate(
department_salary_rank=Window(
Rank(), partition_by="department", order_by="-salary"
),
)
.order_by("department", "name")
.values_list(Upper("name"), flat=True)
)
self.assertSequenceEqual(
qs.filter(department_salary_rank=1),
["ADAMS", "WILKINSON", "MILLER", "JOHNSON", "SMITH"],
)
def test_filter_alias(self):
qs = Employee.objects.alias(
department_avg_age_diff=(
Window(Avg("age"), partition_by="department") - F("age")
),
).order_by("department", "name")
self.assertQuerySetEqual(
qs.filter(department_avg_age_diff__gt=0),
["Jenson", "Jones", "Williams", "Miller", "Smith"],
lambda employee: employee.name,
)
def test_filter_select_related(self):
qs = (
Employee.objects.alias(
department_avg_age_diff=(
Window(Avg("age"), partition_by="department") - F("age")
),
)
.select_related("classification")
.filter(department_avg_age_diff__gt=0)
.order_by("department", "name")
)
self.assertQuerySetEqual(
qs,
["Jenson", "Jones", "Williams", "Miller", "Smith"],
lambda employee: employee.name,
)
with self.assertNumQueries(0):
qs[0].classification
def test_exclude(self):
qs = Employee.objects.annotate(
department_salary_rank=Window(
Rank(), partition_by="department", order_by="-salary"
),
department_avg_age_diff=(
Window(Avg("age"), partition_by="department") - F("age")
),
).order_by("department", "name")
# Direct window reference.
self.assertQuerySetEqual(
qs.exclude(department_salary_rank__gt=1),
["Adams", "Wilkinson", "Miller", "Johnson", "Smith"],
lambda employee: employee.name,
)
# Through a combined expression containing a window.
self.assertQuerySetEqual(
qs.exclude(department_avg_age_diff__lte=0),
["Jenson", "Jones", "Williams", "Miller", "Smith"],
lambda employee: employee.name,
)
# Union of multiple windows.
self.assertQuerySetEqual(
qs.exclude(
Q(department_salary_rank__gt=1) | Q(department_avg_age_diff__lte=0)
),
["Miller"],
lambda employee: employee.name,
)
# Intersection of multiple windows.
self.assertQuerySetEqual(
qs.exclude(department_salary_rank__gt=1, department_avg_age_diff__lte=0),
[
"Adams",
"Jenson",
"Jones",
"Williams",
"Wilkinson",
"Miller",
"Johnson",
"Smith",
"Smith",
],
lambda employee: employee.name,
)
def test_heterogeneous_filter(self):
qs = (
Employee.objects.annotate(
department_salary_rank=Window(
Rank(), partition_by="department", order_by="-salary"
),
)
.order_by("name")
.values_list("name", flat=True)
)
# Heterogeneous filter between window function and aggregates pushes
# the WHERE clause to the QUALIFY outer query.
self.assertSequenceEqual(
qs.filter(
department_salary_rank=1, department__in=["Accounting", "Management"]
),
["Adams", "Miller"],
)
self.assertSequenceEqual(
qs.filter(
Q(department_salary_rank=1)
| Q(department__in=["Accounting", "Management"])
),
[
"Adams",
"Jenson",
"Johnson",
"Johnson",
"Jones",
"Miller",
"Smith",
"Wilkinson",
"Williams",
],
)
# Heterogeneous filter between window function and aggregates pushes
# the HAVING clause to the QUALIFY outer query.
qs = qs.annotate(past_department_count=Count("past_departments"))
self.assertSequenceEqual(
qs.filter(department_salary_rank=1, past_department_count__gte=1),
["Johnson", "Miller"],
)
self.assertSequenceEqual(
qs.filter(Q(department_salary_rank=1) | Q(past_department_count__gte=1)),
["Adams", "Johnson", "Miller", "Smith", "Wilkinson"],
)
def test_limited_filter(self):
"""
        A query filtering against a window function has its limit applied
        after window filtering takes place.
"""
self.assertQuerySetEqual(
Employee.objects.annotate(
department_salary_rank=Window(
Rank(), partition_by="department", order_by="-salary"
)
)
.filter(department_salary_rank=1)
.order_by("department")[0:3],
["Adams", "Wilkinson", "Miller"],
lambda employee: employee.name,
)
def test_filter_count(self):
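        # Filtering on a window annotation pushes the predicate into an outer
        # query (QUALIFY emulation), hence the three SELECTs and the absence
        # of GROUP BY asserted below.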
with CaptureQueriesContext(connection) as ctx:
self.assertEqual(
Employee.objects.annotate(
department_salary_rank=Window(
Rank(), partition_by="department", order_by="-salary"
)
)
.filter(department_salary_rank=1)
.count(),
5,
)
self.assertEqual(len(ctx.captured_queries), 1)
sql = ctx.captured_queries[0]["sql"].lower()
self.assertEqual(sql.count("select"), 3)
self.assertNotIn("group by", sql)
@skipUnlessDBFeature("supports_frame_range_fixed_distance")
def test_range_n_preceding_and_following(self):
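        # A RANGE frame compares ORDER BY values rather than row positions:
        # each row sums the salaries that lie within 2 of its own salary.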
qs = Employee.objects.annotate(
sum=Window(
expression=Sum("salary"),
order_by=F("salary").asc(),
partition_by="department",
frame=ValueRange(start=-2, end=2),
)
)
self.assertIn("RANGE BETWEEN 2 PRECEDING AND 2 FOLLOWING", str(qs.query))
self.assertQuerySetEqual(
qs,
[
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 37000),
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 90000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 90000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 50000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 53000),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 55000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 40000),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 38000),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 60000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 34000),
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 100000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 80000),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.sum,
),
ordered=False,
)
def test_range_unbound(self):
"""A query with RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING."""
qs = Employee.objects.annotate(
sum=Window(
expression=Sum("salary"),
partition_by="age",
order_by=[F("age").asc()],
frame=ValueRange(start=None, end=None),
)
).order_by("department", "hire_date", "name")
self.assertIn(
"RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING", str(qs.query)
)
self.assertQuerySetEqual(
qs,
[
("Jones", "Accounting", 45000, datetime.date(2005, 11, 1), 165000),
("Jenson", "Accounting", 45000, datetime.date(2008, 4, 1), 165000),
("Williams", "Accounting", 37000, datetime.date(2009, 6, 1), 165000),
("Adams", "Accounting", 50000, datetime.date(2013, 7, 1), 130000),
("Wilkinson", "IT", 60000, datetime.date(2011, 3, 1), 194000),
("Moore", "IT", 34000, datetime.date(2013, 8, 1), 194000),
("Miller", "Management", 100000, datetime.date(2005, 6, 1), 194000),
("Johnson", "Management", 80000, datetime.date(2005, 7, 1), 130000),
("Smith", "Marketing", 38000, datetime.date(2009, 10, 1), 165000),
("Johnson", "Marketing", 40000, datetime.date(2012, 3, 1), 148000),
("Smith", "Sales", 55000, datetime.date(2007, 6, 1), 148000),
("Brown", "Sales", 53000, datetime.date(2009, 9, 1), 148000),
],
transform=lambda row: (
row.name,
row.department,
row.salary,
row.hire_date,
row.sum,
),
)
def test_subquery_row_range_rank(self):
qs = Employee.objects.annotate(
highest_avg_salary_date=Subquery(
Employee.objects.filter(
department=OuterRef("department"),
)
.annotate(
avg_salary=Window(
expression=Avg("salary"),
order_by=[F("hire_date").asc()],
frame=RowRange(start=-1, end=1),
),
)
.order_by("-avg_salary", "hire_date")
.values("hire_date")[:1],
),
).order_by("department", "name")
self.assertQuerySetEqual(
qs,
[
("Adams", "Accounting", datetime.date(2005, 11, 1)),
("Jenson", "Accounting", datetime.date(2005, 11, 1)),
("Jones", "Accounting", datetime.date(2005, 11, 1)),
("Williams", "Accounting", datetime.date(2005, 11, 1)),
("Moore", "IT", datetime.date(2011, 3, 1)),
("Wilkinson", "IT", datetime.date(2011, 3, 1)),
("Johnson", "Management", datetime.date(2005, 6, 1)),
("Miller", "Management", datetime.date(2005, 6, 1)),
("Johnson", "Marketing", datetime.date(2009, 10, 1)),
("Smith", "Marketing", datetime.date(2009, 10, 1)),
("Brown", "Sales", datetime.date(2007, 6, 1)),
("Smith", "Sales", datetime.date(2007, 6, 1)),
],
transform=lambda row: (
row.name,
row.department,
row.highest_avg_salary_date,
),
)
def test_row_range_rank(self):
"""
A query with ROWS BETWEEN UNBOUNDED PRECEDING AND 3 FOLLOWING.
The resulting sum is the sum of the three next (if they exist) and all
previous rows according to the ordering clause.
"""
qs = Employee.objects.annotate(
sum=Window(
expression=Sum("salary"),
order_by=[F("hire_date").asc(), F("name").desc()],
frame=RowRange(start=None, end=3),
)
).order_by("sum", "hire_date")
self.assertIn("ROWS BETWEEN UNBOUNDED PRECEDING AND 3 FOLLOWING", str(qs.query))
self.assertQuerySetEqual(
qs,
[
("Miller", 100000, "Management", datetime.date(2005, 6, 1), 280000),
("Johnson", 80000, "Management", datetime.date(2005, 7, 1), 325000),
("Jones", 45000, "Accounting", datetime.date(2005, 11, 1), 362000),
("Smith", 55000, "Sales", datetime.date(2007, 6, 1), 415000),
("Jenson", 45000, "Accounting", datetime.date(2008, 4, 1), 453000),
("Williams", 37000, "Accounting", datetime.date(2009, 6, 1), 513000),
("Brown", 53000, "Sales", datetime.date(2009, 9, 1), 553000),
("Smith", 38000, "Marketing", datetime.date(2009, 10, 1), 603000),
("Wilkinson", 60000, "IT", datetime.date(2011, 3, 1), 637000),
("Johnson", 40000, "Marketing", datetime.date(2012, 3, 1), 637000),
("Adams", 50000, "Accounting", datetime.date(2013, 7, 1), 637000),
("Moore", 34000, "IT", datetime.date(2013, 8, 1), 637000),
],
transform=lambda row: (
row.name,
row.salary,
row.department,
row.hire_date,
row.sum,
),
)
@skipUnlessDBFeature("can_distinct_on_fields")
def test_distinct_window_function(self):
"""
Window functions are not aggregates, and hence a query to filter out
duplicates may be useful.
"""
qs = (
Employee.objects.annotate(
sum=Window(
expression=Sum("salary"),
partition_by=ExtractYear("hire_date"),
order_by=ExtractYear("hire_date"),
),
year=ExtractYear("hire_date"),
)
.filter(sum__gte=45000)
.values("year", "sum")
.distinct("year")
.order_by("year")
)
results = [
{"year": 2005, "sum": 225000},
{"year": 2007, "sum": 55000},
{"year": 2008, "sum": 45000},
{"year": 2009, "sum": 128000},
{"year": 2011, "sum": 60000},
{"year": 2013, "sum": 84000},
]
        for idx, val in enumerate(results):
with self.subTest(result=val):
self.assertEqual(qs[idx], val)
def test_fail_update(self):
"""Window expressions can't be used in an UPDATE statement."""
msg = (
"Window expressions are not allowed in this query (salary=<Window: "
"Max(Col(expressions_window_employee, expressions_window.Employee.salary)) "
"OVER (PARTITION BY Col(expressions_window_employee, "
"expressions_window.Employee.department))>)."
)
with self.assertRaisesMessage(FieldError, msg):
Employee.objects.filter(department="Management").update(
salary=Window(expression=Max("salary"), partition_by="department"),
)
def test_fail_insert(self):
"""Window expressions can't be used in an INSERT statement."""
msg = (
"Window expressions are not allowed in this query (salary=<Window: "
"Sum(Value(10000), order_by=OrderBy(F(pk), descending=False)) OVER ()"
)
with self.assertRaisesMessage(FieldError, msg):
Employee.objects.create(
name="Jameson",
department="Management",
hire_date=datetime.date(2007, 7, 1),
salary=Window(expression=Sum(Value(10000), order_by=F("pk").asc())),
)
def test_window_expression_within_subquery(self):
subquery_qs = Employee.objects.annotate(
highest=Window(
FirstValue("id"),
partition_by=F("department"),
order_by=F("salary").desc(),
)
).values("highest")
highest_salary = Employee.objects.filter(pk__in=subquery_qs)
self.assertCountEqual(
highest_salary.values("department", "salary"),
[
{"department": "Accounting", "salary": 50000},
{"department": "Sales", "salary": 55000},
{"department": "Marketing", "salary": 40000},
{"department": "IT", "salary": 60000},
{"department": "Management", "salary": 100000},
],
)
@skipUnlessDBFeature("supports_json_field")
def test_key_transform(self):
Detail.objects.bulk_create(
[
Detail(value={"department": "IT", "name": "Smith", "salary": 37000}),
Detail(value={"department": "IT", "name": "Nowak", "salary": 32000}),
Detail(value={"department": "HR", "name": "Brown", "salary": 50000}),
Detail(value={"department": "HR", "name": "Smith", "salary": 55000}),
Detail(value={"department": "PR", "name": "Moore", "salary": 90000}),
]
)
tests = [
(KeyTransform("department", "value"), KeyTransform("name", "value")),
(F("value__department"), F("value__name")),
]
for partition_by, order_by in tests:
with self.subTest(partition_by=partition_by, order_by=order_by):
qs = Detail.objects.annotate(
department_sum=Window(
expression=Sum(
Cast(
KeyTextTransform("salary", "value"),
output_field=IntegerField(),
)
),
partition_by=[partition_by],
order_by=[order_by],
)
).order_by("value__department", "department_sum")
self.assertQuerySetEqual(
qs,
[
("Brown", "HR", 50000, 50000),
("Smith", "HR", 55000, 105000),
("Nowak", "IT", 32000, 32000),
("Smith", "IT", 37000, 69000),
("Moore", "PR", 90000, 90000),
],
lambda entry: (
entry.value["name"],
entry.value["department"],
entry.value["salary"],
entry.department_sum,
),
)
def test_invalid_start_value_range(self):
msg = "start argument must be a negative integer, zero, or None, but got '3'."
with self.assertRaisesMessage(ValueError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
order_by=F("hire_date").asc(),
frame=ValueRange(start=3),
)
)
)
def test_invalid_end_value_range(self):
msg = "end argument must be a positive integer, zero, or None, but got '-3'."
with self.assertRaisesMessage(ValueError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
order_by=F("hire_date").asc(),
frame=ValueRange(end=-3),
)
)
)
def test_invalid_type_end_value_range(self):
msg = "end argument must be a positive integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
order_by=F("hire_date").asc(),
frame=ValueRange(end="a"),
)
)
)
def test_invalid_type_start_value_range(self):
msg = "start argument must be a negative integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
frame=ValueRange(start="a"),
)
)
)
def test_invalid_type_end_row_range(self):
msg = "end argument must be a positive integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
frame=RowRange(end="a"),
)
)
)
@skipUnlessDBFeature("only_supports_unbounded_with_preceding_and_following")
def test_unsupported_range_frame_start(self):
msg = (
"%s only supports UNBOUNDED together with PRECEDING and FOLLOWING."
% connection.display_name
)
with self.assertRaisesMessage(NotSupportedError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
order_by=F("hire_date").asc(),
frame=ValueRange(start=-1),
)
)
)
@skipUnlessDBFeature("only_supports_unbounded_with_preceding_and_following")
def test_unsupported_range_frame_end(self):
msg = (
"%s only supports UNBOUNDED together with PRECEDING and FOLLOWING."
% connection.display_name
)
with self.assertRaisesMessage(NotSupportedError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
order_by=F("hire_date").asc(),
frame=ValueRange(end=1),
)
)
)
def test_invalid_type_start_row_range(self):
msg = "start argument must be a negative integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(
Employee.objects.annotate(
test=Window(
expression=Sum("salary"),
order_by=F("hire_date").asc(),
frame=RowRange(start="a"),
)
)
)
def test_invalid_filter(self):
msg = (
"Heterogeneous disjunctive predicates against window functions are not "
"implemented when performing conditional aggregation."
)
qs = Employee.objects.annotate(
window=Window(Rank()),
past_dept_cnt=Count("past_departments"),
)
with self.assertRaisesMessage(NotImplementedError, msg):
list(qs.filter(Q(window=1) | Q(department="Accounting")))
with self.assertRaisesMessage(NotImplementedError, msg):
list(qs.exclude(window=1, department="Accounting"))
class WindowUnsupportedTests(TestCase):
def test_unsupported_backend(self):
msg = "This backend does not support window expressions."
with mock.patch.object(connection.features, "supports_over_clause", False):
with self.assertRaisesMessage(NotSupportedError, msg):
Employee.objects.annotate(
dense_rank=Window(expression=DenseRank())
).get()
class NonQueryWindowTests(SimpleTestCase):
def test_window_repr(self):
self.assertEqual(
repr(Window(expression=Sum("salary"), partition_by="department")),
"<Window: Sum(F(salary)) OVER (PARTITION BY F(department))>",
)
self.assertEqual(
repr(Window(expression=Avg("salary"), order_by=F("department").asc())),
"<Window: Avg(F(salary)) OVER (OrderByList(OrderBy(F(department), "
"descending=False)))>",
)
def test_window_frame_repr(self):
self.assertEqual(
repr(RowRange(start=-1)),
"<RowRange: ROWS BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING>",
)
self.assertEqual(
repr(ValueRange(start=None, end=1)),
"<ValueRange: RANGE BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING>",
)
self.assertEqual(
repr(ValueRange(start=0, end=0)),
"<ValueRange: RANGE BETWEEN CURRENT ROW AND CURRENT ROW>",
)
self.assertEqual(
repr(RowRange(start=0, end=0)),
"<RowRange: ROWS BETWEEN CURRENT ROW AND CURRENT ROW>",
)
def test_empty_group_by_cols(self):
window = Window(expression=Sum("pk"))
self.assertEqual(window.get_group_by_cols(), [])
self.assertFalse(window.contains_aggregate)
def test_frame_empty_group_by_cols(self):
frame = WindowFrame()
self.assertEqual(frame.get_group_by_cols(), [])
def test_frame_window_frame_notimplemented(self):
frame = WindowFrame()
msg = "Subclasses must implement window_frame_start_end()."
with self.assertRaisesMessage(NotImplementedError, msg):
frame.window_frame_start_end(None, None, None)
def test_invalid_order_by(self):
msg = (
"Window.order_by must be either a string reference to a field, an "
"expression, or a list or tuple of them."
)
with self.assertRaisesMessage(ValueError, msg):
Window(expression=Sum("power"), order_by={"-horse"})
def test_invalid_source_expression(self):
msg = "Expression 'Upper' isn't compatible with OVER clauses."
with self.assertRaisesMessage(ValueError, msg):
Window(expression=Upper("name"))
|
fd267d97f75659467ac57d4809a0df2aa2dcbcbecfd696a56285b5a4a32758be
import base64
import hashlib
import os
import shutil
import sys
import tempfile as sys_tempfile
import unittest
from io import BytesIO, StringIO
from unittest import mock
from urllib.parse import quote
from django.core.exceptions import SuspiciousFileOperation
from django.core.files import temp as tempfile
from django.core.files.uploadedfile import SimpleUploadedFile, UploadedFile
from django.http.multipartparser import (
FILE,
MultiPartParser,
MultiPartParserError,
Parser,
)
from django.test import SimpleTestCase, TestCase, client, override_settings
from . import uploadhandler
from .models import FileModel
UNICODE_FILENAME = "test-0123456789_中文_Orléans.jpg"
MEDIA_ROOT = sys_tempfile.mkdtemp()
UPLOAD_TO = os.path.join(MEDIA_ROOT, "test_upload")
CANDIDATE_TRAVERSAL_FILE_NAMES = [
"/tmp/hax0rd.txt", # Absolute path, *nix-style.
"C:\\Windows\\hax0rd.txt", # Absolute path, win-style.
"C:/Windows/hax0rd.txt", # Absolute path, broken-style.
"\\tmp\\hax0rd.txt", # Absolute path, broken in a different way.
"/tmp\\hax0rd.txt", # Absolute path, broken by mixing.
"subdir/hax0rd.txt", # Descendant path, *nix-style.
"subdir\\hax0rd.txt", # Descendant path, win-style.
"sub/dir\\hax0rd.txt", # Descendant path, mixed.
"../../hax0rd.txt", # Relative path, *nix-style.
"..\\..\\hax0rd.txt", # Relative path, win-style.
"../..\\hax0rd.txt", # Relative path, mixed.
"../hax0rd.txt", # HTML entities.
"../hax0rd.txt", # HTML entities.
]
CANDIDATE_INVALID_FILE_NAMES = [
"/tmp/", # Directory, *nix-style.
"c:\\tmp\\", # Directory, win-style.
"/tmp/.", # Directory dot, *nix-style.
"c:\\tmp\\.", # Directory dot, *nix-style.
"/tmp/..", # Parent directory, *nix-style.
"c:\\tmp\\..", # Parent directory, win-style.
"", # Empty filename.
]
@override_settings(
MEDIA_ROOT=MEDIA_ROOT, ROOT_URLCONF="file_uploads.urls", MIDDLEWARE=[]
)
class FileUploadTests(TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
os.makedirs(MEDIA_ROOT, exist_ok=True)
cls.addClassCleanup(shutil.rmtree, MEDIA_ROOT)
def test_upload_name_is_validated(self):
candidates = [
"/tmp/",
"/tmp/..",
"/tmp/.",
]
if sys.platform == "win32":
candidates.extend(
[
"c:\\tmp\\",
"c:\\tmp\\..",
"c:\\tmp\\.",
]
)
for file_name in candidates:
with self.subTest(file_name=file_name):
self.assertRaises(SuspiciousFileOperation, UploadedFile, name=file_name)
def test_simple_upload(self):
with open(__file__, "rb") as fp:
post_data = {
"name": "Ringo",
"file_field": fp,
}
response = self.client.post("/upload/", post_data)
self.assertEqual(response.status_code, 200)
def test_large_upload(self):
file = tempfile.NamedTemporaryFile
with file(suffix=".file1") as file1, file(suffix=".file2") as file2:
file1.write(b"a" * (2**21))
file1.seek(0)
file2.write(b"a" * (10 * 2**20))
file2.seek(0)
post_data = {
"name": "Ringo",
"file_field1": file1,
"file_field2": file2,
}
for key in list(post_data):
try:
post_data[key + "_hash"] = hashlib.sha1(
post_data[key].read()
).hexdigest()
post_data[key].seek(0)
except AttributeError:
post_data[key + "_hash"] = hashlib.sha1(
post_data[key].encode()
).hexdigest()
response = self.client.post("/verify/", post_data)
self.assertEqual(response.status_code, 200)
def _test_base64_upload(self, content, encode=base64.b64encode):
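        # The multipart body is built by hand so the Content-Transfer-Encoding:
        # base64 part header is preserved; the payload is decoded server-side
        # before the content reaches the view.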
payload = client.FakePayload(
"\r\n".join(
[
"--" + client.BOUNDARY,
'Content-Disposition: form-data; name="file"; filename="test.txt"',
"Content-Type: application/octet-stream",
"Content-Transfer-Encoding: base64",
"",
]
)
)
payload.write(b"\r\n" + encode(content.encode()) + b"\r\n")
payload.write("--" + client.BOUNDARY + "--\r\n")
r = {
"CONTENT_LENGTH": len(payload),
"CONTENT_TYPE": client.MULTIPART_CONTENT,
"PATH_INFO": "/echo_content/",
"REQUEST_METHOD": "POST",
"wsgi.input": payload,
}
response = self.client.request(**r)
self.assertEqual(response.json()["file"], content)
def test_base64_upload(self):
self._test_base64_upload("This data will be transmitted base64-encoded.")
def test_big_base64_upload(self):
self._test_base64_upload("Big data" * 68000) # > 512Kb
def test_big_base64_newlines_upload(self):
self._test_base64_upload("Big data" * 68000, encode=base64.encodebytes)
def test_base64_invalid_upload(self):
payload = client.FakePayload(
"\r\n".join(
[
"--" + client.BOUNDARY,
'Content-Disposition: form-data; name="file"; filename="test.txt"',
"Content-Type: application/octet-stream",
"Content-Transfer-Encoding: base64",
"",
]
)
)
payload.write(b"\r\n!\r\n")
payload.write("--" + client.BOUNDARY + "--\r\n")
r = {
"CONTENT_LENGTH": len(payload),
"CONTENT_TYPE": client.MULTIPART_CONTENT,
"PATH_INFO": "/echo_content/",
"REQUEST_METHOD": "POST",
"wsgi.input": payload,
}
response = self.client.request(**r)
self.assertEqual(response.json()["file"], "")
def test_unicode_file_name(self):
with sys_tempfile.TemporaryDirectory() as temp_dir:
# This file contains Chinese symbols and an accented char in the name.
with open(os.path.join(temp_dir, UNICODE_FILENAME), "w+b") as file1:
file1.write(b"b" * (2**10))
file1.seek(0)
response = self.client.post("/unicode_name/", {"file_unicode": file1})
self.assertEqual(response.status_code, 200)
def test_unicode_file_name_rfc2231(self):
"""
Receiving file upload when filename is encoded with RFC 2231.
"""
payload = client.FakePayload()
payload.write(
"\r\n".join(
[
"--" + client.BOUNDARY,
'Content-Disposition: form-data; name="file_unicode"; '
"filename*=UTF-8''%s" % quote(UNICODE_FILENAME),
"Content-Type: application/octet-stream",
"",
"You got pwnd.\r\n",
"\r\n--" + client.BOUNDARY + "--\r\n",
]
)
)
r = {
"CONTENT_LENGTH": len(payload),
"CONTENT_TYPE": client.MULTIPART_CONTENT,
"PATH_INFO": "/unicode_name/",
"REQUEST_METHOD": "POST",
"wsgi.input": payload,
}
response = self.client.request(**r)
self.assertEqual(response.status_code, 200)
def test_unicode_name_rfc2231(self):
"""
        Receiving file upload when both the field name and the filename are
        encoded with RFC 2231.
"""
payload = client.FakePayload()
payload.write(
"\r\n".join(
[
"--" + client.BOUNDARY,
"Content-Disposition: form-data; name*=UTF-8''file_unicode; "
"filename*=UTF-8''%s" % quote(UNICODE_FILENAME),
"Content-Type: application/octet-stream",
"",
"You got pwnd.\r\n",
"\r\n--" + client.BOUNDARY + "--\r\n",
]
)
)
r = {
"CONTENT_LENGTH": len(payload),
"CONTENT_TYPE": client.MULTIPART_CONTENT,
"PATH_INFO": "/unicode_name/",
"REQUEST_METHOD": "POST",
"wsgi.input": payload,
}
response = self.client.request(**r)
self.assertEqual(response.status_code, 200)
def test_unicode_file_name_rfc2231_with_double_quotes(self):
payload = client.FakePayload()
payload.write(
"\r\n".join(
[
"--" + client.BOUNDARY,
'Content-Disposition: form-data; name="file_unicode"; '
"filename*=\"UTF-8''%s\"" % quote(UNICODE_FILENAME),
"Content-Type: application/octet-stream",
"",
"You got pwnd.\r\n",
"\r\n--" + client.BOUNDARY + "--\r\n",
]
)
)
r = {
"CONTENT_LENGTH": len(payload),
"CONTENT_TYPE": client.MULTIPART_CONTENT,
"PATH_INFO": "/unicode_name/",
"REQUEST_METHOD": "POST",
"wsgi.input": payload,
}
response = self.client.request(**r)
self.assertEqual(response.status_code, 200)
def test_unicode_name_rfc2231_with_double_quotes(self):
payload = client.FakePayload()
payload.write(
"\r\n".join(
[
"--" + client.BOUNDARY,
"Content-Disposition: form-data; name*=\"UTF-8''file_unicode\"; "
"filename*=\"UTF-8''%s\"" % quote(UNICODE_FILENAME),
"Content-Type: application/octet-stream",
"",
"You got pwnd.\r\n",
"\r\n--" + client.BOUNDARY + "--\r\n",
]
)
)
r = {
"CONTENT_LENGTH": len(payload),
"CONTENT_TYPE": client.MULTIPART_CONTENT,
"PATH_INFO": "/unicode_name/",
"REQUEST_METHOD": "POST",
"wsgi.input": payload,
}
response = self.client.request(**r)
self.assertEqual(response.status_code, 200)
def test_blank_filenames(self):
"""
Receiving file upload when filename is blank (before and after
sanitization) should be okay.
"""
filenames = [
"",
# Normalized by MultiPartParser.IE_sanitize().
"C:\\Windows\\",
# Normalized by os.path.basename().
"/",
"ends-with-slash/",
]
payload = client.FakePayload()
for i, name in enumerate(filenames):
payload.write(
"\r\n".join(
[
"--" + client.BOUNDARY,
'Content-Disposition: form-data; name="file%s"; filename="%s"'
% (i, name),
"Content-Type: application/octet-stream",
"",
"You got pwnd.\r\n",
]
)
)
payload.write("\r\n--" + client.BOUNDARY + "--\r\n")
r = {
"CONTENT_LENGTH": len(payload),
"CONTENT_TYPE": client.MULTIPART_CONTENT,
"PATH_INFO": "/echo/",
"REQUEST_METHOD": "POST",
"wsgi.input": payload,
}
response = self.client.request(**r)
self.assertEqual(response.status_code, 200)
# Empty filenames should be ignored
received = response.json()
for i, name in enumerate(filenames):
self.assertIsNone(received.get("file%s" % i))
def test_non_printable_chars_in_file_names(self):
file_name = "non-\x00printable\x00\n_chars.txt\x00"
payload = client.FakePayload()
payload.write(
"\r\n".join(
[
"--" + client.BOUNDARY,
f'Content-Disposition: form-data; name="file"; '
f'filename="{file_name}"',
"Content-Type: application/octet-stream",
"",
"You got pwnd.\r\n",
]
)
)
payload.write("\r\n--" + client.BOUNDARY + "--\r\n")
r = {
"CONTENT_LENGTH": len(payload),
"CONTENT_TYPE": client.MULTIPART_CONTENT,
"PATH_INFO": "/echo/",
"REQUEST_METHOD": "POST",
"wsgi.input": payload,
}
response = self.client.request(**r)
# Non-printable chars are sanitized.
received = response.json()
self.assertEqual(received["file"], "non-printable_chars.txt")
def test_dangerous_file_names(self):
"""Uploaded file names should be sanitized before ever reaching the view."""
        # This test simulates possible directory traversal attacks by a
        # malicious uploader. We have to do some monkeybusiness here to
        # construct a malicious payload with an invalid file name (containing
        # os.sep or os.pardir). This is similar to what an attacker would need
        # to do when trying such an attack.
payload = client.FakePayload()
for i, name in enumerate(CANDIDATE_TRAVERSAL_FILE_NAMES):
payload.write(
"\r\n".join(
[
"--" + client.BOUNDARY,
'Content-Disposition: form-data; name="file%s"; filename="%s"'
% (i, name),
"Content-Type: application/octet-stream",
"",
"You got pwnd.\r\n",
]
)
)
payload.write("\r\n--" + client.BOUNDARY + "--\r\n")
r = {
"CONTENT_LENGTH": len(payload),
"CONTENT_TYPE": client.MULTIPART_CONTENT,
"PATH_INFO": "/echo/",
"REQUEST_METHOD": "POST",
"wsgi.input": payload,
}
response = self.client.request(**r)
# The filenames should have been sanitized by the time it got to the view.
received = response.json()
for i, name in enumerate(CANDIDATE_TRAVERSAL_FILE_NAMES):
got = received["file%s" % i]
self.assertEqual(got, "hax0rd.txt")
def test_filename_overflow(self):
"""File names over 256 characters (dangerous on some platforms) get fixed up."""
long_str = "f" * 300
cases = [
# field name, filename, expected
("long_filename", "%s.txt" % long_str, "%s.txt" % long_str[:251]),
("long_extension", "foo.%s" % long_str, ".%s" % long_str[:254]),
("no_extension", long_str, long_str[:255]),
("no_filename", ".%s" % long_str, ".%s" % long_str[:254]),
("long_everything", "%s.%s" % (long_str, long_str), ".%s" % long_str[:254]),
]
payload = client.FakePayload()
for name, filename, _ in cases:
payload.write(
"\r\n".join(
[
"--" + client.BOUNDARY,
'Content-Disposition: form-data; name="{}"; filename="{}"',
"Content-Type: application/octet-stream",
"",
"Oops.",
"",
]
).format(name, filename)
)
payload.write("\r\n--" + client.BOUNDARY + "--\r\n")
r = {
"CONTENT_LENGTH": len(payload),
"CONTENT_TYPE": client.MULTIPART_CONTENT,
"PATH_INFO": "/echo/",
"REQUEST_METHOD": "POST",
"wsgi.input": payload,
}
response = self.client.request(**r)
result = response.json()
for name, _, expected in cases:
got = result[name]
self.assertEqual(expected, got, "Mismatch for {}".format(name))
self.assertLess(
len(got), 256, "Got a long file name (%s characters)." % len(got)
)
def test_file_content(self):
file = tempfile.NamedTemporaryFile
with file(suffix=".ctype_extra") as no_content_type, file(
suffix=".ctype_extra"
) as simple_file:
no_content_type.write(b"no content")
no_content_type.seek(0)
simple_file.write(b"text content")
simple_file.seek(0)
simple_file.content_type = "text/plain"
string_io = StringIO("string content")
bytes_io = BytesIO(b"binary content")
response = self.client.post(
"/echo_content/",
{
"no_content_type": no_content_type,
"simple_file": simple_file,
"string": string_io,
"binary": bytes_io,
},
)
received = response.json()
self.assertEqual(received["no_content_type"], "no content")
self.assertEqual(received["simple_file"], "text content")
self.assertEqual(received["string"], "string content")
self.assertEqual(received["binary"], "binary content")
def test_content_type_extra(self):
"""Uploaded files may have content type parameters available."""
file = tempfile.NamedTemporaryFile
with file(suffix=".ctype_extra") as no_content_type, file(
suffix=".ctype_extra"
) as simple_file:
no_content_type.write(b"something")
no_content_type.seek(0)
simple_file.write(b"something")
simple_file.seek(0)
simple_file.content_type = "text/plain; test-key=test_value"
response = self.client.post(
"/echo_content_type_extra/",
{
"no_content_type": no_content_type,
"simple_file": simple_file,
},
)
received = response.json()
self.assertEqual(received["no_content_type"], {})
self.assertEqual(received["simple_file"], {"test-key": "test_value"})
def test_truncated_multipart_handled_gracefully(self):
"""
        If passed an incomplete multipart message, MultiPartParser does not
        attempt to read beyond the end of the stream and gracefully handles
        the parts that can be parsed.
"""
payload_str = "\r\n".join(
[
"--" + client.BOUNDARY,
'Content-Disposition: form-data; name="file"; filename="foo.txt"',
"Content-Type: application/octet-stream",
"",
"file contents" "--" + client.BOUNDARY + "--",
"",
]
)
payload = client.FakePayload(payload_str[:-10])
r = {
"CONTENT_LENGTH": len(payload),
"CONTENT_TYPE": client.MULTIPART_CONTENT,
"PATH_INFO": "/echo/",
"REQUEST_METHOD": "POST",
"wsgi.input": payload,
}
self.assertEqual(self.client.request(**r).json(), {})
def test_empty_multipart_handled_gracefully(self):
"""
If passed an empty multipart message, MultiPartParser will return
an empty QueryDict.
"""
r = {
"CONTENT_LENGTH": 0,
"CONTENT_TYPE": client.MULTIPART_CONTENT,
"PATH_INFO": "/echo/",
"REQUEST_METHOD": "POST",
"wsgi.input": client.FakePayload(b""),
}
self.assertEqual(self.client.request(**r).json(), {})
def test_custom_upload_handler(self):
file = tempfile.NamedTemporaryFile
with file() as smallfile, file() as bigfile:
# A small file (under the 5M quota)
smallfile.write(b"a" * (2**21))
smallfile.seek(0)
# A big file (over the quota)
bigfile.write(b"a" * (10 * 2**20))
bigfile.seek(0)
# Small file posting should work.
self.assertIn("f", self.client.post("/quota/", {"f": smallfile}).json())
# Large files don't go through.
self.assertNotIn("f", self.client.post("/quota/", {"f": bigfile}).json())
def test_broken_custom_upload_handler(self):
with tempfile.NamedTemporaryFile() as file:
file.write(b"a" * (2**21))
file.seek(0)
msg = (
"You cannot alter upload handlers after the upload has been processed."
)
with self.assertRaisesMessage(AttributeError, msg):
self.client.post("/quota/broken/", {"f": file})
def test_stop_upload_temporary_file_handler(self):
with tempfile.NamedTemporaryFile() as temp_file:
temp_file.write(b"a")
temp_file.seek(0)
response = self.client.post("/temp_file/stop_upload/", {"file": temp_file})
temp_path = response.json()["temp_path"]
self.assertIs(os.path.exists(temp_path), False)
def test_upload_interrupted_temporary_file_handler(self):
# Simulate an interrupted upload by omitting the closing boundary.
class MockedParser(Parser):
def __iter__(self):
for item in super().__iter__():
item_type, meta_data, field_stream = item
yield item_type, meta_data, field_stream
if item_type == FILE:
return
with tempfile.NamedTemporaryFile() as temp_file:
temp_file.write(b"a")
temp_file.seek(0)
with mock.patch(
"django.http.multipartparser.Parser",
MockedParser,
):
response = self.client.post(
"/temp_file/upload_interrupted/",
{"file": temp_file},
)
temp_path = response.json()["temp_path"]
self.assertIs(os.path.exists(temp_path), False)
def test_fileupload_getlist(self):
file = tempfile.NamedTemporaryFile
with file() as file1, file() as file2, file() as file2a:
file1.write(b"a" * (2**23))
file1.seek(0)
file2.write(b"a" * (2 * 2**18))
file2.seek(0)
file2a.write(b"a" * (5 * 2**20))
file2a.seek(0)
response = self.client.post(
"/getlist_count/",
{
"file1": file1,
"field1": "test",
"field2": "test3",
"field3": "test5",
"field4": "test6",
"field5": "test7",
"file2": (file2, file2a),
},
)
got = response.json()
self.assertEqual(got.get("file1"), 1)
self.assertEqual(got.get("file2"), 2)
def test_fileuploads_closed_at_request_end(self):
file = tempfile.NamedTemporaryFile
with file() as f1, file() as f2a, file() as f2b:
response = self.client.post(
"/fd_closing/t/",
{
"file": f1,
"file2": (f2a, f2b),
},
)
request = response.wsgi_request
# The files were parsed.
self.assertTrue(hasattr(request, "_files"))
file = request._files["file"]
self.assertTrue(file.closed)
files = request._files.getlist("file2")
self.assertTrue(files[0].closed)
self.assertTrue(files[1].closed)
def test_no_parsing_triggered_by_fd_closing(self):
file = tempfile.NamedTemporaryFile
with file() as f1, file() as f2a, file() as f2b:
response = self.client.post(
"/fd_closing/f/",
{
"file": f1,
"file2": (f2a, f2b),
},
)
request = response.wsgi_request
# The fd closing logic doesn't trigger parsing of the stream
self.assertFalse(hasattr(request, "_files"))
def test_file_error_blocking(self):
"""
The server should not block when there are upload errors (bug #8622).
        This can happen if something -- e.g. an exception handler -- tries to
access POST while handling an error in parsing POST. This shouldn't
cause an infinite loop!
"""
class POSTAccessingHandler(client.ClientHandler):
"""A handler that'll access POST during an exception."""
def handle_uncaught_exception(self, request, resolver, exc_info):
ret = super().handle_uncaught_exception(request, resolver, exc_info)
request.POST # evaluate
return ret
        # Maybe this is a little more complicated than it needs to be; but if
# the django.test.client.FakePayload.read() implementation changes then
# this test would fail. So we need to know exactly what kind of error
# it raises when there is an attempt to read more than the available bytes:
try:
client.FakePayload(b"a").read(2)
except Exception as err:
reference_error = err
# install the custom handler that tries to access request.POST
self.client.handler = POSTAccessingHandler()
with open(__file__, "rb") as fp:
post_data = {
"name": "Ringo",
"file_field": fp,
}
try:
self.client.post("/upload_errors/", post_data)
except reference_error.__class__ as err:
self.assertNotEqual(
str(err),
str(reference_error),
"Caught a repeated exception that'll cause an infinite loop in "
"file uploads.",
)
except Exception as err:
# CustomUploadError is the error that should have been raised
self.assertEqual(err.__class__, uploadhandler.CustomUploadError)
def test_filename_case_preservation(self):
"""
The storage backend shouldn't mess with the case of the filenames
uploaded.
"""
# Synthesize the contents of a file upload with a mixed case filename
# so we don't have to carry such a file in the Django tests source code
# tree.
vars = {"boundary": "oUrBoUnDaRyStRiNg"}
post_data = [
"--%(boundary)s",
'Content-Disposition: form-data; name="file_field"; '
'filename="MiXeD_cAsE.txt"',
"Content-Type: application/octet-stream",
"",
"file contents\n",
"--%(boundary)s--\r\n",
]
response = self.client.post(
"/filename_case/",
"\r\n".join(post_data) % vars,
"multipart/form-data; boundary=%(boundary)s" % vars,
)
self.assertEqual(response.status_code, 200)
id = int(response.content)
obj = FileModel.objects.get(pk=id)
# The name of the file uploaded and the file stored in the server-side
# shouldn't differ.
self.assertEqual(os.path.basename(obj.testfile.path), "MiXeD_cAsE.txt")
def test_filename_traversal_upload(self):
os.makedirs(UPLOAD_TO, exist_ok=True)
tests = [
"../test.txt",
"../test.txt",
]
for file_name in tests:
with self.subTest(file_name=file_name):
payload = client.FakePayload()
payload.write(
"\r\n".join(
[
"--" + client.BOUNDARY,
'Content-Disposition: form-data; name="my_file"; '
'filename="%s";' % file_name,
"Content-Type: text/plain",
"",
"file contents.\r\n",
"\r\n--" + client.BOUNDARY + "--\r\n",
]
),
)
r = {
"CONTENT_LENGTH": len(payload),
"CONTENT_TYPE": client.MULTIPART_CONTENT,
"PATH_INFO": "/upload_traversal/",
"REQUEST_METHOD": "POST",
"wsgi.input": payload,
}
response = self.client.request(**r)
result = response.json()
self.assertEqual(response.status_code, 200)
self.assertEqual(result["file_name"], "test.txt")
self.assertIs(
os.path.exists(os.path.join(MEDIA_ROOT, "test.txt")),
False,
)
self.assertIs(
os.path.exists(os.path.join(UPLOAD_TO, "test.txt")),
True,
)
@override_settings(MEDIA_ROOT=MEDIA_ROOT)
class DirectoryCreationTests(SimpleTestCase):
"""
Tests for error handling during directory creation
via _save_FIELD_file (ticket #6450)
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
os.makedirs(MEDIA_ROOT, exist_ok=True)
cls.addClassCleanup(shutil.rmtree, MEDIA_ROOT)
def setUp(self):
self.obj = FileModel()
@unittest.skipIf(
sys.platform == "win32", "Python on Windows doesn't have working os.chmod()."
)
def test_readonly_root(self):
"""Permission errors are not swallowed"""
os.chmod(MEDIA_ROOT, 0o500)
self.addCleanup(os.chmod, MEDIA_ROOT, 0o700)
with self.assertRaises(PermissionError):
self.obj.testfile.save(
"foo.txt", SimpleUploadedFile("foo.txt", b"x"), save=False
)
def test_not_a_directory(self):
# Create a file with the upload directory name
open(UPLOAD_TO, "wb").close()
self.addCleanup(os.remove, UPLOAD_TO)
msg = "%s exists and is not a directory." % UPLOAD_TO
with self.assertRaisesMessage(FileExistsError, msg):
with SimpleUploadedFile("foo.txt", b"x") as file:
self.obj.testfile.save("foo.txt", file, save=False)
class MultiParserTests(SimpleTestCase):
def test_empty_upload_handlers(self):
# We're not actually parsing here; just checking if the parser properly
# instantiates with empty upload handlers.
MultiPartParser(
{
"CONTENT_TYPE": "multipart/form-data; boundary=_foo",
"CONTENT_LENGTH": "1",
},
StringIO("x"),
[],
"utf-8",
)
def test_invalid_content_type(self):
with self.assertRaisesMessage(
MultiPartParserError, "Invalid Content-Type: text/plain"
):
MultiPartParser(
{
"CONTENT_TYPE": "text/plain",
"CONTENT_LENGTH": "1",
},
StringIO("x"),
[],
"utf-8",
)
def test_negative_content_length(self):
with self.assertRaisesMessage(
MultiPartParserError, "Invalid content length: -1"
):
MultiPartParser(
{
"CONTENT_TYPE": "multipart/form-data; boundary=_foo",
"CONTENT_LENGTH": -1,
},
StringIO("x"),
[],
"utf-8",
)
def test_bad_type_content_length(self):
multipart_parser = MultiPartParser(
{
"CONTENT_TYPE": "multipart/form-data; boundary=_foo",
"CONTENT_LENGTH": "a",
},
StringIO("x"),
[],
"utf-8",
)
self.assertEqual(multipart_parser._content_length, 0)
def test_sanitize_file_name(self):
parser = MultiPartParser(
{
"CONTENT_TYPE": "multipart/form-data; boundary=_foo",
"CONTENT_LENGTH": "1",
},
StringIO("x"),
[],
"utf-8",
)
for file_name in CANDIDATE_TRAVERSAL_FILE_NAMES:
with self.subTest(file_name=file_name):
self.assertEqual(parser.sanitize_file_name(file_name), "hax0rd.txt")
def test_sanitize_invalid_file_name(self):
parser = MultiPartParser(
{
"CONTENT_TYPE": "multipart/form-data; boundary=_foo",
"CONTENT_LENGTH": "1",
},
StringIO("x"),
[],
"utf-8",
)
for file_name in CANDIDATE_INVALID_FILE_NAMES:
with self.subTest(file_name=file_name):
self.assertIsNone(parser.sanitize_file_name(file_name))
|
f4aec54aa437f0120fcd8eba9016950e56feaa9720bb92db8c2444f12bec4d9f
import mimetypes
import os
import shutil
import socket
import sys
import tempfile
from email import charset, message_from_binary_file, message_from_bytes
from email.header import Header
from email.mime.text import MIMEText
from email.utils import parseaddr
from io import StringIO
from pathlib import Path
from smtplib import SMTP, SMTPException
from ssl import SSLError
from unittest import mock, skipUnless
from django.core import mail
from django.core.mail import (
DNS_NAME,
EmailMessage,
EmailMultiAlternatives,
mail_admins,
mail_managers,
send_mail,
send_mass_mail,
)
from django.core.mail.backends import console, dummy, filebased, locmem, smtp
from django.core.mail.message import BadHeaderError, sanitize_address
from django.test import SimpleTestCase, override_settings
from django.test.utils import requires_tz_support
from django.utils.translation import gettext_lazy
from django.utils.version import PY311
try:
from aiosmtpd.controller import Controller
HAS_AIOSMTPD = True
except ImportError:
HAS_AIOSMTPD = False
class HeadersCheckMixin:
def assertMessageHasHeaders(self, message, headers):
"""
Asserts that the `message` has all `headers`.
message: can be an instance of an email.Message subclass or a string
with the contents of an email message.
headers: should be a set of (header-name, header-value) tuples.
"""
if isinstance(message, bytes):
message = message_from_bytes(message)
msg_headers = set(message.items())
self.assertTrue(
headers.issubset(msg_headers),
msg="Message is missing "
"the following headers: %s" % (headers - msg_headers),
)
class MailTests(HeadersCheckMixin, SimpleTestCase):
"""
Non-backend specific tests.
"""
def get_decoded_attachments(self, django_message):
"""
        Encode the specified django.core.mail.message.EmailMessage, then
        decode it using Python's email.parser module, and return a list of
        (filename, content, mimetype) tuples, one per attachment of the
        message.
"""
msg_bytes = django_message.message().as_bytes()
email_message = message_from_bytes(msg_bytes)
def iter_attachments():
for i in email_message.walk():
if i.get_content_disposition() == "attachment":
filename = i.get_filename()
content = i.get_payload(decode=True)
mimetype = i.get_content_type()
yield filename, content, mimetype
return list(iter_attachments())
def test_ascii(self):
email = EmailMessage(
"Subject", "Content", "[email protected]", ["[email protected]"]
)
message = email.message()
self.assertEqual(message["Subject"], "Subject")
self.assertEqual(message.get_payload(), "Content")
self.assertEqual(message["From"], "[email protected]")
self.assertEqual(message["To"], "[email protected]")
def test_multiple_recipients(self):
email = EmailMessage(
"Subject",
"Content",
"[email protected]",
["[email protected]", "[email protected]"],
)
message = email.message()
self.assertEqual(message["Subject"], "Subject")
self.assertEqual(message.get_payload(), "Content")
self.assertEqual(message["From"], "[email protected]")
self.assertEqual(message["To"], "[email protected], [email protected]")
def test_header_omitted_for_no_to_recipients(self):
message = EmailMessage(
"Subject", "Content", "[email protected]", cc=["[email protected]"]
).message()
self.assertNotIn("To", message)
def test_recipients_with_empty_strings(self):
"""
Empty strings in various recipient arguments are always stripped
from the final recipient list.
"""
email = EmailMessage(
"Subject",
"Content",
"[email protected]",
["[email protected]", ""],
cc=["[email protected]", ""],
bcc=["", "[email protected]"],
reply_to=["", None],
)
self.assertEqual(
email.recipients(), ["[email protected]", "[email protected]", "[email protected]"]
)
def test_cc(self):
"""Regression test for #7722"""
email = EmailMessage(
"Subject",
"Content",
"[email protected]",
["[email protected]"],
cc=["[email protected]"],
)
message = email.message()
self.assertEqual(message["Cc"], "[email protected]")
self.assertEqual(email.recipients(), ["[email protected]", "[email protected]"])
# Test multiple CC with multiple To
email = EmailMessage(
"Subject",
"Content",
"[email protected]",
["[email protected]", "[email protected]"],
cc=["[email protected]", "[email protected]"],
)
message = email.message()
self.assertEqual(message["Cc"], "[email protected], [email protected]")
self.assertEqual(
email.recipients(),
[
"[email protected]",
"[email protected]",
"[email protected]",
"[email protected]",
],
)
# Testing with Bcc
email = EmailMessage(
"Subject",
"Content",
"[email protected]",
["[email protected]", "[email protected]"],
cc=["[email protected]", "[email protected]"],
bcc=["[email protected]"],
)
message = email.message()
self.assertEqual(message["Cc"], "[email protected], [email protected]")
self.assertEqual(
email.recipients(),
[
"[email protected]",
"[email protected]",
"[email protected]",
"[email protected]",
"[email protected]",
],
)
def test_cc_headers(self):
message = EmailMessage(
"Subject",
"Content",
"[email protected]",
["[email protected]"],
cc=["[email protected]"],
headers={"Cc": "[email protected]"},
).message()
self.assertEqual(message["Cc"], "[email protected]")
def test_cc_in_headers_only(self):
message = EmailMessage(
"Subject",
"Content",
"[email protected]",
["[email protected]"],
headers={"Cc": "[email protected]"},
).message()
self.assertEqual(message["Cc"], "[email protected]")
def test_reply_to(self):
email = EmailMessage(
"Subject",
"Content",
"[email protected]",
["[email protected]"],
reply_to=["[email protected]"],
)
message = email.message()
self.assertEqual(message["Reply-To"], "[email protected]")
email = EmailMessage(
"Subject",
"Content",
"[email protected]",
["[email protected]"],
reply_to=["[email protected]", "[email protected]"],
)
message = email.message()
self.assertEqual(
message["Reply-To"], "[email protected], [email protected]"
)
def test_recipients_as_tuple(self):
email = EmailMessage(
"Subject",
"Content",
"[email protected]",
("[email protected]", "[email protected]"),
cc=("[email protected]", "[email protected]"),
bcc=("[email protected]",),
)
message = email.message()
self.assertEqual(message["Cc"], "[email protected], [email protected]")
self.assertEqual(
email.recipients(),
[
"[email protected]",
"[email protected]",
"[email protected]",
"[email protected]",
"[email protected]",
],
)
def test_recipients_as_string(self):
with self.assertRaisesMessage(
TypeError, '"to" argument must be a list or tuple'
):
EmailMessage(to="[email protected]")
with self.assertRaisesMessage(
TypeError, '"cc" argument must be a list or tuple'
):
EmailMessage(cc="[email protected]")
with self.assertRaisesMessage(
TypeError, '"bcc" argument must be a list or tuple'
):
EmailMessage(bcc="[email protected]")
with self.assertRaisesMessage(
TypeError, '"reply_to" argument must be a list or tuple'
):
EmailMessage(reply_to="[email protected]")
def test_header_injection(self):
msg = "Header values can't contain newlines "
email = EmailMessage(
"Subject\nInjection Test", "Content", "[email protected]", ["[email protected]"]
)
with self.assertRaisesMessage(BadHeaderError, msg):
email.message()
email = EmailMessage(
gettext_lazy("Subject\nInjection Test"),
"Content",
"[email protected]",
["[email protected]"],
)
with self.assertRaisesMessage(BadHeaderError, msg):
email.message()
with self.assertRaisesMessage(BadHeaderError, msg):
EmailMessage(
"Subject",
"Content",
"[email protected]",
["Name\nInjection test <[email protected]>"],
).message()
def test_space_continuation(self):
"""
Test for the space continuation character in long (ASCII) subject headers (#7747).
"""
email = EmailMessage(
"Long subject lines that get wrapped should contain a space continuation "
"character to get expected behavior in Outlook and Thunderbird",
"Content",
"[email protected]",
["[email protected]"],
)
message = email.message()
self.assertEqual(
message["Subject"].encode(),
b"Long subject lines that get wrapped should contain a space continuation\n"
b" character to get expected behavior in Outlook and Thunderbird",
)
def test_message_header_overrides(self):
"""
Specifying dates or message-ids in the extra headers overrides the
default values (#9233)
"""
headers = {"date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
email = EmailMessage(
"subject",
"content",
"[email protected]",
["[email protected]"],
headers=headers,
)
self.assertMessageHasHeaders(
email.message(),
{
("Content-Transfer-Encoding", "7bit"),
("Content-Type", 'text/plain; charset="utf-8"'),
("From", "[email protected]"),
("MIME-Version", "1.0"),
("Message-ID", "foo"),
("Subject", "subject"),
("To", "[email protected]"),
("date", "Fri, 09 Nov 2001 01:08:47 -0000"),
},
)
def test_from_header(self):
"""
Make sure we can manually set the From header (#9214)
"""
email = EmailMessage(
"Subject",
"Content",
"[email protected]",
["[email protected]"],
headers={"From": "[email protected]"},
)
message = email.message()
self.assertEqual(message["From"], "[email protected]")
def test_to_header(self):
"""
Make sure we can manually set the To header (#17444)
"""
email = EmailMessage(
"Subject",
"Content",
"[email protected]",
["[email protected]", "[email protected]"],
headers={"To": "[email protected]"},
)
message = email.message()
self.assertEqual(message["To"], "[email protected]")
self.assertEqual(
email.to, ["[email protected]", "[email protected]"]
)
# If we don't set the To header manually, it should default to the `to`
# argument to the constructor.
email = EmailMessage(
"Subject",
"Content",
"[email protected]",
["[email protected]", "[email protected]"],
)
message = email.message()
self.assertEqual(
message["To"], "[email protected], [email protected]"
)
self.assertEqual(
email.to, ["[email protected]", "[email protected]"]
)
def test_to_in_headers_only(self):
message = EmailMessage(
"Subject",
"Content",
"[email protected]",
headers={"To": "[email protected]"},
).message()
self.assertEqual(message["To"], "[email protected]")
def test_reply_to_header(self):
"""
Specifying 'Reply-To' in headers should override reply_to.
"""
email = EmailMessage(
"Subject",
"Content",
"[email protected]",
["[email protected]"],
reply_to=["[email protected]"],
headers={"Reply-To": "[email protected]"},
)
message = email.message()
self.assertEqual(message["Reply-To"], "[email protected]")
def test_reply_to_in_headers_only(self):
message = EmailMessage(
"Subject",
"Content",
"[email protected]",
["[email protected]"],
headers={"Reply-To": "[email protected]"},
).message()
self.assertEqual(message["Reply-To"], "[email protected]")
def test_multiple_message_call(self):
"""
Regression for #13259 - Make sure that headers are not changed when
calling EmailMessage.message()
"""
email = EmailMessage(
"Subject",
"Content",
"[email protected]",
["[email protected]"],
headers={"From": "[email protected]"},
)
message = email.message()
self.assertEqual(message["From"], "[email protected]")
message = email.message()
self.assertEqual(message["From"], "[email protected]")
def test_unicode_address_header(self):
"""
Regression for #11144 - When a to/from/cc header contains Unicode,
make sure the email addresses are parsed correctly (especially with
regard to commas).
"""
email = EmailMessage(
"Subject",
"Content",
"[email protected]",
['"Firstname Sürname" <[email protected]>', "[email protected]"],
)
self.assertEqual(
email.message()["To"],
"=?utf-8?q?Firstname_S=C3=BCrname?= <[email protected]>, [email protected]",
)
email = EmailMessage(
"Subject",
"Content",
"[email protected]",
['"Sürname, Firstname" <[email protected]>', "[email protected]"],
)
self.assertEqual(
email.message()["To"],
"=?utf-8?q?S=C3=BCrname=2C_Firstname?= <[email protected]>, [email protected]",
)
def test_unicode_headers(self):
email = EmailMessage(
"Gżegżółka",
"Content",
"[email protected]",
["[email protected]"],
headers={
"Sender": '"Firstname Sürname" <[email protected]>',
"Comments": "My Sürname is non-ASCII",
},
)
message = email.message()
self.assertEqual(message["Subject"], "=?utf-8?b?R8W8ZWfFvMOzxYJrYQ==?=")
self.assertEqual(
message["Sender"], "=?utf-8?q?Firstname_S=C3=BCrname?= <[email protected]>"
)
self.assertEqual(
message["Comments"], "=?utf-8?q?My_S=C3=BCrname_is_non-ASCII?="
)
def test_safe_mime_multipart(self):
"""
Make sure headers can be set with an encoding other than utf-8 in
SafeMIMEMultipart as well.
"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
from_email, to = "[email protected]", '"Sürname, Firstname" <[email protected]>'
text_content = "This is an important message."
html_content = "<p>This is an <strong>important</strong> message.</p>"
msg = EmailMultiAlternatives(
"Message from Firstname Sürname",
text_content,
from_email,
[to],
headers=headers,
)
msg.attach_alternative(html_content, "text/html")
msg.encoding = "iso-8859-1"
self.assertEqual(
msg.message()["To"],
"=?iso-8859-1?q?S=FCrname=2C_Firstname?= <[email protected]>",
)
self.assertEqual(
msg.message()["Subject"],
"=?iso-8859-1?q?Message_from_Firstname_S=FCrname?=",
)
def test_safe_mime_multipart_with_attachments(self):
"""
EmailMultiAlternatives includes alternatives if the body is empty and
it has attachments.
"""
msg = EmailMultiAlternatives(body="")
html_content = "<p>This is <strong>html</strong></p>"
msg.attach_alternative(html_content, "text/html")
msg.attach("example.txt", "Text file content", "text/plain")
self.assertIn(html_content, msg.message().as_string())
def test_none_body(self):
msg = EmailMessage("subject", None, "[email protected]", ["[email protected]"])
self.assertEqual(msg.body, "")
self.assertEqual(msg.message().get_payload(), "")
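# The cached FQDN on DNS_NAME is cleared so the mocked non-ASCII hostname is
# re-read; the Message-ID domain should then be IDNA-encoded ("xn--p8s937b"
# is the punycode form of the mocked hostname).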
@mock.patch("socket.getfqdn", return_value="漢字")
def test_non_ascii_dns_non_unicode_email(self, mocked_getfqdn):
delattr(DNS_NAME, "_fqdn")
email = EmailMessage(
"subject", "content", "[email protected]", ["[email protected]"]
)
email.encoding = "iso-8859-1"
self.assertIn("@xn--p8s937b>", email.message()["Message-ID"])
def test_encoding(self):
"""
Regression for #12791 - Encode body correctly with encodings other
than utf-8.
"""
email = EmailMessage(
"Subject",
"Firstname Sürname is a great guy.",
"[email protected]",
["[email protected]"],
)
email.encoding = "iso-8859-1"
message = email.message()
self.assertMessageHasHeaders(
message,
{
("MIME-Version", "1.0"),
("Content-Type", 'text/plain; charset="iso-8859-1"'),
("Content-Transfer-Encoding", "quoted-printable"),
("Subject", "Subject"),
("From", "[email protected]"),
("To", "[email protected]"),
},
)
self.assertEqual(message.get_payload(), "Firstname S=FCrname is a great guy.")
# MIME attachments work correctly with encodings other than utf-8.
text_content = "Firstname Sürname is a great guy."
html_content = "<p>Firstname Sürname is a <strong>great</strong> guy.</p>"
msg = EmailMultiAlternatives(
"Subject", text_content, "[email protected]", ["[email protected]"]
)
msg.encoding = "iso-8859-1"
msg.attach_alternative(html_content, "text/html")
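# get_payload(0) and get_payload(1) return the text/plain and text/html
# alternatives in the order they were attached.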
payload0 = msg.message().get_payload(0)
self.assertMessageHasHeaders(
payload0,
{
("MIME-Version", "1.0"),
("Content-Type", 'text/plain; charset="iso-8859-1"'),
("Content-Transfer-Encoding", "quoted-printable"),
},
)
self.assertTrue(
payload0.as_bytes().endswith(b"\n\nFirstname S=FCrname is a great guy.")
)
payload1 = msg.message().get_payload(1)
self.assertMessageHasHeaders(
payload1,
{
("MIME-Version", "1.0"),
("Content-Type", 'text/html; charset="iso-8859-1"'),
("Content-Transfer-Encoding", "quoted-printable"),
},
)
self.assertTrue(
payload1.as_bytes().endswith(
b"\n\n<p>Firstname S=FCrname is a <strong>great</strong> guy.</p>"
)
)
def test_attachments(self):
"""Regression test for #9367"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
subject, from_email, to = "hello", "[email protected]", "[email protected]"
text_content = "This is an important message."
html_content = "<p>This is an <strong>important</strong> message.</p>"
msg = EmailMultiAlternatives(
subject, text_content, from_email, [to], headers=headers
)
msg.attach_alternative(html_content, "text/html")
msg.attach("an attachment.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
msg_bytes = msg.message().as_bytes()
message = message_from_bytes(msg_bytes)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_content_type(), "multipart/mixed")
self.assertEqual(message.get_default_type(), "text/plain")
payload = message.get_payload()
self.assertEqual(payload[0].get_content_type(), "multipart/alternative")
self.assertEqual(payload[1].get_content_type(), "application/pdf")
def test_attachments_two_tuple(self):
msg = EmailMessage(attachments=[("filename1", "content1")])
filename, content, mimetype = self.get_decoded_attachments(msg)[0]
self.assertEqual(filename, "filename1")
self.assertEqual(content, b"content1")
self.assertEqual(mimetype, "application/octet-stream")
def test_attachments_MIMEText(self):
txt = MIMEText("content1")
msg = EmailMessage(attachments=[txt])
payload = msg.message().get_payload()
self.assertEqual(payload[0], txt)
def test_non_ascii_attachment_filename(self):
"""Regression test for #14964"""
headers = {"Date": "Fri, 09 Nov 2001 01:08:47 -0000", "Message-ID": "foo"}
subject, from_email, to = "hello", "[email protected]", "[email protected]"
content = "This is the message."
msg = EmailMessage(subject, content, from_email, [to], headers=headers)
# Unicode in file name
msg.attach("une pièce jointe.pdf", b"%PDF-1.4.%...", mimetype="application/pdf")
msg_bytes = msg.message().as_bytes()
message = message_from_bytes(msg_bytes)
payload = message.get_payload()
self.assertEqual(payload[1].get_filename(), "une pièce jointe.pdf")
def test_attach_file(self):
"""
Test attaching a file against different mimetypes and make sure that
a file will be attached and sent properly even if an invalid mimetype
is specified.
"""
files = (
# filename, actual mimetype
("file.txt", "text/plain"),
("file.png", "image/png"),
("file_txt", None),
("file_png", None),
("file_txt.png", "image/png"),
("file_png.txt", "text/plain"),
("file.eml", "message/rfc822"),
)
test_mimetypes = ["text/plain", "image/png", None]
for basename, real_mimetype in files:
for mimetype in test_mimetypes:
email = EmailMessage(
"subject", "body", "[email protected]", ["[email protected]"]
)
self.assertEqual(mimetypes.guess_type(basename)[0], real_mimetype)
self.assertEqual(email.attachments, [])
file_path = os.path.join(
os.path.dirname(__file__), "attachments", basename
)
email.attach_file(file_path, mimetype=mimetype)
self.assertEqual(len(email.attachments), 1)
self.assertIn(basename, email.attachments[0])
msgs_sent_num = email.send()
self.assertEqual(msgs_sent_num, 1)
def test_attach_text_as_bytes(self):
msg = EmailMessage("subject", "body", "[email protected]", ["[email protected]"])
msg.attach("file.txt", b"file content")
sent_num = msg.send()
self.assertEqual(sent_num, 1)
filename, content, mimetype = self.get_decoded_attachments(msg)[0]
self.assertEqual(filename, "file.txt")
self.assertEqual(content, b"file content")
self.assertEqual(mimetype, "text/plain")
def test_attach_utf8_text_as_bytes(self):
"""
Non-ASCII characters encoded as valid UTF-8 are correctly transported
and decoded.
"""
msg = EmailMessage("subject", "body", "[email protected]", ["[email protected]"])
msg.attach("file.txt", b"\xc3\xa4") # UTF-8 encoded a umlaut.
filename, content, mimetype = self.get_decoded_attachments(msg)[0]
self.assertEqual(filename, "file.txt")
self.assertEqual(content, b"\xc3\xa4")
self.assertEqual(mimetype, "text/plain")
def test_attach_non_utf8_text_as_bytes(self):
"""
Binary data that can't be decoded as UTF-8 falls back to the
application/octet-stream MIME type instead of being decoded.
"""
msg = EmailMessage("subject", "body", "[email protected]", ["[email protected]"])
msg.attach("file.txt", b"\xff") # Invalid UTF-8.
filename, content, mimetype = self.get_decoded_attachments(msg)[0]
self.assertEqual(filename, "file.txt")
# Content should be passed through unmodified.
self.assertEqual(content, b"\xff")
self.assertEqual(mimetype, "application/octet-stream")
def test_attach_mimetext_content_mimetype(self):
email_msg = EmailMessage()
txt = MIMEText("content")
msg = (
"content and mimetype must not be given when a MIMEBase instance "
"is provided."
)
with self.assertRaisesMessage(ValueError, msg):
email_msg.attach(txt, content="content")
with self.assertRaisesMessage(ValueError, msg):
email_msg.attach(txt, mimetype="text/plain")
def test_attach_content_none(self):
email_msg = EmailMessage()
msg = "content must be provided."
with self.assertRaisesMessage(ValueError, msg):
email_msg.attach("file.txt", mimetype="application/pdf")
def test_dummy_backend(self):
"""
Make sure that the dummy backend returns the correct number of sent messages.
"""
connection = dummy.EmailBackend()
email = EmailMessage(
"Subject",
"Content",
"[email protected]",
["[email protected]"],
headers={"From": "[email protected]"},
)
self.assertEqual(connection.send_messages([email, email, email]), 3)
def test_arbitrary_keyword(self):
"""
Make sure that get_connection() accepts arbitrary keyword arguments that
might be used with custom backends.
"""
c = mail.get_connection(fail_silently=True, foo="bar")
self.assertTrue(c.fail_silently)
def test_custom_backend(self):
"""Test custom backend defined in this suite."""
conn = mail.get_connection("mail.custombackend.EmailBackend")
self.assertTrue(hasattr(conn, "test_outbox"))
email = EmailMessage(
"Subject",
"Content",
"[email protected]",
["[email protected]"],
headers={"From": "[email protected]"},
)
conn.send_messages([email])
self.assertEqual(len(conn.test_outbox), 1)
def test_backend_arg(self):
"""Test backend argument of mail.get_connection()"""
self.assertIsInstance(
mail.get_connection("django.core.mail.backends.smtp.EmailBackend"),
smtp.EmailBackend,
)
self.assertIsInstance(
mail.get_connection("django.core.mail.backends.locmem.EmailBackend"),
locmem.EmailBackend,
)
self.assertIsInstance(
mail.get_connection("django.core.mail.backends.dummy.EmailBackend"),
dummy.EmailBackend,
)
self.assertIsInstance(
mail.get_connection("django.core.mail.backends.console.EmailBackend"),
console.EmailBackend,
)
with tempfile.TemporaryDirectory() as tmp_dir:
self.assertIsInstance(
mail.get_connection(
"django.core.mail.backends.filebased.EmailBackend",
file_path=tmp_dir,
),
filebased.EmailBackend,
)
if sys.platform == "win32" and not PY311:
msg = (
"_getfullpathname: path should be string, bytes or os.PathLike, not "
"object"
)
else:
msg = "expected str, bytes or os.PathLike object, not object"
with self.assertRaisesMessage(TypeError, msg):
mail.get_connection(
"django.core.mail.backends.filebased.EmailBackend", file_path=object()
)
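# With no backend argument, get_connection() falls back to
# settings.EMAIL_BACKEND, which Django's test environment sets to locmem.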
self.assertIsInstance(mail.get_connection(), locmem.EmailBackend)
@override_settings(
EMAIL_BACKEND="django.core.mail.backends.locmem.EmailBackend",
ADMINS=[("nobody", "[email protected]")],
MANAGERS=[("nobody", "[email protected]")],
)
def test_connection_arg(self):
"""Test connection argument to send_mail(), et. al."""
mail.outbox = []
# Send using non-default connection
connection = mail.get_connection("mail.custombackend.EmailBackend")
send_mail(
"Subject",
"Content",
"[email protected]",
["[email protected]"],
connection=connection,
)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, "Subject")
connection = mail.get_connection("mail.custombackend.EmailBackend")
send_mass_mail(
[
("Subject1", "Content1", "[email protected]", ["[email protected]"]),
("Subject2", "Content2", "[email protected]", ["[email protected]"]),
],
connection=connection,
)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 2)
self.assertEqual(connection.test_outbox[0].subject, "Subject1")
self.assertEqual(connection.test_outbox[1].subject, "Subject2")
connection = mail.get_connection("mail.custombackend.EmailBackend")
mail_admins("Admin message", "Content", connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, "[Django] Admin message")
connection = mail.get_connection("mail.custombackend.EmailBackend")
mail_managers("Manager message", "Content", connection=connection)
self.assertEqual(mail.outbox, [])
self.assertEqual(len(connection.test_outbox), 1)
self.assertEqual(connection.test_outbox[0].subject, "[Django] Manager message")
def test_dont_mangle_from_in_body(self):
# Regression for #13433 - Make sure that EmailMessage doesn't mangle
# 'From ' in message body.
email = EmailMessage(
"Subject",
"From the future",
"[email protected]",
["[email protected]"],
headers={"From": "[email protected]"},
)
self.assertNotIn(b">From the future", email.message().as_bytes())
def test_dont_base64_encode(self):
# Ticket #3472
# Shouldn't use Base64 encoding at all
msg = EmailMessage(
"Subject",
"UTF-8 encoded body",
"[email protected]",
["[email protected]"],
headers={"From": "[email protected]"},
)
self.assertIn(b"Content-Transfer-Encoding: 7bit", msg.message().as_bytes())
# Ticket #11212
# Shouldn't use quoted-printable; should detect that the content can be
# represented with 7-bit data.
msg = EmailMessage(
"Subject",
"Body with only ASCII characters.",
"[email protected]",
["[email protected]"],
headers={"From": "[email protected]"},
)
s = msg.message().as_bytes()
self.assertIn(b"Content-Transfer-Encoding: 7bit", s)
# Shouldn't use quoted-printable; should detect that the content can be
# represented with 8-bit data.
msg = EmailMessage(
"Subject",
"Body with latin characters: àáä.",
"[email protected]",
["[email protected]"],
headers={"From": "[email protected]"},
)
s = msg.message().as_bytes()
self.assertIn(b"Content-Transfer-Encoding: 8bit", s)
s = msg.message().as_string()
self.assertIn("Content-Transfer-Encoding: 8bit", s)
msg = EmailMessage(
"Subject",
"Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.",
"[email protected]",
["[email protected]"],
headers={"From": "[email protected]"},
)
s = msg.message().as_bytes()
self.assertIn(b"Content-Transfer-Encoding: 8bit", s)
s = msg.message().as_string()
self.assertIn("Content-Transfer-Encoding: 8bit", s)
def test_dont_base64_encode_message_rfc822(self):
# Ticket #18967
# Shouldn't use base64 encoding for a child EmailMessage attachment.
# Create a child message first
child_msg = EmailMessage(
"Child Subject",
"Some body of child message",
"[email protected]",
["[email protected]"],
headers={"From": "[email protected]"},
)
child_s = child_msg.message().as_string()
# Now create a parent
parent_msg = EmailMessage(
"Parent Subject",
"Some parent body",
"[email protected]",
["[email protected]"],
headers={"From": "[email protected]"},
)
# Attach to parent as a string
parent_msg.attach(content=child_s, mimetype="message/rfc822")
parent_s = parent_msg.message().as_string()
# The child message header is not base64 encoded
self.assertIn("Child Subject", parent_s)
# Feature test: try attaching email.Message object directly to the mail.
parent_msg = EmailMessage(
"Parent Subject",
"Some parent body",
"[email protected]",
["[email protected]"],
headers={"From": "[email protected]"},
)
parent_msg.attach(content=child_msg.message(), mimetype="message/rfc822")
parent_s = parent_msg.message().as_string()
# The child message header is not base64 encoded
self.assertIn("Child Subject", parent_s)
# Feature test: try attaching Django's EmailMessage object directly to the mail.
parent_msg = EmailMessage(
"Parent Subject",
"Some parent body",
"[email protected]",
["[email protected]"],
headers={"From": "[email protected]"},
)
parent_msg.attach(content=child_msg, mimetype="message/rfc822")
parent_s = parent_msg.message().as_string()
# The child message header is not base64 encoded
self.assertIn("Child Subject", parent_s)
def test_custom_utf8_encoding(self):
"""A UTF-8 charset with a custom body encoding is respected."""
body = "Body with latin characters: àáä."
msg = EmailMessage("Subject", body, "[email protected]", ["[email protected]"])
encoding = charset.Charset("utf-8")
encoding.body_encoding = charset.QP
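# Assigning a Charset instance (rather than a charset name string) forces a
# quoted-printable body encoding while keeping the utf-8 charset.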
msg.encoding = encoding
message = msg.message()
self.assertMessageHasHeaders(
message,
{
("MIME-Version", "1.0"),
("Content-Type", 'text/plain; charset="utf-8"'),
("Content-Transfer-Encoding", "quoted-printable"),
},
)
self.assertEqual(message.get_payload(), encoding.body_encode(body))
def test_sanitize_address(self):
"""Email addresses are properly sanitized."""
for email_address, encoding, expected_result in (
# ASCII addresses.
("[email protected]", "ascii", "[email protected]"),
("[email protected]", "utf-8", "[email protected]"),
(("A name", "[email protected]"), "ascii", "A name <[email protected]>"),
(
("A name", "[email protected]"),
"utf-8",
"A name <[email protected]>",
),
("localpartonly", "ascii", "localpartonly"),
# ASCII addresses with display names.
("A name <[email protected]>", "ascii", "A name <[email protected]>"),
("A name <[email protected]>", "utf-8", "A name <[email protected]>"),
('"A name" <[email protected]>', "ascii", "A name <[email protected]>"),
('"A name" <[email protected]>', "utf-8", "A name <[email protected]>"),
# Unicode addresses (supported per RFC-6532).
("tó@example.com", "utf-8", "[email protected]"),
("to@éxample.com", "utf-8", "[email protected]"),
(
("Tó Example", "tó@example.com"),
"utf-8",
"=?utf-8?q?T=C3=B3_Example?= <[email protected]>",
),
# Unicode addresses with display names.
(
"Tó Example <tó@example.com>",
"utf-8",
"=?utf-8?q?T=C3=B3_Example?= <[email protected]>",
),
(
"To Example <to@éxample.com>",
"ascii",
"To Example <[email protected]>",
),
(
"To Example <to@éxample.com>",
"utf-8",
"To Example <[email protected]>",
),
# Addresses with two @ signs.
('"[email protected]"@example.com', "utf-8", r'"[email protected]"@example.com'),
(
'"[email protected]" <[email protected]>',
"utf-8",
'"[email protected]" <[email protected]>',
),
(
("To Example", "[email protected]@example.com"),
"utf-8",
'To Example <"[email protected]"@example.com>',
),
# Addresses with long unicode display names.
(
"Tó Example very long" * 4 + " <[email protected]>",
"utf-8",
"=?utf-8?q?T=C3=B3_Example_very_longT=C3=B3_Example_very_longT"
"=C3=B3_Example_?=\n"
" =?utf-8?q?very_longT=C3=B3_Example_very_long?= "
"<[email protected]>",
),
(
("Tó Example very long" * 4, "[email protected]"),
"utf-8",
"=?utf-8?q?T=C3=B3_Example_very_longT=C3=B3_Example_very_longT"
"=C3=B3_Example_?=\n"
" =?utf-8?q?very_longT=C3=B3_Example_very_long?= "
"<[email protected]>",
),
# Address with long display name and unicode domain.
(
("To Example very long" * 4, "to@exampl€.com"),
"utf-8",
"To Example very longTo Example very longTo Example very longT"
"o Example very\n"
" long <[email protected]>",
),
):
with self.subTest(email_address=email_address, encoding=encoding):
self.assertEqual(
sanitize_address(email_address, encoding), expected_result
)
def test_sanitize_address_invalid(self):
for email_address in (
# Invalid address with two @ signs.
"[email protected]@example.com",
# Invalid address without the quotes.
"[email protected] <[email protected]>",
# Other invalid addresses.
"@",
"to@",
"@example.com",
):
with self.subTest(email_address=email_address):
with self.assertRaises(ValueError):
sanitize_address(email_address, encoding="utf-8")
def test_sanitize_address_header_injection(self):
msg = "Invalid address; address parts cannot contain newlines."
tests = [
"Name\nInjection <[email protected]>",
("Name\nInjection", "[email protected]"),
"Name <to\[email protected]>",
("Name", "to\[email protected]"),
]
for email_address in tests:
with self.subTest(email_address=email_address):
with self.assertRaisesMessage(ValueError, msg):
sanitize_address(email_address, encoding="utf-8")
def test_email_multi_alternatives_content_mimetype_none(self):
email_msg = EmailMultiAlternatives()
msg = "Both content and mimetype must be provided."
with self.assertRaisesMessage(ValueError, msg):
email_msg.attach_alternative(None, "text/html")
with self.assertRaisesMessage(ValueError, msg):
email_msg.attach_alternative("<p>content</p>", None)
@requires_tz_support
class MailTimeZoneTests(SimpleTestCase):
@override_settings(
EMAIL_USE_LOCALTIME=False, USE_TZ=True, TIME_ZONE="Africa/Algiers"
)
def test_date_header_utc(self):
"""
EMAIL_USE_LOCALTIME=False creates a datetime in UTC.
"""
email = EmailMessage(
"Subject", "Body", "[email protected]", ["[email protected]"]
)
self.assertTrue(email.message()["Date"].endswith("-0000"))
@override_settings(
EMAIL_USE_LOCALTIME=True, USE_TZ=True, TIME_ZONE="Africa/Algiers"
)
def test_date_header_localtime(self):
"""
EMAIL_USE_LOCALTIME=True creates a datetime in the local time zone.
"""
email = EmailMessage(
"Subject", "Body", "[email protected]", ["[email protected]"]
)
self.assertTrue(
email.message()["Date"].endswith("+0100")
) # Africa/Algiers is UTC+1
class PythonGlobalState(SimpleTestCase):
"""
Tests for #12422 -- Django's smarts (#2472/#11212) with the charset of
utf-8 text parts shouldn't pollute the global email package charset
registry when django.core.mail.message is imported.
"""
def test_utf8(self):
txt = MIMEText("UTF-8 encoded body", "plain", "utf-8")
self.assertIn("Content-Transfer-Encoding: base64", txt.as_string())
def test_7bit(self):
txt = MIMEText("Body with only ASCII characters.", "plain", "utf-8")
self.assertIn("Content-Transfer-Encoding: base64", txt.as_string())
def test_8bit_latin(self):
txt = MIMEText("Body with latin characters: àáä.", "plain", "utf-8")
self.assertIn("Content-Transfer-Encoding: base64", txt.as_string())
def test_8bit_non_latin(self):
txt = MIMEText(
"Body with non latin characters: А Б В Г Д Е Ж Ѕ З И І К Л М Н О П.",
"plain",
"utf-8",
)
self.assertIn("Content-Transfer-Encoding: base64", txt.as_string())
class BaseEmailBackendTests(HeadersCheckMixin):
email_backend = None
def setUp(self):
self.settings_override = override_settings(EMAIL_BACKEND=self.email_backend)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def assertStartsWith(self, first, second):
if not first.startswith(second):
self.longMessage = True
self.assertEqual(
first[: len(second)],
second,
"First string doesn't start with the second.",
)
def get_mailbox_content(self):
raise NotImplementedError(
"subclasses of BaseEmailBackendTests must provide a get_mailbox_content() "
"method"
)
def flush_mailbox(self):
raise NotImplementedError(
"subclasses of BaseEmailBackendTests may require a flush_mailbox() method"
)
def get_the_message(self):
mailbox = self.get_mailbox_content()
self.assertEqual(
len(mailbox),
1,
"Expected exactly one message, got %d.\n%r"
% (len(mailbox), [m.as_string() for m in mailbox]),
)
return mailbox[0]
def test_send(self):
email = EmailMessage(
"Subject", "Content", "[email protected]", ["[email protected]"]
)
num_sent = mail.get_connection().send_messages([email])
self.assertEqual(num_sent, 1)
message = self.get_the_message()
self.assertEqual(message["subject"], "Subject")
self.assertEqual(message.get_payload(), "Content")
self.assertEqual(message["from"], "[email protected]")
self.assertEqual(message.get_all("to"), ["[email protected]"])
def test_send_unicode(self):
email = EmailMessage(
"Chère maman", "Je t'aime très fort", "[email protected]", ["[email protected]"]
)
num_sent = mail.get_connection().send_messages([email])
self.assertEqual(num_sent, 1)
message = self.get_the_message()
self.assertEqual(message["subject"], "=?utf-8?q?Ch=C3=A8re_maman?=")
self.assertEqual(
message.get_payload(decode=True).decode(), "Je t'aime très fort"
)
def test_send_long_lines(self):
"""
Email line length is limited to 998 chars by RFC 5322 Section 2.1.1.
Message bodies containing longer lines are converted to quoted-printable
to avoid having to insert newlines, which could be hairy to do properly.
"""
# Unencoded body length is < 998 (840) but > 998 when utf-8 encoded.
email = EmailMessage(
"Subject", "В южных морях " * 60, "[email protected]", ["[email protected]"]
)
email.send()
message = self.get_the_message()
self.assertMessageHasHeaders(
message,
{
("MIME-Version", "1.0"),
("Content-Type", 'text/plain; charset="utf-8"'),
("Content-Transfer-Encoding", "quoted-printable"),
},
)
def test_send_many(self):
email1 = EmailMessage(
"Subject", "Content1", "[email protected]", ["[email protected]"]
)
email2 = EmailMessage(
"Subject", "Content2", "[email protected]", ["[email protected]"]
)
# send_messages() may take a list or an iterator.
emails_lists = ([email1, email2], iter((email1, email2)))
for emails_list in emails_lists:
num_sent = mail.get_connection().send_messages(emails_list)
self.assertEqual(num_sent, 2)
messages = self.get_mailbox_content()
self.assertEqual(len(messages), 2)
self.assertEqual(messages[0].get_payload(), "Content1")
self.assertEqual(messages[1].get_payload(), "Content2")
self.flush_mailbox()
def test_send_verbose_name(self):
email = EmailMessage(
"Subject",
"Content",
'"Firstname Sürname" <[email protected]>',
["[email protected]"],
)
email.send()
message = self.get_the_message()
self.assertEqual(message["subject"], "Subject")
self.assertEqual(message.get_payload(), "Content")
self.assertEqual(
message["from"], "=?utf-8?q?Firstname_S=C3=BCrname?= <[email protected]>"
)
def test_plaintext_send_mail(self):
"""
Test send_mail() without the html_message argument; regression test
for adding the html_message parameter to send_mail().
"""
send_mail("Subject", "Content", "[email protected]", ["[email protected]"])
message = self.get_the_message()
self.assertEqual(message.get("subject"), "Subject")
self.assertEqual(message.get_all("to"), ["[email protected]"])
self.assertFalse(message.is_multipart())
self.assertEqual(message.get_payload(), "Content")
self.assertEqual(message.get_content_type(), "text/plain")
def test_html_send_mail(self):
"""Test html_message argument to send_mail"""
send_mail(
"Subject",
"Content",
"[email protected]",
["[email protected]"],
html_message="HTML Content",
)
message = self.get_the_message()
self.assertEqual(message.get("subject"), "Subject")
self.assertEqual(message.get_all("to"), ["[email protected]"])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), "Content")
self.assertEqual(message.get_payload(0).get_content_type(), "text/plain")
self.assertEqual(message.get_payload(1).get_payload(), "HTML Content")
self.assertEqual(message.get_payload(1).get_content_type(), "text/html")
@override_settings(MANAGERS=[("nobody", "[email protected]")])
def test_html_mail_managers(self):
"""Test html_message argument to mail_managers"""
mail_managers("Subject", "Content", html_message="HTML Content")
message = self.get_the_message()
self.assertEqual(message.get("subject"), "[Django] Subject")
self.assertEqual(message.get_all("to"), ["[email protected]"])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), "Content")
self.assertEqual(message.get_payload(0).get_content_type(), "text/plain")
self.assertEqual(message.get_payload(1).get_payload(), "HTML Content")
self.assertEqual(message.get_payload(1).get_content_type(), "text/html")
@override_settings(ADMINS=[("nobody", "[email protected]")])
def test_html_mail_admins(self):
"""Test html_message argument to mail_admins"""
mail_admins("Subject", "Content", html_message="HTML Content")
message = self.get_the_message()
self.assertEqual(message.get("subject"), "[Django] Subject")
self.assertEqual(message.get_all("to"), ["[email protected]"])
self.assertTrue(message.is_multipart())
self.assertEqual(len(message.get_payload()), 2)
self.assertEqual(message.get_payload(0).get_payload(), "Content")
self.assertEqual(message.get_payload(0).get_content_type(), "text/plain")
self.assertEqual(message.get_payload(1).get_payload(), "HTML Content")
self.assertEqual(message.get_payload(1).get_content_type(), "text/html")
@override_settings(
ADMINS=[("nobody", "[email protected]")],
MANAGERS=[("nobody", "[email protected]")],
)
def test_manager_and_admin_mail_prefix(self):
"""
String prefix + lazily translated subject used to produce bad output.
Regression for #13494.
"""
mail_managers(gettext_lazy("Subject"), "Content")
message = self.get_the_message()
self.assertEqual(message.get("subject"), "[Django] Subject")
self.flush_mailbox()
mail_admins(gettext_lazy("Subject"), "Content")
message = self.get_the_message()
self.assertEqual(message.get("subject"), "[Django] Subject")
@override_settings(ADMINS=[], MANAGERS=[])
def test_empty_admins(self):
"""
mail_admins()/mail_managers() don't connect to the mail server
if there are no recipients (#9383).
"""
mail_admins("hi", "there")
self.assertEqual(self.get_mailbox_content(), [])
mail_managers("hi", "there")
self.assertEqual(self.get_mailbox_content(), [])
def test_wrong_admins_managers(self):
tests = (
"[email protected]",
("[email protected]",),
["[email protected]", "[email protected]"],
("[email protected]", "[email protected]"),
)
for setting, mail_func in (
("ADMINS", mail_admins),
("MANAGERS", mail_managers),
):
msg = "The %s setting must be a list of 2-tuples." % setting
for value in tests:
with self.subTest(setting=setting, value=value), self.settings(
**{setting: value}
):
with self.assertRaisesMessage(ValueError, msg):
mail_func("subject", "content")
def test_message_cc_header(self):
"""
Regression test for #7722
"""
email = EmailMessage(
"Subject",
"Content",
"[email protected]",
["[email protected]"],
cc=["[email protected]"],
)
mail.get_connection().send_messages([email])
message = self.get_the_message()
self.assertMessageHasHeaders(
message,
{
("MIME-Version", "1.0"),
("Content-Type", 'text/plain; charset="utf-8"'),
("Content-Transfer-Encoding", "7bit"),
("Subject", "Subject"),
("From", "[email protected]"),
("To", "[email protected]"),
("Cc", "[email protected]"),
},
)
self.assertIn("\nDate: ", message.as_string())
def test_idn_send(self):
"""
Regression test for #14301
"""
self.assertTrue(send_mail("Subject", "Content", "from@öäü.com", ["to@öäü.com"]))
message = self.get_the_message()
self.assertEqual(message.get("subject"), "Subject")
self.assertEqual(message.get("from"), "[email protected]")
self.assertEqual(message.get("to"), "[email protected]")
self.flush_mailbox()
m = EmailMessage(
"Subject", "Content", "from@öäü.com", ["to@öäü.com"], cc=["cc@öäü.com"]
)
m.send()
message = self.get_the_message()
self.assertEqual(message.get("subject"), "Subject")
self.assertEqual(message.get("from"), "[email protected]")
self.assertEqual(message.get("to"), "[email protected]")
self.assertEqual(message.get("cc"), "[email protected]")
def test_recipient_without_domain(self):
"""
Regression test for #15042
"""
self.assertTrue(send_mail("Subject", "Content", "tester", ["django"]))
message = self.get_the_message()
self.assertEqual(message.get("subject"), "Subject")
self.assertEqual(message.get("from"), "tester")
self.assertEqual(message.get("to"), "django")
def test_lazy_addresses(self):
"""
Email sending should support lazy email addresses (#24416).
"""
_ = gettext_lazy
self.assertTrue(send_mail("Subject", "Content", _("tester"), [_("django")]))
message = self.get_the_message()
self.assertEqual(message.get("from"), "tester")
self.assertEqual(message.get("to"), "django")
self.flush_mailbox()
m = EmailMessage(
"Subject",
"Content",
_("tester"),
[_("to1"), _("to2")],
cc=[_("cc1"), _("cc2")],
bcc=[_("bcc")],
reply_to=[_("reply")],
)
self.assertEqual(m.recipients(), ["to1", "to2", "cc1", "cc2", "bcc"])
m.send()
message = self.get_the_message()
self.assertEqual(message.get("from"), "tester")
self.assertEqual(message.get("to"), "to1, to2")
self.assertEqual(message.get("cc"), "cc1, cc2")
self.assertEqual(message.get("Reply-To"), "reply")
def test_close_connection(self):
"""
Connection can be closed (even when not explicitly opened)
"""
conn = mail.get_connection(username="", password="")
conn.close()
def test_use_as_contextmanager(self):
"""
The connection can be used as a contextmanager.
"""
opened = [False]
closed = [False]
conn = mail.get_connection(username="", password="")
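# Replace open()/close() with recorders so the context-manager protocol can
# be observed without touching a real connection.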
def open():
opened[0] = True
conn.open = open
def close():
closed[0] = True
conn.close = close
with conn as same_conn:
self.assertTrue(opened[0])
self.assertIs(same_conn, conn)
self.assertFalse(closed[0])
self.assertTrue(closed[0])
class LocmemBackendTests(BaseEmailBackendTests, SimpleTestCase):
email_backend = "django.core.mail.backends.locmem.EmailBackend"
def get_mailbox_content(self):
return [m.message() for m in mail.outbox]
def flush_mailbox(self):
mail.outbox = []
def tearDown(self):
super().tearDown()
mail.outbox = []
def test_locmem_shared_messages(self):
"""
Make sure that the locmem backend populates the outbox.
"""
connection = locmem.EmailBackend()
connection2 = locmem.EmailBackend()
email = EmailMessage(
"Subject",
"Content",
"[email protected]",
["[email protected]"],
headers={"From": "[email protected]"},
)
connection.send_messages([email])
connection2.send_messages([email])
self.assertEqual(len(mail.outbox), 2)
def test_validate_multiline_headers(self):
# Ticket #18861 - Validate emails when using the locmem backend
with self.assertRaises(BadHeaderError):
send_mail(
"Subject\nMultiline", "Content", "[email protected]", ["[email protected]"]
)
class FileBackendTests(BaseEmailBackendTests, SimpleTestCase):
email_backend = "django.core.mail.backends.filebased.EmailBackend"
def setUp(self):
super().setUp()
self.tmp_dir = self.mkdtemp()
self.addCleanup(shutil.rmtree, self.tmp_dir)
self._settings_override = override_settings(EMAIL_FILE_PATH=self.tmp_dir)
self._settings_override.enable()
def tearDown(self):
self._settings_override.disable()
super().tearDown()
def mkdtemp(self):
return tempfile.mkdtemp()
def flush_mailbox(self):
for filename in os.listdir(self.tmp_dir):
os.unlink(os.path.join(self.tmp_dir, filename))
def get_mailbox_content(self):
messages = []
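# The filebased backend writes one file per connection and separates the
# messages inside it with a line of 79 dashes.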
for filename in os.listdir(self.tmp_dir):
with open(os.path.join(self.tmp_dir, filename), "rb") as fp:
session = fp.read().split(b"\n" + (b"-" * 79) + b"\n")
messages.extend(message_from_bytes(m) for m in session if m)
return messages
def test_file_sessions(self):
"""Make sure opening a connection creates a new file"""
msg = EmailMessage(
"Subject",
"Content",
"[email protected]",
["[email protected]"],
headers={"From": "[email protected]"},
)
connection = mail.get_connection()
connection.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 1)
with open(os.path.join(self.tmp_dir, os.listdir(self.tmp_dir)[0]), "rb") as fp:
message = message_from_binary_file(fp)
self.assertEqual(message.get_content_type(), "text/plain")
self.assertEqual(message.get("subject"), "Subject")
self.assertEqual(message.get("from"), "[email protected]")
self.assertEqual(message.get("to"), "[email protected]")
connection2 = mail.get_connection()
connection2.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 2)
connection.send_messages([msg])
self.assertEqual(len(os.listdir(self.tmp_dir)), 2)
msg.connection = mail.get_connection()
self.assertTrue(connection.open())
msg.send()
self.assertEqual(len(os.listdir(self.tmp_dir)), 3)
msg.send()
self.assertEqual(len(os.listdir(self.tmp_dir)), 3)
connection.close()
class FileBackendPathLibTests(FileBackendTests):
def mkdtemp(self):
tmp_dir = super().mkdtemp()
return Path(tmp_dir)
class ConsoleBackendTests(BaseEmailBackendTests, SimpleTestCase):
email_backend = "django.core.mail.backends.console.EmailBackend"
def setUp(self):
super().setUp()
self.__stdout = sys.stdout
self.stream = sys.stdout = StringIO()
def tearDown(self):
del self.stream
sys.stdout = self.__stdout
del self.__stdout
super().tearDown()
def flush_mailbox(self):
self.stream = sys.stdout = StringIO()
def get_mailbox_content(self):
messages = self.stream.getvalue().split("\n" + ("-" * 79) + "\n")
return [message_from_bytes(m.encode()) for m in messages if m]
def test_console_stream_kwarg(self):
"""
The console backend can be pointed at an arbitrary stream.
"""
s = StringIO()
connection = mail.get_connection(
"django.core.mail.backends.console.EmailBackend", stream=s
)
send_mail(
"Subject",
"Content",
"[email protected]",
["[email protected]"],
connection=connection,
)
message = s.getvalue().split("\n" + ("-" * 79) + "\n")[0].encode()
self.assertMessageHasHeaders(
message,
{
("MIME-Version", "1.0"),
("Content-Type", 'text/plain; charset="utf-8"'),
("Content-Transfer-Encoding", "7bit"),
("Subject", "Subject"),
("From", "[email protected]"),
("To", "[email protected]"),
},
)
self.assertIn(b"\nDate: ", message)
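# Minimal aiosmtpd handler acting as an in-process SMTP server: it records
# each received message; if the envelope sender doesn't match the From
# header, it retries after RFC 2047-encoding the local part and rejects
# with a 553 if they still differ.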
class SMTPHandler:
def __init__(self, *args, **kwargs):
self.mailbox = []
async def handle_DATA(self, server, session, envelope):
data = envelope.content
mail_from = envelope.mail_from
message = message_from_bytes(data.rstrip())
message_addr = parseaddr(message.get("from"))[1]
if mail_from != message_addr:
# According to the spec, mail_from does not necessarily match the
# From header - this is the case where the local part isn't
# encoded, so try to correct that.
lp, domain = mail_from.split("@", 1)
lp = Header(lp, "utf-8").encode()
mail_from = "@".join([lp, domain])
if mail_from != message_addr:
return f"553 '{mail_from}' != '{message_addr}'"
self.mailbox.append(message)
return "250 OK"
def flush_mailbox(self):
self.mailbox[:] = []
@skipUnless(HAS_AIOSMTPD, "No aiosmtpd library detected.")
class SMTPBackendTestsBase(SimpleTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
# Find a free port.
with socket.socket() as s:
s.bind(("127.0.0.1", 0))
port = s.getsockname()[1]
cls.smtp_handler = SMTPHandler()
cls.smtp_controller = Controller(
cls.smtp_handler,
hostname="127.0.0.1",
port=port,
)
cls._settings_override = override_settings(
EMAIL_HOST=cls.smtp_controller.hostname,
EMAIL_PORT=cls.smtp_controller.port,
)
cls._settings_override.enable()
cls.addClassCleanup(cls._settings_override.disable)
cls.smtp_controller.start()
cls.addClassCleanup(cls.stop_smtp)
@classmethod
def stop_smtp(cls):
cls.smtp_controller.stop()
@skipUnless(HAS_AIOSMTPD, "No aiosmtpd library detected.")
class SMTPBackendTests(BaseEmailBackendTests, SMTPBackendTestsBase):
email_backend = "django.core.mail.backends.smtp.EmailBackend"
def setUp(self):
super().setUp()
self.smtp_handler.flush_mailbox()
def tearDown(self):
self.smtp_handler.flush_mailbox()
super().tearDown()
def flush_mailbox(self):
self.smtp_handler.flush_mailbox()
def get_mailbox_content(self):
return self.smtp_handler.mailbox
@override_settings(
EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD="not empty password",
)
def test_email_authentication_use_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.username, "not empty username")
self.assertEqual(backend.password, "not empty password")
@override_settings(
EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD="not empty password",
)
def test_email_authentication_override_settings(self):
backend = smtp.EmailBackend(username="username", password="password")
self.assertEqual(backend.username, "username")
self.assertEqual(backend.password, "password")
@override_settings(
EMAIL_HOST_USER="not empty username",
EMAIL_HOST_PASSWORD="not empty password",
)
def test_email_disabled_authentication(self):
backend = smtp.EmailBackend(username="", password="")
self.assertEqual(backend.username, "")
self.assertEqual(backend.password, "")
def test_auth_attempted(self):
"""
Opening the backend with a non-empty username/password tries
to authenticate against the SMTP server.
"""
backend = smtp.EmailBackend(
username="not empty username", password="not empty password"
)
with self.assertRaisesMessage(
SMTPException, "SMTP AUTH extension not supported by server."
):
with backend:
pass
def test_server_open(self):
"""
open() returns whether it opened a connection.
"""
backend = smtp.EmailBackend(username="", password="")
self.assertIsNone(backend.connection)
opened = backend.open()
backend.close()
self.assertIs(opened, True)
def test_reopen_connection(self):
backend = smtp.EmailBackend()
# Simulate an already open connection.
backend.connection = mock.Mock(spec=object())
self.assertIs(backend.open(), False)
@override_settings(EMAIL_USE_TLS=True)
def test_email_tls_use_settings(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_tls)
@override_settings(EMAIL_USE_TLS=True)
def test_email_tls_override_settings(self):
backend = smtp.EmailBackend(use_tls=False)
self.assertFalse(backend.use_tls)
def test_email_tls_default_disabled(self):
backend = smtp.EmailBackend()
self.assertFalse(backend.use_tls)
def test_ssl_tls_mutually_exclusive(self):
msg = (
"EMAIL_USE_TLS/EMAIL_USE_SSL are mutually exclusive, so only set "
"one of those settings to True."
)
with self.assertRaisesMessage(ValueError, msg):
smtp.EmailBackend(use_ssl=True, use_tls=True)
@override_settings(EMAIL_USE_SSL=True)
def test_email_ssl_use_settings(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_ssl)
@override_settings(EMAIL_USE_SSL=True)
def test_email_ssl_override_settings(self):
backend = smtp.EmailBackend(use_ssl=False)
self.assertFalse(backend.use_ssl)
def test_email_ssl_default_disabled(self):
backend = smtp.EmailBackend()
self.assertFalse(backend.use_ssl)
@override_settings(EMAIL_SSL_CERTFILE="foo")
def test_email_ssl_certfile_use_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.ssl_certfile, "foo")
@override_settings(EMAIL_SSL_CERTFILE="foo")
def test_email_ssl_certfile_override_settings(self):
backend = smtp.EmailBackend(ssl_certfile="bar")
self.assertEqual(backend.ssl_certfile, "bar")
def test_email_ssl_certfile_default_disabled(self):
backend = smtp.EmailBackend()
self.assertIsNone(backend.ssl_certfile)
@override_settings(EMAIL_SSL_KEYFILE="foo")
def test_email_ssl_keyfile_use_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.ssl_keyfile, "foo")
@override_settings(EMAIL_SSL_KEYFILE="foo")
def test_email_ssl_keyfile_override_settings(self):
backend = smtp.EmailBackend(ssl_keyfile="bar")
self.assertEqual(backend.ssl_keyfile, "bar")
def test_email_ssl_keyfile_default_disabled(self):
backend = smtp.EmailBackend()
self.assertIsNone(backend.ssl_keyfile)
@override_settings(EMAIL_USE_TLS=True)
def test_email_tls_attempts_starttls(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_tls)
with self.assertRaisesMessage(
SMTPException, "STARTTLS extension not supported by server."
):
with backend:
pass
@override_settings(EMAIL_USE_SSL=True)
def test_email_ssl_attempts_ssl_connection(self):
backend = smtp.EmailBackend()
self.assertTrue(backend.use_ssl)
with self.assertRaises(SSLError):
with backend:
pass
def test_connection_timeout_default(self):
"""The connection's timeout value is None by default."""
connection = mail.get_connection("django.core.mail.backends.smtp.EmailBackend")
self.assertIsNone(connection.timeout)
def test_connection_timeout_custom(self):
"""The timeout parameter can be customized."""
class MyEmailBackend(smtp.EmailBackend):
def __init__(self, *args, **kwargs):
kwargs.setdefault("timeout", 42)
super().__init__(*args, **kwargs)
myemailbackend = MyEmailBackend()
myemailbackend.open()
self.assertEqual(myemailbackend.timeout, 42)
self.assertEqual(myemailbackend.connection.timeout, 42)
myemailbackend.close()
@override_settings(EMAIL_TIMEOUT=10)
def test_email_timeout_override_settings(self):
backend = smtp.EmailBackend()
self.assertEqual(backend.timeout, 10)
def test_email_msg_uses_crlf(self):
"""#23063 -- RFC-compliant messages are sent over SMTP."""
send = SMTP.send
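# Wrap smtplib.SMTP.send to capture everything written to the socket; the
# original method is restored in the finally block below.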
try:
smtp_messages = []
def mock_send(self, s):
smtp_messages.append(s)
return send(self, s)
SMTP.send = mock_send
email = EmailMessage(
"Subject", "Content", "[email protected]", ["[email protected]"]
)
mail.get_connection().send_messages([email])
# Find the actual message
msg = None
for i, m in enumerate(smtp_messages):
if m[:4] == "data":
msg = smtp_messages[i + 1]
break
self.assertTrue(msg)
msg = msg.decode()
# The message only contains CRLF and not combinations of CRLF, LF, and CR.
msg = msg.replace("\r\n", "")
self.assertNotIn("\r", msg)
self.assertNotIn("\n", msg)
finally:
SMTP.send = send
def test_send_messages_after_open_failed(self):
"""
send_messages() shouldn't try to send messages if open() raises an
exception after initializing the connection.
"""
backend = smtp.EmailBackend()
# Simulate connection initialization success and a subsequent
# connection exception.
backend.connection = mock.Mock(spec=object())
backend.open = lambda: None
email = EmailMessage(
"Subject", "Content", "[email protected]", ["[email protected]"]
)
self.assertEqual(backend.send_messages([email]), 0)
def test_send_messages_empty_list(self):
backend = smtp.EmailBackend()
backend.connection = mock.Mock(spec=object())
self.assertEqual(backend.send_messages([]), 0)
def test_send_messages_zero_sent(self):
"""A message isn't sent if it doesn't have any recipients."""
backend = smtp.EmailBackend()
backend.connection = mock.Mock(spec=object())
email = EmailMessage("Subject", "Content", "[email protected]", to=[])
sent = backend.send_messages([email])
self.assertEqual(sent, 0)
@skipUnless(HAS_AIOSMTPD, "No aiosmtpd library detected.")
class SMTPBackendStoppedServerTests(SMTPBackendTestsBase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.backend = smtp.EmailBackend(username="", password="")
cls.smtp_controller.stop()
@classmethod
def stop_smtp(cls):
# SMTP controller is stopped in setUpClass().
pass
def test_server_stopped(self):
"""
Closing the backend while the SMTP server is stopped doesn't raise an
exception.
"""
self.backend.close()
def test_fail_silently_on_connection_error(self):
"""
A socket connection error is silenced with fail_silently=True.
"""
with self.assertRaises(ConnectionError):
self.backend.open()
self.backend.fail_silently = True
self.backend.open()
|
93eb2ea926cbaa4caf484e78140e3dd01417294ca58e4e02b2c3e6b9d14d551c | import datetime
import decimal
import gettext as gettext_module
import os
import pickle
import re
import tempfile
from contextlib import contextmanager
from importlib import import_module
from pathlib import Path
from unittest import mock
from asgiref.local import Local
from django import forms
from django.apps import AppConfig
from django.conf import settings
from django.conf.locale import LANG_INFO
from django.conf.urls.i18n import i18n_patterns
from django.template import Context, Template
from django.test import (
RequestFactory,
SimpleTestCase,
TestCase,
ignore_warnings,
override_settings,
)
from django.utils import translation
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.formats import (
date_format,
get_format,
iter_format_modules,
localize,
localize_input,
reset_format_cache,
sanitize_separators,
sanitize_strftime_format,
time_format,
)
from django.utils.numberformat import format as nformat
from django.utils.safestring import SafeString, mark_safe
from django.utils.translation import (
activate,
check_for_language,
deactivate,
get_language,
get_language_bidi,
get_language_from_request,
get_language_info,
gettext,
gettext_lazy,
ngettext,
ngettext_lazy,
npgettext,
npgettext_lazy,
pgettext,
round_away_from_one,
to_language,
to_locale,
trans_null,
trans_real,
)
from django.utils.translation.reloader import (
translation_file_changed,
watch_for_translation_changes,
)
from .forms import CompanyForm, I18nForm, SelectDateForm
from .models import Company, TestModel
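# LOCALE_PATHS is extended with this test app's extra translations
# (other/locale) so individual tests can opt into the custom catalogs via
# override_settings.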
here = os.path.dirname(os.path.abspath(__file__))
extended_locale_paths = settings.LOCALE_PATHS + [
os.path.join(here, "other", "locale"),
]
class AppModuleStub:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
@contextmanager
def patch_formats(lang, **settings):
from django.utils.formats import _format_cache
# Populate _format_cache with temporary values
for key, value in settings.items():
_format_cache[(key, lang)] = value
try:
yield
finally:
reset_format_cache()
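# Usage sketch (values are illustrative):
#   with patch_formats("fr", THOUSAND_SEPARATOR=".", DECIMAL_SEPARATOR=","):
#       ...  # get_format() returns the patched values for "fr"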
class TranslationTests(SimpleTestCase):
@translation.override("fr")
def test_plural(self):
"""
Test plurals with ngettext. French differs from English in that 0 is singular.
"""
self.assertEqual(
ngettext("%(num)d year", "%(num)d years", 0) % {"num": 0},
"0 année",
)
self.assertEqual(
ngettext("%(num)d year", "%(num)d years", 2) % {"num": 2},
"2 années",
)
self.assertEqual(
ngettext("%(size)d byte", "%(size)d bytes", 0) % {"size": 0}, "0 octet"
)
self.assertEqual(
ngettext("%(size)d byte", "%(size)d bytes", 2) % {"size": 2}, "2 octets"
)
def test_plural_null(self):
g = trans_null.ngettext
self.assertEqual(g("%(num)d year", "%(num)d years", 0) % {"num": 0}, "0 years")
self.assertEqual(g("%(num)d year", "%(num)d years", 1) % {"num": 1}, "1 year")
self.assertEqual(g("%(num)d year", "%(num)d years", 2) % {"num": 2}, "2 years")
@override_settings(LOCALE_PATHS=extended_locale_paths)
@translation.override("fr")
def test_multiple_plurals_per_language(self):
"""
Normally, French has 2 plurals. As other/locale/fr/LC_MESSAGES/django.po
has a different plural equation with 3 plurals, this tests that those
plurals are honored.
"""
self.assertEqual(ngettext("%d singular", "%d plural", 0) % 0, "0 pluriel1")
self.assertEqual(ngettext("%d singular", "%d plural", 1) % 1, "1 singulier")
self.assertEqual(ngettext("%d singular", "%d plural", 2) % 2, "2 pluriel2")
french = trans_real.catalog()
# Internal _catalog can query subcatalogs (from different po files).
self.assertEqual(french._catalog[("%d singular", 0)], "%d singulier")
self.assertEqual(french._catalog[("%(num)d hour", 0)], "%(num)d heure")
def test_override(self):
activate("de")
try:
with translation.override("pl"):
self.assertEqual(get_language(), "pl")
self.assertEqual(get_language(), "de")
with translation.override(None):
self.assertIsNone(get_language())
with translation.override("pl"):
pass
self.assertIsNone(get_language())
self.assertEqual(get_language(), "de")
finally:
deactivate()
def test_override_decorator(self):
@translation.override("pl")
def func_pl():
self.assertEqual(get_language(), "pl")
@translation.override(None)
def func_none():
self.assertIsNone(get_language())
try:
activate("de")
func_pl()
self.assertEqual(get_language(), "de")
func_none()
self.assertEqual(get_language(), "de")
finally:
deactivate()
def test_override_exit(self):
"""
The language restored is the one used when the function was
called, not the one used when the decorator was initialized (#23381).
"""
activate("fr")
@translation.override("pl")
def func_pl():
pass
deactivate()
try:
activate("en")
func_pl()
self.assertEqual(get_language(), "en")
finally:
deactivate()
def test_lazy_objects(self):
"""
Format string interpolation should work with *_lazy objects.
"""
s = gettext_lazy("Add %(name)s")
d = {"name": "Ringo"}
self.assertEqual("Add Ringo", s % d)
with translation.override("de", deactivate=True):
self.assertEqual("Ringo hinzuf\xfcgen", s % d)
with translation.override("pl"):
self.assertEqual("Dodaj Ringo", s % d)
# It should be possible to compare *_lazy objects.
s1 = gettext_lazy("Add %(name)s")
self.assertEqual(s, s1)
s2 = gettext_lazy("Add %(name)s")
s3 = gettext_lazy("Add %(name)s")
self.assertEqual(s2, s3)
self.assertEqual(s, s2)
s4 = gettext_lazy("Some other string")
self.assertNotEqual(s, s4)
def test_lazy_pickle(self):
s1 = gettext_lazy("test")
self.assertEqual(str(s1), "test")
s2 = pickle.loads(pickle.dumps(s1))
self.assertEqual(str(s2), "test")
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_ngettext_lazy(self):
simple_with_format = ngettext_lazy("%d good result", "%d good results")
simple_context_with_format = npgettext_lazy(
"Exclamation", "%d good result", "%d good results"
)
simple_without_format = ngettext_lazy("good result", "good results")
with translation.override("de"):
self.assertEqual(simple_with_format % 1, "1 gutes Resultat")
self.assertEqual(simple_with_format % 4, "4 guten Resultate")
self.assertEqual(simple_context_with_format % 1, "1 gutes Resultat!")
self.assertEqual(simple_context_with_format % 4, "4 guten Resultate!")
self.assertEqual(simple_without_format % 1, "gutes Resultat")
self.assertEqual(simple_without_format % 4, "guten Resultate")
complex_nonlazy = ngettext_lazy(
"Hi %(name)s, %(num)d good result", "Hi %(name)s, %(num)d good results", 4
)
complex_deferred = ngettext_lazy(
"Hi %(name)s, %(num)d good result",
"Hi %(name)s, %(num)d good results",
"num",
)
complex_context_nonlazy = npgettext_lazy(
"Greeting",
"Hi %(name)s, %(num)d good result",
"Hi %(name)s, %(num)d good results",
4,
)
complex_context_deferred = npgettext_lazy(
"Greeting",
"Hi %(name)s, %(num)d good result",
"Hi %(name)s, %(num)d good results",
"num",
)
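# Passing a number (4) as the last argument binds the plural form
# eagerly; passing a key name ("num") defers the choice until
# %-interpolation supplies that key -- hence the KeyError assertions
# below when "num" is missing from the dictionary.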
with translation.override("de"):
self.assertEqual(
complex_nonlazy % {"num": 4, "name": "Jim"},
"Hallo Jim, 4 guten Resultate",
)
self.assertEqual(
complex_deferred % {"name": "Jim", "num": 1},
"Hallo Jim, 1 gutes Resultat",
)
self.assertEqual(
complex_deferred % {"name": "Jim", "num": 5},
"Hallo Jim, 5 guten Resultate",
)
with self.assertRaisesMessage(KeyError, "Your dictionary lacks key"):
complex_deferred % {"name": "Jim"}
self.assertEqual(
complex_context_nonlazy % {"num": 4, "name": "Jim"},
"Willkommen Jim, 4 guten Resultate",
)
self.assertEqual(
complex_context_deferred % {"name": "Jim", "num": 1},
"Willkommen Jim, 1 gutes Resultat",
)
self.assertEqual(
complex_context_deferred % {"name": "Jim", "num": 5},
"Willkommen Jim, 5 guten Resultate",
)
with self.assertRaisesMessage(KeyError, "Your dictionary lacks key"):
complex_context_deferred % {"name": "Jim"}
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_ngettext_lazy_format_style(self):
simple_with_format = ngettext_lazy("{} good result", "{} good results")
simple_context_with_format = npgettext_lazy(
"Exclamation", "{} good result", "{} good results"
)
with translation.override("de"):
self.assertEqual(simple_with_format.format(1), "1 gutes Resultat")
self.assertEqual(simple_with_format.format(4), "4 guten Resultate")
self.assertEqual(simple_context_with_format.format(1), "1 gutes Resultat!")
self.assertEqual(simple_context_with_format.format(4), "4 guten Resultate!")
complex_nonlazy = ngettext_lazy(
"Hi {name}, {num} good result", "Hi {name}, {num} good results", 4
)
complex_deferred = ngettext_lazy(
"Hi {name}, {num} good result", "Hi {name}, {num} good results", "num"
)
complex_context_nonlazy = npgettext_lazy(
"Greeting",
"Hi {name}, {num} good result",
"Hi {name}, {num} good results",
4,
)
complex_context_deferred = npgettext_lazy(
"Greeting",
"Hi {name}, {num} good result",
"Hi {name}, {num} good results",
"num",
)
with translation.override("de"):
self.assertEqual(
complex_nonlazy.format(num=4, name="Jim"),
"Hallo Jim, 4 guten Resultate",
)
self.assertEqual(
complex_deferred.format(name="Jim", num=1),
"Hallo Jim, 1 gutes Resultat",
)
self.assertEqual(
complex_deferred.format(name="Jim", num=5),
"Hallo Jim, 5 guten Resultate",
)
with self.assertRaisesMessage(KeyError, "Your dictionary lacks key"):
complex_deferred.format(name="Jim")
self.assertEqual(
complex_context_nonlazy.format(num=4, name="Jim"),
"Willkommen Jim, 4 guten Resultate",
)
self.assertEqual(
complex_context_deferred.format(name="Jim", num=1),
"Willkommen Jim, 1 gutes Resultat",
)
self.assertEqual(
complex_context_deferred.format(name="Jim", num=5),
"Willkommen Jim, 5 guten Resultate",
)
with self.assertRaisesMessage(KeyError, "Your dictionary lacks key"):
complex_context_deferred.format(name="Jim")
def test_ngettext_lazy_bool(self):
self.assertTrue(ngettext_lazy("%d good result", "%d good results"))
self.assertFalse(ngettext_lazy("", ""))
def test_ngettext_lazy_pickle(self):
s1 = ngettext_lazy("%d good result", "%d good results")
self.assertEqual(s1 % 1, "1 good result")
self.assertEqual(s1 % 8, "8 good results")
s2 = pickle.loads(pickle.dumps(s1))
self.assertEqual(s2 % 1, "1 good result")
self.assertEqual(s2 % 8, "8 good results")
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_pgettext(self):
trans_real._active = Local()
trans_real._translations = {}
with translation.override("de"):
self.assertEqual(pgettext("unexisting", "May"), "May")
self.assertEqual(pgettext("month name", "May"), "Mai")
self.assertEqual(pgettext("verb", "May"), "Kann")
self.assertEqual(
npgettext("search", "%d result", "%d results", 4) % 4, "4 Resultate"
)
def test_empty_value(self):
"""Empty value must stay empty after being translated (#23196)."""
with translation.override("de"):
self.assertEqual("", gettext(""))
s = mark_safe("")
self.assertEqual(s, gettext(s))
@override_settings(LOCALE_PATHS=extended_locale_paths)
def test_safe_status(self):
"""
Translating a string requiring no auto-escaping with gettext or pgettext
shouldn't change the "safe" status.
"""
trans_real._active = Local()
trans_real._translations = {}
s1 = mark_safe("Password")
s2 = mark_safe("May")
with translation.override("de", deactivate=True):
self.assertIs(type(gettext(s1)), SafeString)
self.assertIs(type(pgettext("month name", s2)), SafeString)
self.assertEqual("aPassword", SafeString("a") + s1)
self.assertEqual("Passworda", s1 + SafeString("a"))
self.assertEqual("Passworda", s1 + mark_safe("a"))
self.assertEqual("aPassword", mark_safe("a") + s1)
self.assertEqual("as", mark_safe("a") + mark_safe("s"))
def test_maclines(self):
"""
Translations in files with Mac or DOS line endings are converted
"""
ca_translation = trans_real.translation("ca")
ca_translation._catalog["Mac\nEOF\n"] = "Catalan Mac\nEOF\n"
ca_translation._catalog["Win\nEOF\n"] = "Catalan Win\nEOF\n"
with translation.override("ca", deactivate=True):
self.assertEqual("Catalan Mac\nEOF\n", gettext("Mac\rEOF\r"))
self.assertEqual("Catalan Win\nEOF\n", gettext("Win\r\nEOF\r\n"))
def test_to_locale(self):
tests = (
("en", "en"),
("EN", "en"),
("en-us", "en_US"),
("EN-US", "en_US"),
("en_US", "en_US"),
# With > 2 characters after the dash.
("sr-latn", "sr_Latn"),
("sr-LATN", "sr_Latn"),
("sr_Latn", "sr_Latn"),
# 3-char language codes.
("ber-MA", "ber_MA"),
("BER-MA", "ber_MA"),
("BER_MA", "ber_MA"),
("ber_MA", "ber_MA"),
# With private use subtag (x-informal).
("nl-nl-x-informal", "nl_NL-x-informal"),
("NL-NL-X-INFORMAL", "nl_NL-x-informal"),
("sr-latn-x-informal", "sr_Latn-x-informal"),
("SR-LATN-X-INFORMAL", "sr_Latn-x-informal"),
)
for lang, locale in tests:
with self.subTest(lang=lang):
self.assertEqual(to_locale(lang), locale)
def test_to_language(self):
self.assertEqual(to_language("en_US"), "en-us")
self.assertEqual(to_language("sr_Lat"), "sr-lat")
def test_language_bidi(self):
self.assertIs(get_language_bidi(), False)
with translation.override(None):
self.assertIs(get_language_bidi(), False)
def test_language_bidi_null(self):
self.assertIs(trans_null.get_language_bidi(), False)
with override_settings(LANGUAGE_CODE="he"):
self.assertIs(get_language_bidi(), True)
class TranslationLoadingTests(SimpleTestCase):
def setUp(self):
"""Clear translation state."""
self._old_language = get_language()
self._old_translations = trans_real._translations
deactivate()
trans_real._translations = {}
def tearDown(self):
trans_real._translations = self._old_translations
activate(self._old_language)
@override_settings(
USE_I18N=True,
LANGUAGE_CODE="en",
LANGUAGES=[
("en", "English"),
("en-ca", "English (Canada)"),
("en-nz", "English (New Zealand)"),
("en-au", "English (Australia)"),
],
LOCALE_PATHS=[os.path.join(here, "loading")],
INSTALLED_APPS=["i18n.loading_app"],
)
def test_translation_loading(self):
"""
"loading_app" does not have translations for all languages provided by
"loading". Catalogs are merged correctly.
"""
tests = [
("en", "local country person"),
("en_AU", "aussie"),
("en_NZ", "kiwi"),
("en_CA", "canuck"),
]
# Load all relevant translations.
for language, _ in tests:
activate(language)
# Catalogs are merged correctly.
for language, nickname in tests:
with self.subTest(language=language):
activate(language)
self.assertEqual(gettext("local country person"), nickname)
class TranslationThreadSafetyTests(SimpleTestCase):
def setUp(self):
self._old_language = get_language()
self._translations = trans_real._translations
# This test relies on .split() being called inside _fetch() in
# trans_real.translation().
class sideeffect_str(str):
def split(self, *args, **kwargs):
res = str.split(self, *args, **kwargs)
trans_real._translations["en-YY"] = None
return res
trans_real._translations = {sideeffect_str("en-XX"): None}
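# Looking up a translation now mutates _translations while it is being
# iterated; if translation.activate() handled this unsafely, it would
# raise "RuntimeError: dictionary changed size during iteration".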
def tearDown(self):
trans_real._translations = self._translations
activate(self._old_language)
def test_bug14894_translation_activate_thread_safety(self):
translation_count = len(trans_real._translations)
# May raise RuntimeError if translation.activate() isn't thread-safe.
translation.activate("pl")
# Make sure sideeffect_str actually added a new translation.
self.assertLess(translation_count, len(trans_real._translations))
class FormattingTests(SimpleTestCase):
def setUp(self):
super().setUp()
self.n = decimal.Decimal("66666.666")
self.f = 99999.999
self.d = datetime.date(2009, 12, 31)
self.dt = datetime.datetime(2009, 12, 31, 20, 50)
self.t = datetime.time(10, 15, 48)
self.long = 10000
self.ctxt = Context(
{
"n": self.n,
"t": self.t,
"d": self.d,
"dt": self.dt,
"f": self.f,
"l": self.long,
}
)
def test_all_format_strings(self):
all_locales = LANG_INFO.keys()
some_date = datetime.date(2017, 10, 14)
some_datetime = datetime.datetime(2017, 10, 14, 10, 23)
for locale in all_locales:
with self.subTest(locale=locale), translation.override(locale):
self.assertIn(
"2017", date_format(some_date)
) # Uses DATE_FORMAT by default
self.assertIn(
"23", time_format(some_datetime)
) # Uses TIME_FORMAT by default
self.assertIn(
"2017",
date_format(some_datetime, format=get_format("DATETIME_FORMAT")),
)
self.assertIn(
"2017",
date_format(some_date, format=get_format("YEAR_MONTH_FORMAT")),
)
self.assertIn(
"14", date_format(some_date, format=get_format("MONTH_DAY_FORMAT"))
)
self.assertIn(
"2017",
date_format(some_date, format=get_format("SHORT_DATE_FORMAT")),
)
self.assertIn(
"2017",
date_format(
some_datetime, format=get_format("SHORT_DATETIME_FORMAT")
),
)
def test_locale_independent(self):
"""
Number formatting with explicit separators is locale-independent.
"""
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual(
"66666.66",
nformat(
self.n, decimal_sep=".", decimal_pos=2, grouping=3, thousand_sep=","
),
)
self.assertEqual(
"66666A6",
nformat(
self.n, decimal_sep="A", decimal_pos=1, grouping=1, thousand_sep="B"
),
)
self.assertEqual(
"66666",
nformat(
self.n, decimal_sep="X", decimal_pos=0, grouping=1, thousand_sep="Y"
),
)
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual(
"66,666.66",
nformat(
self.n, decimal_sep=".", decimal_pos=2, grouping=3, thousand_sep=","
),
)
self.assertEqual(
"6B6B6B6B6A6",
nformat(
self.n, decimal_sep="A", decimal_pos=1, grouping=1, thousand_sep="B"
),
)
self.assertEqual(
"-66666.6", nformat(-66666.666, decimal_sep=".", decimal_pos=1)
)
self.assertEqual(
"-66666.0", nformat(int("-66666"), decimal_sep=".", decimal_pos=1)
)
self.assertEqual(
"10000.0", nformat(self.long, decimal_sep=".", decimal_pos=1)
)
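# Sequence grouping (a hedged summary of the numberformat convention):
# each entry is a group size applied right-to-left, a trailing 0
# repeats the previous size (Indian-style 3,2,2,... grouping), and -1
# stops grouping the remaining digits.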
self.assertEqual(
"10,00,00,000.00",
nformat(
100000000.00,
decimal_sep=".",
decimal_pos=2,
grouping=(3, 2, 0),
thousand_sep=",",
),
)
self.assertEqual(
"1,0,00,000,0000.00",
nformat(
10000000000.00,
decimal_sep=".",
decimal_pos=2,
grouping=(4, 3, 2, 1, 0),
thousand_sep=",",
),
)
self.assertEqual(
"10000,00,000.00",
nformat(
1000000000.00,
decimal_sep=".",
decimal_pos=2,
grouping=(3, 2, -1),
thousand_sep=",",
),
)
# This unusual grouping/force_grouping combination may be triggered
# by the intcomma filter.
self.assertEqual(
"10000",
nformat(
self.long,
decimal_sep=".",
decimal_pos=0,
grouping=0,
force_grouping=True,
),
)
# date filter
self.assertEqual(
"31.12.2009 в 20:50",
Template('{{ dt|date:"d.m.Y в H:i" }}').render(self.ctxt),
)
self.assertEqual(
"⌚ 10:15", Template('{{ t|time:"⌚ H:i" }}').render(self.ctxt)
)
@ignore_warnings(category=RemovedInDjango50Warning)
@override_settings(USE_L10N=False)
def test_l10n_disabled(self):
"""
With format localization disabled, the Catalan locale's translations
are used, but not its formats.
"""
with translation.override("ca", deactivate=True):
self.maxDiff = 3000
self.assertEqual("N j, Y", get_format("DATE_FORMAT"))
self.assertEqual(0, get_format("FIRST_DAY_OF_WEEK"))
self.assertEqual(".", get_format("DECIMAL_SEPARATOR"))
self.assertEqual("10:15 a.m.", time_format(self.t))
self.assertEqual("Des. 31, 2009", date_format(self.d))
self.assertEqual("desembre 2009", date_format(self.d, "YEAR_MONTH_FORMAT"))
self.assertEqual(
"12/31/2009 8:50 p.m.", date_format(self.dt, "SHORT_DATETIME_FORMAT")
)
self.assertEqual("No localizable", localize("No localizable"))
self.assertEqual("66666.666", localize(self.n))
self.assertEqual("99999.999", localize(self.f))
self.assertEqual("10000", localize(self.long))
self.assertEqual("Des. 31, 2009", localize(self.d))
self.assertEqual("Des. 31, 2009, 8:50 p.m.", localize(self.dt))
self.assertEqual("66666.666", Template("{{ n }}").render(self.ctxt))
self.assertEqual("99999.999", Template("{{ f }}").render(self.ctxt))
self.assertEqual("Des. 31, 2009", Template("{{ d }}").render(self.ctxt))
self.assertEqual(
"Des. 31, 2009, 8:50 p.m.", Template("{{ dt }}").render(self.ctxt)
)
self.assertEqual(
"66666.67", Template('{{ n|floatformat:"2u" }}').render(self.ctxt)
)
self.assertEqual(
"100000.0", Template('{{ f|floatformat:"u" }}').render(self.ctxt)
)
self.assertEqual(
"66666.67",
Template('{{ n|floatformat:"2gu" }}').render(self.ctxt),
)
self.assertEqual(
"100000.0",
Template('{{ f|floatformat:"ug" }}').render(self.ctxt),
)
self.assertEqual(
"10:15 a.m.", Template('{{ t|time:"TIME_FORMAT" }}').render(self.ctxt)
)
self.assertEqual(
"12/31/2009",
Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt),
)
self.assertEqual(
"12/31/2009 8:50 p.m.",
Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt),
)
form = I18nForm(
{
"decimal_field": "66666,666",
"float_field": "99999,999",
"date_field": "31/12/2009",
"datetime_field": "31/12/2009 20:50",
"time_field": "20:50",
"integer_field": "1.234",
}
)
self.assertFalse(form.is_valid())
self.assertEqual(["Introdu\xefu un n\xfamero."], form.errors["float_field"])
self.assertEqual(
["Introdu\xefu un n\xfamero."], form.errors["decimal_field"]
)
self.assertEqual(
["Introdu\xefu una data v\xe0lida."], form.errors["date_field"]
)
self.assertEqual(
["Introdu\xefu una data/hora v\xe0lides."],
form.errors["datetime_field"],
)
self.assertEqual(
["Introdu\xefu un n\xfamero enter."], form.errors["integer_field"]
)
form2 = SelectDateForm(
{
"date_field_month": "12",
"date_field_day": "31",
"date_field_year": "2009",
}
)
self.assertTrue(form2.is_valid())
self.assertEqual(
datetime.date(2009, 12, 31), form2.cleaned_data["date_field"]
)
self.assertHTMLEqual(
'<select name="mydate_month" id="id_mydate_month">'
'<option value="">---</option>'
'<option value="1">gener</option>'
'<option value="2">febrer</option>'
'<option value="3">mar\xe7</option>'
'<option value="4">abril</option>'
'<option value="5">maig</option>'
'<option value="6">juny</option>'
'<option value="7">juliol</option>'
'<option value="8">agost</option>'
'<option value="9">setembre</option>'
'<option value="10">octubre</option>'
'<option value="11">novembre</option>'
'<option value="12" selected>desembre</option>'
"</select>"
'<select name="mydate_day" id="id_mydate_day">'
'<option value="">---</option>'
'<option value="1">1</option>'
'<option value="2">2</option>'
'<option value="3">3</option>'
'<option value="4">4</option>'
'<option value="5">5</option>'
'<option value="6">6</option>'
'<option value="7">7</option>'
'<option value="8">8</option>'
'<option value="9">9</option>'
'<option value="10">10</option>'
'<option value="11">11</option>'
'<option value="12">12</option>'
'<option value="13">13</option>'
'<option value="14">14</option>'
'<option value="15">15</option>'
'<option value="16">16</option>'
'<option value="17">17</option>'
'<option value="18">18</option>'
'<option value="19">19</option>'
'<option value="20">20</option>'
'<option value="21">21</option>'
'<option value="22">22</option>'
'<option value="23">23</option>'
'<option value="24">24</option>'
'<option value="25">25</option>'
'<option value="26">26</option>'
'<option value="27">27</option>'
'<option value="28">28</option>'
'<option value="29">29</option>'
'<option value="30">30</option>'
'<option value="31" selected>31</option>'
"</select>"
'<select name="mydate_year" id="id_mydate_year">'
'<option value="">---</option>'
'<option value="2009" selected>2009</option>'
'<option value="2010">2010</option>'
'<option value="2011">2011</option>'
'<option value="2012">2012</option>'
'<option value="2013">2013</option>'
'<option value="2014">2014</option>'
'<option value="2015">2015</option>'
'<option value="2016">2016</option>'
'<option value="2017">2017</option>'
'<option value="2018">2018</option>'
"</select>",
forms.SelectDateWidget(years=range(2009, 2019)).render(
"mydate", datetime.date(2009, 12, 31)
),
)
# We shouldn't change the behavior of the floatformat filter re:
# thousand separator and grouping when localization is disabled
# even if the USE_THOUSAND_SEPARATOR, NUMBER_GROUPING and
# THOUSAND_SEPARATOR settings are specified.
with self.settings(
USE_THOUSAND_SEPARATOR=True, NUMBER_GROUPING=1, THOUSAND_SEPARATOR="!"
):
self.assertEqual(
"66666.67", Template('{{ n|floatformat:"2u" }}').render(self.ctxt)
)
self.assertEqual(
"100000.0", Template('{{ f|floatformat:"u" }}').render(self.ctxt)
)
def test_false_like_locale_formats(self):
"""
The active locale's formats take precedence over the default settings
even if they would be interpreted as False in a conditional test
(e.g. 0 or empty string) (#16938).
"""
with translation.override("fr"):
with self.settings(USE_THOUSAND_SEPARATOR=True, THOUSAND_SEPARATOR="!"):
self.assertEqual("\xa0", get_format("THOUSAND_SEPARATOR"))
# Even a second time (after the format has been cached)...
self.assertEqual("\xa0", get_format("THOUSAND_SEPARATOR"))
with self.settings(FIRST_DAY_OF_WEEK=0):
self.assertEqual(1, get_format("FIRST_DAY_OF_WEEK"))
# Even a second time (after the format has been cached)...
self.assertEqual(1, get_format("FIRST_DAY_OF_WEEK"))
def test_l10n_enabled(self):
self.maxDiff = 3000
# Catalan locale
with translation.override("ca", deactivate=True):
self.assertEqual(r"j E \d\e Y", get_format("DATE_FORMAT"))
self.assertEqual(1, get_format("FIRST_DAY_OF_WEEK"))
self.assertEqual(",", get_format("DECIMAL_SEPARATOR"))
self.assertEqual("10:15", time_format(self.t))
self.assertEqual("31 desembre de 2009", date_format(self.d))
self.assertEqual("1 abril de 2009", date_format(datetime.date(2009, 4, 1)))
self.assertEqual(
"desembre del 2009", date_format(self.d, "YEAR_MONTH_FORMAT")
)
self.assertEqual(
"31/12/2009 20:50", date_format(self.dt, "SHORT_DATETIME_FORMAT")
)
self.assertEqual("No localizable", localize("No localizable"))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual("66.666,666", localize(self.n))
self.assertEqual("99.999,999", localize(self.f))
self.assertEqual("10.000", localize(self.long))
self.assertEqual("True", localize(True))
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual("66666,666", localize(self.n))
self.assertEqual("99999,999", localize(self.f))
self.assertEqual("10000", localize(self.long))
self.assertEqual("31 desembre de 2009", localize(self.d))
self.assertEqual("31 desembre de 2009 a les 20:50", localize(self.dt))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual("66.666,666", Template("{{ n }}").render(self.ctxt))
self.assertEqual("99.999,999", Template("{{ f }}").render(self.ctxt))
self.assertEqual("10.000", Template("{{ l }}").render(self.ctxt))
with self.settings(USE_THOUSAND_SEPARATOR=True):
form3 = I18nForm(
{
"decimal_field": "66.666,666",
"float_field": "99.999,999",
"date_field": "31/12/2009",
"datetime_field": "31/12/2009 20:50",
"time_field": "20:50",
"integer_field": "1.234",
}
)
self.assertTrue(form3.is_valid())
self.assertEqual(
decimal.Decimal("66666.666"), form3.cleaned_data["decimal_field"]
)
self.assertEqual(99999.999, form3.cleaned_data["float_field"])
self.assertEqual(
datetime.date(2009, 12, 31), form3.cleaned_data["date_field"]
)
self.assertEqual(
datetime.datetime(2009, 12, 31, 20, 50),
form3.cleaned_data["datetime_field"],
)
self.assertEqual(
datetime.time(20, 50), form3.cleaned_data["time_field"]
)
self.assertEqual(1234, form3.cleaned_data["integer_field"])
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual("66666,666", Template("{{ n }}").render(self.ctxt))
self.assertEqual("99999,999", Template("{{ f }}").render(self.ctxt))
self.assertEqual(
"31 desembre de 2009", Template("{{ d }}").render(self.ctxt)
)
self.assertEqual(
"31 desembre de 2009 a les 20:50",
Template("{{ dt }}").render(self.ctxt),
)
self.assertEqual(
"66666,67", Template("{{ n|floatformat:2 }}").render(self.ctxt)
)
self.assertEqual(
"100000,0", Template("{{ f|floatformat }}").render(self.ctxt)
)
self.assertEqual(
"66.666,67",
Template('{{ n|floatformat:"2g" }}').render(self.ctxt),
)
self.assertEqual(
"100.000,0",
Template('{{ f|floatformat:"g" }}').render(self.ctxt),
)
self.assertEqual(
"10:15", Template('{{ t|time:"TIME_FORMAT" }}').render(self.ctxt)
)
self.assertEqual(
"31/12/2009",
Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt),
)
self.assertEqual(
"31/12/2009 20:50",
Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt),
)
self.assertEqual(
date_format(datetime.datetime.now(), "DATE_FORMAT"),
Template('{% now "DATE_FORMAT" %}').render(self.ctxt),
)
with self.settings(USE_THOUSAND_SEPARATOR=False):
form4 = I18nForm(
{
"decimal_field": "66666,666",
"float_field": "99999,999",
"date_field": "31/12/2009",
"datetime_field": "31/12/2009 20:50",
"time_field": "20:50",
"integer_field": "1234",
}
)
self.assertTrue(form4.is_valid())
self.assertEqual(
decimal.Decimal("66666.666"), form4.cleaned_data["decimal_field"]
)
self.assertEqual(99999.999, form4.cleaned_data["float_field"])
self.assertEqual(
datetime.date(2009, 12, 31), form4.cleaned_data["date_field"]
)
self.assertEqual(
datetime.datetime(2009, 12, 31, 20, 50),
form4.cleaned_data["datetime_field"],
)
self.assertEqual(
datetime.time(20, 50), form4.cleaned_data["time_field"]
)
self.assertEqual(1234, form4.cleaned_data["integer_field"])
form5 = SelectDateForm(
{
"date_field_month": "12",
"date_field_day": "31",
"date_field_year": "2009",
}
)
self.assertTrue(form5.is_valid())
self.assertEqual(
datetime.date(2009, 12, 31), form5.cleaned_data["date_field"]
)
self.assertHTMLEqual(
'<select name="mydate_day" id="id_mydate_day">'
'<option value="">---</option>'
'<option value="1">1</option>'
'<option value="2">2</option>'
'<option value="3">3</option>'
'<option value="4">4</option>'
'<option value="5">5</option>'
'<option value="6">6</option>'
'<option value="7">7</option>'
'<option value="8">8</option>'
'<option value="9">9</option>'
'<option value="10">10</option>'
'<option value="11">11</option>'
'<option value="12">12</option>'
'<option value="13">13</option>'
'<option value="14">14</option>'
'<option value="15">15</option>'
'<option value="16">16</option>'
'<option value="17">17</option>'
'<option value="18">18</option>'
'<option value="19">19</option>'
'<option value="20">20</option>'
'<option value="21">21</option>'
'<option value="22">22</option>'
'<option value="23">23</option>'
'<option value="24">24</option>'
'<option value="25">25</option>'
'<option value="26">26</option>'
'<option value="27">27</option>'
'<option value="28">28</option>'
'<option value="29">29</option>'
'<option value="30">30</option>'
'<option value="31" selected>31</option>'
"</select>"
'<select name="mydate_month" id="id_mydate_month">'
'<option value="">---</option>'
'<option value="1">gener</option>'
'<option value="2">febrer</option>'
'<option value="3">mar\xe7</option>'
'<option value="4">abril</option>'
'<option value="5">maig</option>'
'<option value="6">juny</option>'
'<option value="7">juliol</option>'
'<option value="8">agost</option>'
'<option value="9">setembre</option>'
'<option value="10">octubre</option>'
'<option value="11">novembre</option>'
'<option value="12" selected>desembre</option>'
"</select>"
'<select name="mydate_year" id="id_mydate_year">'
'<option value="">---</option>'
'<option value="2009" selected>2009</option>'
'<option value="2010">2010</option>'
'<option value="2011">2011</option>'
'<option value="2012">2012</option>'
'<option value="2013">2013</option>'
'<option value="2014">2014</option>'
'<option value="2015">2015</option>'
'<option value="2016">2016</option>'
'<option value="2017">2017</option>'
'<option value="2018">2018</option>'
"</select>",
forms.SelectDateWidget(years=range(2009, 2019)).render(
"mydate", datetime.date(2009, 12, 31)
),
)
# Russian locale (with E as month)
with translation.override("ru", deactivate=True):
self.assertHTMLEqual(
'<select name="mydate_day" id="id_mydate_day">'
'<option value="">---</option>'
'<option value="1">1</option>'
'<option value="2">2</option>'
'<option value="3">3</option>'
'<option value="4">4</option>'
'<option value="5">5</option>'
'<option value="6">6</option>'
'<option value="7">7</option>'
'<option value="8">8</option>'
'<option value="9">9</option>'
'<option value="10">10</option>'
'<option value="11">11</option>'
'<option value="12">12</option>'
'<option value="13">13</option>'
'<option value="14">14</option>'
'<option value="15">15</option>'
'<option value="16">16</option>'
'<option value="17">17</option>'
'<option value="18">18</option>'
'<option value="19">19</option>'
'<option value="20">20</option>'
'<option value="21">21</option>'
'<option value="22">22</option>'
'<option value="23">23</option>'
'<option value="24">24</option>'
'<option value="25">25</option>'
'<option value="26">26</option>'
'<option value="27">27</option>'
'<option value="28">28</option>'
'<option value="29">29</option>'
'<option value="30">30</option>'
'<option value="31" selected>31</option>'
"</select>"
'<select name="mydate_month" id="id_mydate_month">'
'<option value="">---</option>'
'<option value="1">\u042f\u043d\u0432\u0430\u0440\u044c</option>'
'<option value="2">\u0424\u0435\u0432\u0440\u0430\u043b\u044c</option>'
'<option value="3">\u041c\u0430\u0440\u0442</option>'
'<option value="4">\u0410\u043f\u0440\u0435\u043b\u044c</option>'
'<option value="5">\u041c\u0430\u0439</option>'
'<option value="6">\u0418\u044e\u043d\u044c</option>'
'<option value="7">\u0418\u044e\u043b\u044c</option>'
'<option value="8">\u0410\u0432\u0433\u0443\u0441\u0442</option>'
'<option value="9">\u0421\u0435\u043d\u0442\u044f\u0431\u0440\u044c'
"</option>"
'<option value="10">\u041e\u043a\u0442\u044f\u0431\u0440\u044c</option>'
'<option value="11">\u041d\u043e\u044f\u0431\u0440\u044c</option>'
'<option value="12" selected>\u0414\u0435\u043a\u0430\u0431\u0440\u044c'
"</option>"
"</select>"
'<select name="mydate_year" id="id_mydate_year">'
'<option value="">---</option>'
'<option value="2009" selected>2009</option>'
'<option value="2010">2010</option>'
'<option value="2011">2011</option>'
'<option value="2012">2012</option>'
'<option value="2013">2013</option>'
'<option value="2014">2014</option>'
'<option value="2015">2015</option>'
'<option value="2016">2016</option>'
'<option value="2017">2017</option>'
'<option value="2018">2018</option>'
"</select>",
forms.SelectDateWidget(years=range(2009, 2019)).render(
"mydate", datetime.date(2009, 12, 31)
),
)
# English locale
with translation.override("en", deactivate=True):
self.assertEqual("N j, Y", get_format("DATE_FORMAT"))
self.assertEqual(0, get_format("FIRST_DAY_OF_WEEK"))
self.assertEqual(".", get_format("DECIMAL_SEPARATOR"))
self.assertEqual("Dec. 31, 2009", date_format(self.d))
self.assertEqual("December 2009", date_format(self.d, "YEAR_MONTH_FORMAT"))
self.assertEqual(
"12/31/2009 8:50 p.m.", date_format(self.dt, "SHORT_DATETIME_FORMAT")
)
self.assertEqual("No localizable", localize("No localizable"))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual("66,666.666", localize(self.n))
self.assertEqual("99,999.999", localize(self.f))
self.assertEqual("10,000", localize(self.long))
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual("66666.666", localize(self.n))
self.assertEqual("99999.999", localize(self.f))
self.assertEqual("10000", localize(self.long))
self.assertEqual("Dec. 31, 2009", localize(self.d))
self.assertEqual("Dec. 31, 2009, 8:50 p.m.", localize(self.dt))
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual("66,666.666", Template("{{ n }}").render(self.ctxt))
self.assertEqual("99,999.999", Template("{{ f }}").render(self.ctxt))
self.assertEqual("10,000", Template("{{ l }}").render(self.ctxt))
with self.settings(USE_THOUSAND_SEPARATOR=False):
self.assertEqual("66666.666", Template("{{ n }}").render(self.ctxt))
self.assertEqual("99999.999", Template("{{ f }}").render(self.ctxt))
self.assertEqual("Dec. 31, 2009", Template("{{ d }}").render(self.ctxt))
self.assertEqual(
"Dec. 31, 2009, 8:50 p.m.", Template("{{ dt }}").render(self.ctxt)
)
self.assertEqual(
"66666.67", Template("{{ n|floatformat:2 }}").render(self.ctxt)
)
self.assertEqual(
"100000.0", Template("{{ f|floatformat }}").render(self.ctxt)
)
self.assertEqual(
"66,666.67",
Template('{{ n|floatformat:"2g" }}').render(self.ctxt),
)
self.assertEqual(
"100,000.0",
Template('{{ f|floatformat:"g" }}').render(self.ctxt),
)
self.assertEqual(
"12/31/2009",
Template('{{ d|date:"SHORT_DATE_FORMAT" }}').render(self.ctxt),
)
self.assertEqual(
"12/31/2009 8:50 p.m.",
Template('{{ dt|date:"SHORT_DATETIME_FORMAT" }}').render(self.ctxt),
)
form5 = I18nForm(
{
"decimal_field": "66666.666",
"float_field": "99999.999",
"date_field": "12/31/2009",
"datetime_field": "12/31/2009 20:50",
"time_field": "20:50",
"integer_field": "1234",
}
)
self.assertTrue(form5.is_valid())
self.assertEqual(
decimal.Decimal("66666.666"), form5.cleaned_data["decimal_field"]
)
self.assertEqual(99999.999, form5.cleaned_data["float_field"])
self.assertEqual(
datetime.date(2009, 12, 31), form5.cleaned_data["date_field"]
)
self.assertEqual(
datetime.datetime(2009, 12, 31, 20, 50),
form5.cleaned_data["datetime_field"],
)
self.assertEqual(datetime.time(20, 50), form5.cleaned_data["time_field"])
self.assertEqual(1234, form5.cleaned_data["integer_field"])
form6 = SelectDateForm(
{
"date_field_month": "12",
"date_field_day": "31",
"date_field_year": "2009",
}
)
self.assertTrue(form6.is_valid())
self.assertEqual(
datetime.date(2009, 12, 31), form6.cleaned_data["date_field"]
)
self.assertHTMLEqual(
'<select name="mydate_month" id="id_mydate_month">'
'<option value="">---</option>'
'<option value="1">January</option>'
'<option value="2">February</option>'
'<option value="3">March</option>'
'<option value="4">April</option>'
'<option value="5">May</option>'
'<option value="6">June</option>'
'<option value="7">July</option>'
'<option value="8">August</option>'
'<option value="9">September</option>'
'<option value="10">October</option>'
'<option value="11">November</option>'
'<option value="12" selected>December</option>'
"</select>"
'<select name="mydate_day" id="id_mydate_day">'
'<option value="">---</option>'
'<option value="1">1</option>'
'<option value="2">2</option>'
'<option value="3">3</option>'
'<option value="4">4</option>'
'<option value="5">5</option>'
'<option value="6">6</option>'
'<option value="7">7</option>'
'<option value="8">8</option>'
'<option value="9">9</option>'
'<option value="10">10</option>'
'<option value="11">11</option>'
'<option value="12">12</option>'
'<option value="13">13</option>'
'<option value="14">14</option>'
'<option value="15">15</option>'
'<option value="16">16</option>'
'<option value="17">17</option>'
'<option value="18">18</option>'
'<option value="19">19</option>'
'<option value="20">20</option>'
'<option value="21">21</option>'
'<option value="22">22</option>'
'<option value="23">23</option>'
'<option value="24">24</option>'
'<option value="25">25</option>'
'<option value="26">26</option>'
'<option value="27">27</option>'
'<option value="28">28</option>'
'<option value="29">29</option>'
'<option value="30">30</option>'
'<option value="31" selected>31</option>'
"</select>"
'<select name="mydate_year" id="id_mydate_year">'
'<option value="">---</option>'
'<option value="2009" selected>2009</option>'
'<option value="2010">2010</option>'
'<option value="2011">2011</option>'
'<option value="2012">2012</option>'
'<option value="2013">2013</option>'
'<option value="2014">2014</option>'
'<option value="2015">2015</option>'
'<option value="2016">2016</option>'
'<option value="2017">2017</option>'
'<option value="2018">2018</option>'
"</select>",
forms.SelectDateWidget(years=range(2009, 2019)).render(
"mydate", datetime.date(2009, 12, 31)
),
)
def test_sub_locales(self):
"""
Check if sublocales fall back to the main locale
"""
with self.settings(USE_THOUSAND_SEPARATOR=True):
with translation.override("de-at", deactivate=True):
self.assertEqual("66.666,666", Template("{{ n }}").render(self.ctxt))
with translation.override("es-us", deactivate=True):
self.assertEqual("31 de diciembre de 2009", date_format(self.d))
def test_localized_input(self):
"""
Tests that form input is correctly localized.
"""
self.maxDiff = 1200
with translation.override("de-at", deactivate=True):
form6 = CompanyForm(
{
"name": "acme",
"date_added": datetime.datetime(2009, 12, 31, 6, 0, 0),
"cents_paid": decimal.Decimal("59.47"),
"products_delivered": 12000,
}
)
self.assertTrue(form6.is_valid())
self.assertHTMLEqual(
form6.as_ul(),
'<li><label for="id_name">Name:</label>'
'<input id="id_name" type="text" name="name" value="acme" '
' maxlength="50" required></li>'
'<li><label for="id_date_added">Date added:</label>'
'<input type="text" name="date_added" value="31.12.2009 06:00:00" '
' id="id_date_added" required></li>'
'<li><label for="id_cents_paid">Cents paid:</label>'
'<input type="text" name="cents_paid" value="59,47" id="id_cents_paid" '
" required></li>"
'<li><label for="id_products_delivered">Products delivered:</label>'
'<input type="text" name="products_delivered" value="12000" '
' id="id_products_delivered" required>'
"</li>",
)
self.assertEqual(
localize_input(datetime.datetime(2009, 12, 31, 6, 0, 0)),
"31.12.2009 06:00:00",
)
self.assertEqual(
datetime.datetime(2009, 12, 31, 6, 0, 0),
form6.cleaned_data["date_added"],
)
with self.settings(USE_THOUSAND_SEPARATOR=True):
# Checking for the localized "products_delivered" field
self.assertInHTML(
'<input type="text" name="products_delivered" '
'value="12.000" id="id_products_delivered" required>',
form6.as_ul(),
)
def test_localized_input_func(self):
tests = (
(True, "True"),
(datetime.date(1, 1, 1), "0001-01-01"),
(datetime.datetime(1, 1, 1), "0001-01-01 00:00:00"),
)
with self.settings(USE_THOUSAND_SEPARATOR=True):
for value, expected in tests:
with self.subTest(value=value):
self.assertEqual(localize_input(value), expected)
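# On some platforms, strftime() does not zero-pad %C/%F/%G/%Y for years
# below 1000. sanitize_strftime_format() rewrites the format so the
# output is padded consistently -- a summary inferred from the
# "%02d"/"%04d" expected values built in the next two tests.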
def test_sanitize_strftime_format(self):
for year in (1, 99, 999, 1000):
dt = datetime.date(year, 1, 1)
for fmt, expected in [
("%C", "%02d" % (year // 100)),
("%F", "%04d-01-01" % year),
("%G", "%04d" % year),
("%Y", "%04d" % year),
]:
with self.subTest(year=year, fmt=fmt):
fmt = sanitize_strftime_format(fmt)
self.assertEqual(dt.strftime(fmt), expected)
def test_sanitize_strftime_format_with_escaped_percent(self):
dt = datetime.date(1, 1, 1)
for fmt, expected in [
("%%C", "%C"),
("%%F", "%F"),
("%%G", "%G"),
("%%Y", "%Y"),
("%%%%C", "%%C"),
("%%%%F", "%%F"),
("%%%%G", "%%G"),
("%%%%Y", "%%Y"),
]:
with self.subTest(fmt=fmt):
fmt = sanitize_strftime_format(fmt)
self.assertEqual(dt.strftime(fmt), expected)
for year in (1, 99, 999, 1000):
dt = datetime.date(year, 1, 1)
for fmt, expected in [
("%%%C", "%%%02d" % (year // 100)),
("%%%F", "%%%04d-01-01" % year),
("%%%G", "%%%04d" % year),
("%%%Y", "%%%04d" % year),
("%%%%%C", "%%%%%02d" % (year // 100)),
("%%%%%F", "%%%%%04d-01-01" % year),
("%%%%%G", "%%%%%04d" % year),
("%%%%%Y", "%%%%%04d" % year),
]:
with self.subTest(year=year, fmt=fmt):
fmt = sanitize_strftime_format(fmt)
self.assertEqual(dt.strftime(fmt), expected)
def test_sanitize_separators(self):
"""
Tests django.utils.formats.sanitize_separators.
"""
# Non-strings are untouched
self.assertEqual(sanitize_separators(123), 123)
with translation.override("ru", deactivate=True):
# Russian locale has non-breaking space (\xa0) as thousand separator
# Usual space is accepted too when sanitizing inputs
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual(sanitize_separators("1\xa0234\xa0567"), "1234567")
self.assertEqual(sanitize_separators("77\xa0777,777"), "77777.777")
self.assertEqual(sanitize_separators("12 345"), "12345")
self.assertEqual(sanitize_separators("77 777,777"), "77777.777")
with translation.override(None): # RemovedInDjango50Warning
with self.settings(USE_THOUSAND_SEPARATOR=True, THOUSAND_SEPARATOR="."):
self.assertEqual(sanitize_separators("12\xa0345"), "12\xa0345")
with self.settings(USE_THOUSAND_SEPARATOR=True):
with patch_formats(
get_language(), THOUSAND_SEPARATOR=".", DECIMAL_SEPARATOR=","
):
self.assertEqual(sanitize_separators("10.234"), "10234")
# Suspicion that user entered dot as decimal separator (#22171)
self.assertEqual(sanitize_separators("10.10"), "10.10")
# RemovedInDjango50Warning: When the deprecation ends, remove
# @ignore_warnings and USE_L10N=False. The assertions should remain
# because format-related settings will take precedence over
# locale-dictated formats.
with ignore_warnings(category=RemovedInDjango50Warning):
with self.settings(USE_L10N=False):
with self.settings(DECIMAL_SEPARATOR=","):
self.assertEqual(sanitize_separators("1001,10"), "1001.10")
self.assertEqual(sanitize_separators("1001.10"), "1001.10")
with self.settings(
DECIMAL_SEPARATOR=",",
THOUSAND_SEPARATOR=".",
USE_THOUSAND_SEPARATOR=True,
):
self.assertEqual(sanitize_separators("1.001,10"), "1001.10")
self.assertEqual(sanitize_separators("1001,10"), "1001.10")
self.assertEqual(sanitize_separators("1001.10"), "1001.10")
# Invalid output.
self.assertEqual(sanitize_separators("1,001.10"), "1.001.10")
def test_iter_format_modules(self):
"""
Tests the iter_format_modules function.
"""
# Importing some format modules so that we can compare the returned
# modules with these expected modules
default_mod = import_module("django.conf.locale.de.formats")
test_mod = import_module("i18n.other.locale.de.formats")
test_mod2 = import_module("i18n.other2.locale.de.formats")
with translation.override("de-at", deactivate=True):
# Should return the correct default module when no setting is set
self.assertEqual(list(iter_format_modules("de")), [default_mod])
# When the setting is a string, should return the given module and
# the default module
self.assertEqual(
list(iter_format_modules("de", "i18n.other.locale")),
[test_mod, default_mod],
)
# When setting is a list of strings, should return the given
# modules and the default module
self.assertEqual(
list(
iter_format_modules(
"de", ["i18n.other.locale", "i18n.other2.locale"]
)
),
[test_mod, test_mod2, default_mod],
)
def test_iter_format_modules_stability(self):
"""
Tests that iter_format_modules() always yields format modules in a
stable and correct order in the presence of both base ll and ll_CC formats.
"""
en_format_mod = import_module("django.conf.locale.en.formats")
en_gb_format_mod = import_module("django.conf.locale.en_GB.formats")
self.assertEqual(
list(iter_format_modules("en-gb")), [en_gb_format_mod, en_format_mod]
)
def test_get_format_modules_lang(self):
with translation.override("de", deactivate=True):
self.assertEqual(".", get_format("DECIMAL_SEPARATOR", lang="en"))
def test_get_format_lazy_format(self):
self.assertEqual(get_format(gettext_lazy("DATE_FORMAT")), "N j, Y")
def test_localize_templatetag_and_filter(self):
"""
Test the {% localize %} templatetag and the localize/unlocalize filters.
"""
context = Context(
{"int": 1455, "float": 3.14, "date": datetime.date(2016, 12, 31)}
)
template1 = Template(
"{% load l10n %}{% localize %}"
"{{ int }}/{{ float }}/{{ date }}{% endlocalize %}; "
"{% localize on %}{{ int }}/{{ float }}/{{ date }}{% endlocalize %}"
)
template2 = Template(
"{% load l10n %}{{ int }}/{{ float }}/{{ date }}; "
"{% localize off %}{{ int }}/{{ float }}/{{ date }};{% endlocalize %} "
"{{ int }}/{{ float }}/{{ date }}"
)
template3 = Template(
"{% load l10n %}{{ int }}/{{ float }}/{{ date }}; "
"{{ int|unlocalize }}/{{ float|unlocalize }}/{{ date|unlocalize }}"
)
template4 = Template(
"{% load l10n %}{{ int }}/{{ float }}/{{ date }}; "
"{{ int|localize }}/{{ float|localize }}/{{ date|localize }}"
)
expected_localized = "1.455/3,14/31. Dezember 2016"
expected_unlocalized = "1455/3.14/Dez. 31, 2016"
output1 = "; ".join([expected_localized, expected_localized])
output2 = "; ".join(
[expected_localized, expected_unlocalized, expected_localized]
)
output3 = "; ".join([expected_localized, expected_unlocalized])
output4 = "; ".join([expected_unlocalized, expected_localized])
with translation.override("de", deactivate=True):
# RemovedInDjango50Warning: When the deprecation ends, remove
# @ignore_warnings and USE_L10N=False. The assertions should remain
# because format-related settings will take precedence over
# locale-dictated formats.
with ignore_warnings(category=RemovedInDjango50Warning):
with self.settings(
USE_L10N=False,
DATE_FORMAT="N j, Y",
DECIMAL_SEPARATOR=".",
NUMBER_GROUPING=0,
USE_THOUSAND_SEPARATOR=True,
):
self.assertEqual(template1.render(context), output1)
self.assertEqual(template4.render(context), output4)
with self.settings(USE_THOUSAND_SEPARATOR=True):
self.assertEqual(template1.render(context), output1)
self.assertEqual(template2.render(context), output2)
self.assertEqual(template3.render(context), output3)
def test_localized_off_numbers(self):
"""A string representation is returned for unlocalized numbers."""
template = Template(
"{% load l10n %}{% localize off %}"
"{{ int }}/{{ float }}/{{ decimal }}{% endlocalize %}"
)
context = Context(
{"int": 1455, "float": 3.14, "decimal": decimal.Decimal("24.1567")}
)
with self.settings(
DECIMAL_SEPARATOR=",",
USE_THOUSAND_SEPARATOR=True,
THOUSAND_SEPARATOR="°",
NUMBER_GROUPING=2,
):
self.assertEqual(template.render(context), "1455/3.14/24.1567")
# RemovedInDjango50Warning.
with ignore_warnings(category=RemovedInDjango50Warning):
with self.settings(
USE_L10N=False,
DECIMAL_SEPARATOR=",",
USE_THOUSAND_SEPARATOR=True,
THOUSAND_SEPARATOR="°",
NUMBER_GROUPING=2,
):
self.assertEqual(template.render(context), "1455/3.14/24.1567")
def test_localized_as_text_as_hidden_input(self):
"""
Form input with 'as_hidden' or 'as_text' is correctly localized.
"""
self.maxDiff = 1200
with translation.override("de-at", deactivate=True):
template = Template(
"{% load l10n %}{{ form.date_added }}; {{ form.cents_paid }}"
)
template_as_text = Template(
"{% load l10n %}"
"{{ form.date_added.as_text }}; {{ form.cents_paid.as_text }}"
)
template_as_hidden = Template(
"{% load l10n %}"
"{{ form.date_added.as_hidden }}; {{ form.cents_paid.as_hidden }}"
)
form = CompanyForm(
{
"name": "acme",
"date_added": datetime.datetime(2009, 12, 31, 6, 0, 0),
"cents_paid": decimal.Decimal("59.47"),
"products_delivered": 12000,
}
)
context = Context({"form": form})
self.assertTrue(form.is_valid())
self.assertHTMLEqual(
template.render(context),
'<input id="id_date_added" name="date_added" type="text" '
'value="31.12.2009 06:00:00" required>;'
'<input id="id_cents_paid" name="cents_paid" type="text" value="59,47" '
"required>",
)
self.assertHTMLEqual(
template_as_text.render(context),
'<input id="id_date_added" name="date_added" type="text" '
'value="31.12.2009 06:00:00" required>;'
'<input id="id_cents_paid" name="cents_paid" type="text" value="59,47" '
"required>",
)
self.assertHTMLEqual(
template_as_hidden.render(context),
'<input id="id_date_added" name="date_added" type="hidden" '
'value="31.12.2009 06:00:00">;'
'<input id="id_cents_paid" name="cents_paid" type="hidden" '
'value="59,47">',
)
def test_format_arbitrary_settings(self):
self.assertEqual(get_format("DEBUG"), "DEBUG")
def test_get_custom_format(self):
reset_format_cache()
with self.settings(FORMAT_MODULE_PATH="i18n.other.locale"):
with translation.override("fr", deactivate=True):
self.assertEqual("d/m/Y CUSTOM", get_format("CUSTOM_DAY_FORMAT"))
def test_admin_javascript_supported_input_formats(self):
"""
The first input format for DATE_INPUT_FORMATS, TIME_INPUT_FORMATS, and
DATETIME_INPUT_FORMATS must not contain %f since that's unsupported by
the admin's time picker widget.
"""
regex = re.compile("%([^BcdHImMpSwxXyY%])")
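# Any "%" specifier outside this supported set (e.g. "%f" for
# microseconds) is flagged as unsupported by the admin's JavaScript
# date/time picker.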
for language_code, language_name in settings.LANGUAGES:
for format_name in (
"DATE_INPUT_FORMATS",
"TIME_INPUT_FORMATS",
"DATETIME_INPUT_FORMATS",
):
with self.subTest(language=language_code, format=format_name):
formatter = get_format(format_name, lang=language_code)[0]
self.assertEqual(
regex.findall(formatter),
[],
"%s locale's %s uses an unsupported format code."
% (language_code, format_name),
)
class MiscTests(SimpleTestCase):
rf = RequestFactory()
@override_settings(LANGUAGE_CODE="de")
def test_english_fallback(self):
"""
With a non-English LANGUAGE_CODE, if the active language is English
or one of its variants, the untranslated string should be returned
(instead of falling back to LANGUAGE_CODE) (See #24413).
"""
self.assertEqual(gettext("Image"), "Bild")
with translation.override("en"):
self.assertEqual(gettext("Image"), "Image")
with translation.override("en-us"):
self.assertEqual(gettext("Image"), "Image")
with translation.override("en-ca"):
self.assertEqual(gettext("Image"), "Image")
def test_parse_spec_http_header(self):
"""
Testing HTTP header parsing. First, we test that we can parse the
values according to the spec (and that we extract all the pieces in
the right order).
"""
tests = [
# Good headers
("de", [("de", 1.0)]),
("en-AU", [("en-au", 1.0)]),
("es-419", [("es-419", 1.0)]),
("*;q=1.00", [("*", 1.0)]),
("en-AU;q=0.123", [("en-au", 0.123)]),
("en-au;q=0.5", [("en-au", 0.5)]),
("en-au;q=1.0", [("en-au", 1.0)]),
("da, en-gb;q=0.25, en;q=0.5", [("da", 1.0), ("en", 0.5), ("en-gb", 0.25)]),
("en-au-xx", [("en-au-xx", 1.0)]),
(
"de,en-au;q=0.75,en-us;q=0.5,en;q=0.25,es;q=0.125,fa;q=0.125",
[
("de", 1.0),
("en-au", 0.75),
("en-us", 0.5),
("en", 0.25),
("es", 0.125),
("fa", 0.125),
],
),
("*", [("*", 1.0)]),
("de;q=0.", [("de", 0.0)]),
("en; q=1,", [("en", 1.0)]),
("en; q=1.0, * ; q=0.5", [("en", 1.0), ("*", 0.5)]),
# Bad headers
("en-gb;q=1.0000", []),
("en;q=0.1234", []),
("en;q=.2", []),
("abcdefghi-au", []),
("**", []),
("en,,gb", []),
("en-au;q=0.1.0", []),
(("X" * 97) + "Z,en", []),
("da, en-gb;q=0.8, en;q=0.7,#", []),
("de;q=2.0", []),
("de;q=0.a", []),
("12-345", []),
("", []),
("en;q=1e0", []),
("en-au;q=1.0", []),
]
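# The bad headers violate the HTTP qvalue grammar (paraphrasing RFC
# 9110): a leading 0 or 1 and at most three digits after the decimal
# point -- which is why "en-gb;q=1.0000", "en;q=0.1234", and "en;q=.2"
# parse to nothing.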
for value, expected in tests:
with self.subTest(value=value):
self.assertEqual(
trans_real.parse_accept_lang_header(value), tuple(expected)
)
def test_parse_literal_http_header(self):
tests = [
("pt-br", "pt-br"),
("pt", "pt"),
("es,de", "es"),
("es-a,de", "es"),
# There isn't a Django translation for a US variant of Spanish (a
# safe assumption). When the user sets it as the preferred language,
# the main 'es' translation should be selected instead.
("es-us", "es"),
# There isn't a main-language (zh) translation of Django, but there
# is a translation for the variant (zh-hans). When the user sets
# zh-hans as the preferred language, it should be selected without
# being ignored or falling back to another language.
("zh-hans,de", "zh-hans"),
("NL", "nl"),
("fy", "fy"),
("ia", "ia"),
("sr-latn", "sr-latn"),
("zh-hans", "zh-hans"),
("zh-hant", "zh-hant"),
]
for header, expected in tests:
with self.subTest(header=header):
request = self.rf.get("/", HTTP_ACCEPT_LANGUAGE=header)
self.assertEqual(get_language_from_request(request), expected)
@override_settings(
LANGUAGES=[
("en", "English"),
("zh-hans", "Simplified Chinese"),
("zh-hant", "Traditional Chinese"),
]
)
def test_support_for_deprecated_chinese_language_codes(self):
"""
Some browsers (Firefox, IE, etc.) use deprecated language codes. Since
support for these codes was removed in Django 1.9, they would otherwise
be matched incorrectly; for example, zh-tw (traditional) would be
interpreted as zh-hans (simplified), which is wrong. These deprecated
language codes should therefore also be accepted and mapped correctly.
Refs #18419 -- this is explicitly for browser compatibility.
"""
g = get_language_from_request
request = self.rf.get("/", HTTP_ACCEPT_LANGUAGE="zh-cn,en")
self.assertEqual(g(request), "zh-hans")
request = self.rf.get("/", HTTP_ACCEPT_LANGUAGE="zh-tw,en")
self.assertEqual(g(request), "zh-hant")
def test_special_fallback_language(self):
"""
Some languages may have special fallbacks that don't follow the simple
'fr-ca' -> 'fr' logic (notably Chinese codes).
"""
request = self.rf.get("/", HTTP_ACCEPT_LANGUAGE="zh-my,en")
self.assertEqual(get_language_from_request(request), "zh-hans")
def test_subsequent_code_fallback_language(self):
"""
Subsequent language codes should be used when the language code is not
supported.
"""
tests = [
("zh-Hans-CN", "zh-hans"),
("zh-hans-mo", "zh-hans"),
("zh-hans-HK", "zh-hans"),
("zh-Hant-HK", "zh-hant"),
("zh-hant-tw", "zh-hant"),
("zh-hant-SG", "zh-hant"),
]
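# Each script+region tag falls back to its script-level variant
# (zh-hans / zh-hant) because the full tag isn't listed in LANGUAGES.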
for value, expected in tests:
with self.subTest(value=value):
request = self.rf.get("/", HTTP_ACCEPT_LANGUAGE=f"{value},en")
self.assertEqual(get_language_from_request(request), expected)
def test_parse_language_cookie(self):
g = get_language_from_request
request = self.rf.get("/")
request.COOKIES[settings.LANGUAGE_COOKIE_NAME] = "pt-br"
self.assertEqual("pt-br", g(request))
request.COOKIES[settings.LANGUAGE_COOKIE_NAME] = "pt"
self.assertEqual("pt", g(request))
request = self.rf.get("/", HTTP_ACCEPT_LANGUAGE="de")
request.COOKIES[settings.LANGUAGE_COOKIE_NAME] = "es"
self.assertEqual("es", g(request))
# There isn't a Django translation for a US variant of Spanish (a safe
# assumption). When the user sets it as the preferred language, the
# main 'es' translation should be selected instead.
request = self.rf.get("/")
request.COOKIES[settings.LANGUAGE_COOKIE_NAME] = "es-us"
self.assertEqual(g(request), "es")
# There isn't a main-language (zh) translation of Django, but there is
# a translation for the variant (zh-hans). When the user sets zh-hans
# as the preferred language, it should be selected without being
# ignored or falling back to another language.
request = self.rf.get("/", HTTP_ACCEPT_LANGUAGE="de")
request.COOKIES[settings.LANGUAGE_COOKIE_NAME] = "zh-hans"
self.assertEqual(g(request), "zh-hans")
@override_settings(
USE_I18N=True,
LANGUAGES=[
("en", "English"),
("ar-dz", "Algerian Arabic"),
("de", "German"),
("de-at", "Austrian German"),
("pt-BR", "Portuguese (Brazil)"),
],
)
def test_get_supported_language_variant_real(self):
g = trans_real.get_supported_language_variant
self.assertEqual(g("en"), "en")
self.assertEqual(g("en-gb"), "en")
self.assertEqual(g("de"), "de")
self.assertEqual(g("de-at"), "de-at")
self.assertEqual(g("de-ch"), "de")
self.assertEqual(g("pt-br"), "pt-br")
self.assertEqual(g("pt-BR"), "pt-BR")
self.assertEqual(g("pt"), "pt-br")
self.assertEqual(g("pt-pt"), "pt-br")
self.assertEqual(g("ar-dz"), "ar-dz")
self.assertEqual(g("ar-DZ"), "ar-DZ")
with self.assertRaises(LookupError):
g("pt", strict=True)
with self.assertRaises(LookupError):
g("pt-pt", strict=True)
with self.assertRaises(LookupError):
g("xyz")
with self.assertRaises(LookupError):
g("xy-zz")
def test_get_supported_language_variant_null(self):
g = trans_null.get_supported_language_variant
self.assertEqual(g(settings.LANGUAGE_CODE), settings.LANGUAGE_CODE)
with self.assertRaises(LookupError):
g("pt")
with self.assertRaises(LookupError):
g("de")
with self.assertRaises(LookupError):
g("de-at")
with self.assertRaises(LookupError):
g("de", strict=True)
with self.assertRaises(LookupError):
g("de-at", strict=True)
with self.assertRaises(LookupError):
g("xyz")
@override_settings(
LANGUAGES=[
("en", "English"),
("en-latn-us", "Latin English"),
("de", "German"),
("de-1996", "German, orthography of 1996"),
("de-at", "Austrian German"),
("de-ch-1901", "German, Swiss variant, traditional orthography"),
("i-mingo", "Mingo"),
("kl-tunumiit", "Tunumiisiut"),
("nan-hani-tw", "Hanji"),
("pl", "Polish"),
],
)
def test_get_language_from_path_real(self):
g = trans_real.get_language_from_path
tests = [
("/pl/", "pl"),
("/pl", "pl"),
("/xyz/", None),
("/en/", "en"),
("/en-gb/", "en"),
("/en-latn-us/", "en-latn-us"),
("/en-Latn-US/", "en-Latn-US"),
("/de/", "de"),
("/de-1996/", "de-1996"),
("/de-at/", "de-at"),
("/de-AT/", "de-AT"),
("/de-ch/", "de"),
("/de-ch-1901/", "de-ch-1901"),
("/de-simple-page-test/", None),
("/i-mingo/", "i-mingo"),
("/kl-tunumiit/", "kl-tunumiit"),
("/nan-hani-tw/", "nan-hani-tw"),
]
for path, language in tests:
with self.subTest(path=path):
self.assertEqual(g(path), language)
def test_get_language_from_path_null(self):
g = trans_null.get_language_from_path
self.assertIsNone(g("/pl/"))
self.assertIsNone(g("/pl"))
self.assertIsNone(g("/xyz/"))
def test_cache_resetting(self):
"""
        After setting LANGUAGES, the cache should be cleared and languages
        previously valid should not be used (#14170).
"""
g = get_language_from_request
request = self.rf.get("/", HTTP_ACCEPT_LANGUAGE="pt-br")
self.assertEqual("pt-br", g(request))
with self.settings(LANGUAGES=[("en", "English")]):
self.assertNotEqual("pt-br", g(request))
def test_i18n_patterns_returns_list(self):
with override_settings(USE_I18N=False):
self.assertIsInstance(i18n_patterns([]), list)
with override_settings(USE_I18N=True):
self.assertIsInstance(i18n_patterns([]), list)
class ResolutionOrderI18NTests(SimpleTestCase):
def setUp(self):
super().setUp()
activate("de")
def tearDown(self):
deactivate()
super().tearDown()
def assertGettext(self, msgid, msgstr):
result = gettext(msgid)
self.assertIn(
msgstr,
result,
"The string '%s' isn't in the translation of '%s'; the actual result is "
"'%s'." % (msgstr, msgid, result),
)
class AppResolutionOrderI18NTests(ResolutionOrderI18NTests):
@override_settings(LANGUAGE_CODE="de")
def test_app_translation(self):
# Original translation.
self.assertGettext("Date/time", "Datum/Zeit")
# Different translation.
with self.modify_settings(INSTALLED_APPS={"append": "i18n.resolution"}):
# Force refreshing translations.
activate("de")
# Doesn't work because it's added later in the list.
self.assertGettext("Date/time", "Datum/Zeit")
with self.modify_settings(
INSTALLED_APPS={"remove": "django.contrib.admin.apps.SimpleAdminConfig"}
):
# Force refreshing translations.
activate("de")
# Unless the original is removed from the list.
self.assertGettext("Date/time", "Datum/Zeit (APP)")
@override_settings(LOCALE_PATHS=extended_locale_paths)
class LocalePathsResolutionOrderI18NTests(ResolutionOrderI18NTests):
def test_locale_paths_translation(self):
self.assertGettext("Time", "LOCALE_PATHS")
def test_locale_paths_override_app_translation(self):
with self.settings(INSTALLED_APPS=["i18n.resolution"]):
self.assertGettext("Time", "LOCALE_PATHS")
class DjangoFallbackResolutionOrderI18NTests(ResolutionOrderI18NTests):
def test_django_fallback(self):
self.assertEqual(gettext("Date/time"), "Datum/Zeit")
@override_settings(INSTALLED_APPS=["i18n.territorial_fallback"])
class TranslationFallbackI18NTests(ResolutionOrderI18NTests):
def test_sparse_territory_catalog(self):
"""
Untranslated strings for territorial language variants use the
translations of the generic language. In this case, the de-de
translation falls back to de.
"""
with translation.override("de-de"):
self.assertGettext("Test 1 (en)", "(de-de)")
self.assertGettext("Test 2 (en)", "(de)")
class TestModels(TestCase):
def test_lazy(self):
tm = TestModel()
tm.save()
def test_safestr(self):
c = Company(cents_paid=12, products_delivered=1)
c.name = SafeString("Iñtërnâtiônàlizætiøn1")
c.save()
class TestLanguageInfo(SimpleTestCase):
def test_localized_language_info(self):
li = get_language_info("de")
self.assertEqual(li["code"], "de")
self.assertEqual(li["name_local"], "Deutsch")
self.assertEqual(li["name"], "German")
self.assertIs(li["bidi"], False)
def test_unknown_language_code(self):
with self.assertRaisesMessage(KeyError, "Unknown language code xx"):
get_language_info("xx")
with translation.override("xx"):
# A language with no translation catalogs should fallback to the
# untranslated string.
self.assertEqual(gettext("Title"), "Title")
def test_unknown_only_country_code(self):
li = get_language_info("de-xx")
self.assertEqual(li["code"], "de")
self.assertEqual(li["name_local"], "Deutsch")
self.assertEqual(li["name"], "German")
self.assertIs(li["bidi"], False)
def test_unknown_language_code_and_country_code(self):
with self.assertRaisesMessage(KeyError, "Unknown language code xx-xx and xx"):
get_language_info("xx-xx")
def test_fallback_language_code(self):
"""
        get_language_info() returns the first fallback language's info if the
        lang_info struct doesn't contain the 'name' key.
"""
li = get_language_info("zh-my")
self.assertEqual(li["code"], "zh-hans")
li = get_language_info("zh-hans")
self.assertEqual(li["code"], "zh-hans")
@override_settings(
USE_I18N=True,
LANGUAGES=[
("en", "English"),
("fr", "French"),
],
MIDDLEWARE=[
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
],
ROOT_URLCONF="i18n.urls",
)
class LocaleMiddlewareTests(TestCase):
def test_streaming_response(self):
# Regression test for #5241
response = self.client.get("/fr/streaming/")
self.assertContains(response, "Oui/Non")
response = self.client.get("/en/streaming/")
self.assertContains(response, "Yes/No")
@override_settings(
USE_I18N=True,
LANGUAGES=[
("en", "English"),
("de", "German"),
("fr", "French"),
],
MIDDLEWARE=[
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
],
ROOT_URLCONF="i18n.urls_default_unprefixed",
LANGUAGE_CODE="en",
)
class UnprefixedDefaultLanguageTests(SimpleTestCase):
def test_default_lang_without_prefix(self):
"""
With i18n_patterns(..., prefix_default_language=False), the default
language (settings.LANGUAGE_CODE) should be accessible without a prefix.
"""
response = self.client.get("/simple/")
self.assertEqual(response.content, b"Yes")
def test_other_lang_with_prefix(self):
response = self.client.get("/fr/simple/")
self.assertEqual(response.content, b"Oui")
def test_unprefixed_language_with_accept_language(self):
"""'Accept-Language' is respected."""
response = self.client.get("/simple/", headers={"accept-language": "fr"})
self.assertRedirects(response, "/fr/simple/")
def test_unprefixed_language_with_cookie_language(self):
"""A language set in the cookies is respected."""
self.client.cookies.load({settings.LANGUAGE_COOKIE_NAME: "fr"})
response = self.client.get("/simple/")
self.assertRedirects(response, "/fr/simple/")
def test_unprefixed_language_with_non_valid_language(self):
response = self.client.get("/simple/", headers={"accept-language": "fi"})
self.assertEqual(response.content, b"Yes")
self.client.cookies.load({settings.LANGUAGE_COOKIE_NAME: "fi"})
response = self.client.get("/simple/")
self.assertEqual(response.content, b"Yes")
def test_page_with_dash(self):
# A page starting with /de* shouldn't match the 'de' language code.
response = self.client.get("/de-simple-page-test/")
self.assertEqual(response.content, b"Yes")
def test_no_redirect_on_404(self):
"""
A request for a nonexistent URL shouldn't cause a redirect to
/<default_language>/<request_url> when prefix_default_language=False and
/<default_language>/<request_url> has a URL match (#27402).
"""
# A match for /group1/group2/ must exist for this to act as a
# regression test.
response = self.client.get("/group1/group2/")
self.assertEqual(response.status_code, 200)
response = self.client.get("/nonexistent/")
self.assertEqual(response.status_code, 404)
@override_settings(
USE_I18N=True,
LANGUAGES=[
("bg", "Bulgarian"),
("en-us", "English"),
("pt-br", "Portuguese (Brazil)"),
],
MIDDLEWARE=[
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
],
ROOT_URLCONF="i18n.urls",
)
class CountrySpecificLanguageTests(SimpleTestCase):
rf = RequestFactory()
def test_check_for_language(self):
self.assertTrue(check_for_language("en"))
self.assertTrue(check_for_language("en-us"))
self.assertTrue(check_for_language("en-US"))
self.assertFalse(check_for_language("en_US"))
self.assertTrue(check_for_language("be"))
self.assertTrue(check_for_language("be@latin"))
self.assertTrue(check_for_language("sr-RS@latin"))
self.assertTrue(check_for_language("sr-RS@12345"))
self.assertFalse(check_for_language("en-ü"))
self.assertFalse(check_for_language("en\x00"))
self.assertFalse(check_for_language(None))
self.assertFalse(check_for_language("be@ "))
# Specifying encoding is not supported (Django enforces UTF-8)
self.assertFalse(check_for_language("tr-TR.UTF-8"))
self.assertFalse(check_for_language("tr-TR.UTF8"))
self.assertFalse(check_for_language("de-DE.utf-8"))
def test_check_for_language_null(self):
self.assertIs(trans_null.check_for_language("en"), True)
def test_get_language_from_request(self):
# issue 19919
request = self.rf.get(
"/", HTTP_ACCEPT_LANGUAGE="en-US,en;q=0.8,bg;q=0.6,ru;q=0.4"
)
lang = get_language_from_request(request)
self.assertEqual("en-us", lang)
request = self.rf.get(
"/", HTTP_ACCEPT_LANGUAGE="bg-bg,en-US;q=0.8,en;q=0.6,ru;q=0.4"
)
lang = get_language_from_request(request)
self.assertEqual("bg", lang)
def test_get_language_from_request_null(self):
lang = trans_null.get_language_from_request(None)
        self.assertIsNone(lang)
def test_specific_language_codes(self):
# issue 11915
request = self.rf.get(
"/", HTTP_ACCEPT_LANGUAGE="pt,en-US;q=0.8,en;q=0.6,ru;q=0.4"
)
lang = get_language_from_request(request)
self.assertEqual("pt-br", lang)
request = self.rf.get(
"/", HTTP_ACCEPT_LANGUAGE="pt-pt,en-US;q=0.8,en;q=0.6,ru;q=0.4"
)
lang = get_language_from_request(request)
self.assertEqual("pt-br", lang)
class TranslationFilesMissing(SimpleTestCase):
def setUp(self):
super().setUp()
self.gettext_find_builtin = gettext_module.find
def tearDown(self):
gettext_module.find = self.gettext_find_builtin
super().tearDown()
def patchGettextFind(self):
gettext_module.find = lambda *args, **kw: None
def test_failure_finding_default_mo_files(self):
"""OSError is raised if the default language is unparseable."""
self.patchGettextFind()
trans_real._translations = {}
with self.assertRaises(OSError):
activate("en")
class NonDjangoLanguageTests(SimpleTestCase):
"""
    A language not present in Django's default languages can still be
    installed/used by a Django project.
"""
@override_settings(
USE_I18N=True,
LANGUAGES=[
("en-us", "English"),
("xxx", "Somelanguage"),
],
LANGUAGE_CODE="xxx",
LOCALE_PATHS=[os.path.join(here, "commands", "locale")],
)
def test_non_django_language(self):
self.assertEqual(get_language(), "xxx")
self.assertEqual(gettext("year"), "reay")
@override_settings(USE_I18N=True)
def test_check_for_language(self):
with tempfile.TemporaryDirectory() as app_dir:
os.makedirs(os.path.join(app_dir, "locale", "dummy_Lang", "LC_MESSAGES"))
open(
os.path.join(
app_dir, "locale", "dummy_Lang", "LC_MESSAGES", "django.mo"
),
"w",
).close()
app_config = AppConfig("dummy_app", AppModuleStub(__path__=[app_dir]))
with mock.patch(
"django.apps.apps.get_app_configs", return_value=[app_config]
):
self.assertIs(check_for_language("dummy-lang"), True)
@override_settings(
USE_I18N=True,
LANGUAGES=[
("en-us", "English"),
# xyz language has no locale files
("xyz", "XYZ"),
],
)
@translation.override("xyz")
def test_plural_non_django_language(self):
self.assertEqual(get_language(), "xyz")
self.assertEqual(ngettext("year", "years", 2), "years")
@override_settings(USE_I18N=True)
class WatchForTranslationChangesTests(SimpleTestCase):
@override_settings(USE_I18N=False)
def test_i18n_disabled(self):
mocked_sender = mock.MagicMock()
watch_for_translation_changes(mocked_sender)
mocked_sender.watch_dir.assert_not_called()
def test_i18n_enabled(self):
mocked_sender = mock.MagicMock()
watch_for_translation_changes(mocked_sender)
self.assertGreater(mocked_sender.watch_dir.call_count, 1)
def test_i18n_locale_paths(self):
mocked_sender = mock.MagicMock()
with tempfile.TemporaryDirectory() as app_dir:
with self.settings(LOCALE_PATHS=[app_dir]):
watch_for_translation_changes(mocked_sender)
mocked_sender.watch_dir.assert_any_call(Path(app_dir), "**/*.mo")
def test_i18n_app_dirs(self):
mocked_sender = mock.MagicMock()
with self.settings(INSTALLED_APPS=["i18n.sampleproject"]):
watch_for_translation_changes(mocked_sender)
project_dir = Path(__file__).parent / "sampleproject" / "locale"
mocked_sender.watch_dir.assert_any_call(project_dir, "**/*.mo")
def test_i18n_app_dirs_ignore_django_apps(self):
mocked_sender = mock.MagicMock()
with self.settings(INSTALLED_APPS=["django.contrib.admin"]):
watch_for_translation_changes(mocked_sender)
mocked_sender.watch_dir.assert_called_once_with(Path("locale"), "**/*.mo")
def test_i18n_local_locale(self):
mocked_sender = mock.MagicMock()
watch_for_translation_changes(mocked_sender)
locale_dir = Path(__file__).parent / "locale"
mocked_sender.watch_dir.assert_any_call(locale_dir, "**/*.mo")
class TranslationFileChangedTests(SimpleTestCase):
def setUp(self):
self.gettext_translations = gettext_module._translations.copy()
self.trans_real_translations = trans_real._translations.copy()
def tearDown(self):
        gettext_module._translations = self.gettext_translations
trans_real._translations = self.trans_real_translations
def test_ignores_non_mo_files(self):
gettext_module._translations = {"foo": "bar"}
path = Path("test.py")
self.assertIsNone(translation_file_changed(None, path))
self.assertEqual(gettext_module._translations, {"foo": "bar"})
def test_resets_cache_with_mo_files(self):
gettext_module._translations = {"foo": "bar"}
trans_real._translations = {"foo": "bar"}
trans_real._default = 1
trans_real._active = False
path = Path("test.mo")
self.assertIs(translation_file_changed(None, path), True)
self.assertEqual(gettext_module._translations, {})
self.assertEqual(trans_real._translations, {})
self.assertIsNone(trans_real._default)
self.assertIsInstance(trans_real._active, Local)
class UtilsTests(SimpleTestCase):
def test_round_away_from_one(self):
tests = [
(0, 0),
(0.0, 0),
(0.25, 0),
(0.5, 0),
(0.75, 0),
(1, 1),
(1.0, 1),
(1.25, 2),
(1.5, 2),
(1.75, 2),
(-0.0, 0),
(-0.25, -1),
(-0.5, -1),
(-0.75, -1),
(-1, -1),
(-1.0, -1),
(-1.25, -2),
(-1.5, -2),
(-1.75, -2),
]
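        # The table above follows a simple rule (a sketch, not necessarily the
        # actual implementation): values below one round down (floor) and
        # values at or above one round up (ceil), so the result always moves
        # away from one.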
for value, expected in tests:
with self.subTest(value=value):
self.assertEqual(round_away_from_one(value), expected)
|
56efbdc66f6f153a569b80f9021711a567cc2e45a54b4816cbc9a24f128a0899 | import copy
import json
import os
import pickle
import unittest
import uuid
from django.core.exceptions import DisallowedRedirect
from django.core.serializers.json import DjangoJSONEncoder
from django.core.signals import request_finished
from django.db import close_old_connections
from django.http import (
BadHeaderError,
HttpResponse,
HttpResponseNotAllowed,
HttpResponseNotModified,
HttpResponsePermanentRedirect,
HttpResponseRedirect,
JsonResponse,
QueryDict,
SimpleCookie,
StreamingHttpResponse,
parse_cookie,
)
from django.test import SimpleTestCase
from django.utils.functional import lazystr
class QueryDictTests(SimpleTestCase):
def test_create_with_no_args(self):
self.assertEqual(QueryDict(), QueryDict(""))
def test_missing_key(self):
q = QueryDict()
with self.assertRaises(KeyError):
q.__getitem__("foo")
def test_immutability(self):
q = QueryDict()
with self.assertRaises(AttributeError):
q.__setitem__("something", "bar")
with self.assertRaises(AttributeError):
q.setlist("foo", ["bar"])
with self.assertRaises(AttributeError):
q.appendlist("foo", ["bar"])
with self.assertRaises(AttributeError):
q.update({"foo": "bar"})
with self.assertRaises(AttributeError):
q.pop("foo")
with self.assertRaises(AttributeError):
q.popitem()
with self.assertRaises(AttributeError):
q.clear()
def test_immutable_get_with_default(self):
q = QueryDict()
self.assertEqual(q.get("foo", "default"), "default")
def test_immutable_basic_operations(self):
q = QueryDict()
self.assertEqual(q.getlist("foo"), [])
self.assertNotIn("foo", q)
self.assertEqual(list(q), [])
self.assertEqual(list(q.items()), [])
self.assertEqual(list(q.lists()), [])
self.assertEqual(list(q.keys()), [])
self.assertEqual(list(q.values()), [])
self.assertEqual(len(q), 0)
self.assertEqual(q.urlencode(), "")
def test_single_key_value(self):
"""Test QueryDict with one key/value pair"""
q = QueryDict("foo=bar")
self.assertEqual(q["foo"], "bar")
with self.assertRaises(KeyError):
q.__getitem__("bar")
with self.assertRaises(AttributeError):
q.__setitem__("something", "bar")
self.assertEqual(q.get("foo", "default"), "bar")
self.assertEqual(q.get("bar", "default"), "default")
self.assertEqual(q.getlist("foo"), ["bar"])
self.assertEqual(q.getlist("bar"), [])
with self.assertRaises(AttributeError):
q.setlist("foo", ["bar"])
with self.assertRaises(AttributeError):
q.appendlist("foo", ["bar"])
self.assertIn("foo", q)
self.assertNotIn("bar", q)
self.assertEqual(list(q), ["foo"])
self.assertEqual(list(q.items()), [("foo", "bar")])
self.assertEqual(list(q.lists()), [("foo", ["bar"])])
self.assertEqual(list(q.keys()), ["foo"])
self.assertEqual(list(q.values()), ["bar"])
self.assertEqual(len(q), 1)
with self.assertRaises(AttributeError):
q.update({"foo": "bar"})
with self.assertRaises(AttributeError):
q.pop("foo")
with self.assertRaises(AttributeError):
q.popitem()
with self.assertRaises(AttributeError):
q.clear()
with self.assertRaises(AttributeError):
q.setdefault("foo", "bar")
self.assertEqual(q.urlencode(), "foo=bar")
def test_urlencode(self):
q = QueryDict(mutable=True)
q["next"] = "/a&b/"
self.assertEqual(q.urlencode(), "next=%2Fa%26b%2F")
self.assertEqual(q.urlencode(safe="/"), "next=/a%26b/")
q = QueryDict(mutable=True)
q["next"] = "/t\xebst&key/"
self.assertEqual(q.urlencode(), "next=%2Ft%C3%ABst%26key%2F")
self.assertEqual(q.urlencode(safe="/"), "next=/t%C3%ABst%26key/")
def test_urlencode_int(self):
# Normally QueryDict doesn't contain non-string values but lazily
# written tests may make that mistake.
q = QueryDict(mutable=True)
q["a"] = 1
self.assertEqual(q.urlencode(), "a=1")
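    # An illustrative sketch (an addition, not part of the original suite):
    # urlencode() and QueryDict() are inverses for simple multi-valued data,
    # so a round trip preserves the value lists.
    def test_urlencode_roundtrip_sketch(self):
        q = QueryDict(mutable=True)
        q.setlist("a", ["1", "2"])
        encoded = q.urlencode()
        self.assertEqual(encoded, "a=1&a=2")
        # Re-parsing the encoded string restores the original list.
        self.assertEqual(QueryDict(encoded).getlist("a"), ["1", "2"])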
def test_mutable_copy(self):
"""A copy of a QueryDict is mutable."""
q = QueryDict().copy()
with self.assertRaises(KeyError):
q.__getitem__("foo")
q["name"] = "john"
self.assertEqual(q["name"], "john")
def test_mutable_delete(self):
q = QueryDict(mutable=True)
q["name"] = "john"
del q["name"]
self.assertNotIn("name", q)
def test_basic_mutable_operations(self):
q = QueryDict(mutable=True)
q["name"] = "john"
self.assertEqual(q.get("foo", "default"), "default")
self.assertEqual(q.get("name", "default"), "john")
self.assertEqual(q.getlist("name"), ["john"])
self.assertEqual(q.getlist("foo"), [])
q.setlist("foo", ["bar", "baz"])
self.assertEqual(q.get("foo", "default"), "baz")
self.assertEqual(q.getlist("foo"), ["bar", "baz"])
q.appendlist("foo", "another")
self.assertEqual(q.getlist("foo"), ["bar", "baz", "another"])
self.assertEqual(q["foo"], "another")
self.assertIn("foo", q)
self.assertCountEqual(q, ["foo", "name"])
self.assertCountEqual(q.items(), [("foo", "another"), ("name", "john")])
self.assertCountEqual(
q.lists(), [("foo", ["bar", "baz", "another"]), ("name", ["john"])]
)
self.assertCountEqual(q.keys(), ["foo", "name"])
self.assertCountEqual(q.values(), ["another", "john"])
q.update({"foo": "hello"})
self.assertEqual(q["foo"], "hello")
self.assertEqual(q.get("foo", "not available"), "hello")
self.assertEqual(q.getlist("foo"), ["bar", "baz", "another", "hello"])
self.assertEqual(q.pop("foo"), ["bar", "baz", "another", "hello"])
self.assertEqual(q.pop("foo", "not there"), "not there")
self.assertEqual(q.get("foo", "not there"), "not there")
self.assertEqual(q.setdefault("foo", "bar"), "bar")
self.assertEqual(q["foo"], "bar")
self.assertEqual(q.getlist("foo"), ["bar"])
self.assertIn(q.urlencode(), ["foo=bar&name=john", "name=john&foo=bar"])
q.clear()
self.assertEqual(len(q), 0)
def test_multiple_keys(self):
"""Test QueryDict with two key/value pairs with same keys."""
q = QueryDict("vote=yes&vote=no")
self.assertEqual(q["vote"], "no")
with self.assertRaises(AttributeError):
q.__setitem__("something", "bar")
self.assertEqual(q.get("vote", "default"), "no")
self.assertEqual(q.get("foo", "default"), "default")
self.assertEqual(q.getlist("vote"), ["yes", "no"])
self.assertEqual(q.getlist("foo"), [])
with self.assertRaises(AttributeError):
q.setlist("foo", ["bar", "baz"])
with self.assertRaises(AttributeError):
q.appendlist("foo", ["bar"])
self.assertIn("vote", q)
self.assertNotIn("foo", q)
self.assertEqual(list(q), ["vote"])
self.assertEqual(list(q.items()), [("vote", "no")])
self.assertEqual(list(q.lists()), [("vote", ["yes", "no"])])
self.assertEqual(list(q.keys()), ["vote"])
self.assertEqual(list(q.values()), ["no"])
self.assertEqual(len(q), 1)
with self.assertRaises(AttributeError):
q.update({"foo": "bar"})
with self.assertRaises(AttributeError):
q.pop("foo")
with self.assertRaises(AttributeError):
q.popitem()
with self.assertRaises(AttributeError):
q.clear()
with self.assertRaises(AttributeError):
q.setdefault("foo", "bar")
with self.assertRaises(AttributeError):
q.__delitem__("vote")
def test_pickle(self):
q = QueryDict()
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q, q1)
q = QueryDict("a=b&c=d")
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q, q1)
q = QueryDict("a=b&c=d&a=1")
q1 = pickle.loads(pickle.dumps(q, 2))
self.assertEqual(q, q1)
def test_update_from_querydict(self):
"""Regression test for #8278: QueryDict.update(QueryDict)"""
x = QueryDict("a=1&a=2", mutable=True)
y = QueryDict("a=3&a=4")
x.update(y)
self.assertEqual(x.getlist("a"), ["1", "2", "3", "4"])
def test_non_default_encoding(self):
"""#13572 - QueryDict with a non-default encoding"""
q = QueryDict("cur=%A4", encoding="iso-8859-15")
self.assertEqual(q.encoding, "iso-8859-15")
self.assertEqual(list(q.items()), [("cur", "€")])
self.assertEqual(q.urlencode(), "cur=%A4")
q = q.copy()
self.assertEqual(q.encoding, "iso-8859-15")
self.assertEqual(list(q.items()), [("cur", "€")])
self.assertEqual(q.urlencode(), "cur=%A4")
self.assertEqual(copy.copy(q).encoding, "iso-8859-15")
self.assertEqual(copy.deepcopy(q).encoding, "iso-8859-15")
def test_querydict_fromkeys(self):
self.assertEqual(
QueryDict.fromkeys(["key1", "key2", "key3"]), QueryDict("key1&key2&key3")
)
def test_fromkeys_with_nonempty_value(self):
self.assertEqual(
QueryDict.fromkeys(["key1", "key2", "key3"], value="val"),
QueryDict("key1=val&key2=val&key3=val"),
)
def test_fromkeys_is_immutable_by_default(self):
# Match behavior of __init__() which is also immutable by default.
q = QueryDict.fromkeys(["key1", "key2", "key3"])
with self.assertRaisesMessage(
AttributeError, "This QueryDict instance is immutable"
):
q["key4"] = "nope"
def test_fromkeys_mutable_override(self):
q = QueryDict.fromkeys(["key1", "key2", "key3"], mutable=True)
q["key4"] = "yep"
self.assertEqual(q, QueryDict("key1&key2&key3&key4=yep"))
def test_duplicates_in_fromkeys_iterable(self):
self.assertEqual(QueryDict.fromkeys("xyzzy"), QueryDict("x&y&z&z&y"))
def test_fromkeys_with_nondefault_encoding(self):
key_utf16 = b"\xff\xfe\x8e\x02\xdd\x01\x9e\x02"
value_utf16 = b"\xff\xfe\xdd\x01n\x00l\x00P\x02\x8c\x02"
q = QueryDict.fromkeys([key_utf16], value=value_utf16, encoding="utf-16")
expected = QueryDict("", mutable=True)
expected["ʎǝʞ"] = "ǝnlɐʌ"
self.assertEqual(q, expected)
def test_fromkeys_empty_iterable(self):
self.assertEqual(QueryDict.fromkeys([]), QueryDict(""))
def test_fromkeys_noniterable(self):
with self.assertRaises(TypeError):
QueryDict.fromkeys(0)
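    # An illustrative sketch (an addition, not part of the original suite):
    # fromkeys() with repeated keys accumulates values, mirroring repeated
    # keys in a query string.
    def test_fromkeys_repeated_key_values_sketch(self):
        q = QueryDict.fromkeys(["a", "a"], value="b")
        self.assertEqual(q.getlist("a"), ["b", "b"])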
class HttpResponseTests(SimpleTestCase):
def test_headers_type(self):
r = HttpResponse()
# ASCII strings or bytes values are converted to strings.
r.headers["key"] = "test"
self.assertEqual(r.headers["key"], "test")
r.headers["key"] = b"test"
self.assertEqual(r.headers["key"], "test")
self.assertIn(b"test", r.serialize_headers())
# Non-ASCII values are serialized to Latin-1.
r.headers["key"] = "café"
self.assertIn("café".encode("latin-1"), r.serialize_headers())
# Other Unicode values are MIME-encoded (there's no way to pass them as
# bytes).
r.headers["key"] = "†"
self.assertEqual(r.headers["key"], "=?utf-8?b?4oCg?=")
self.assertIn(b"=?utf-8?b?4oCg?=", r.serialize_headers())
        # The response also converts string or bytes keys to strings, but
        # requires them to contain only ASCII characters.
r = HttpResponse()
del r.headers["Content-Type"]
r.headers["foo"] = "bar"
headers = list(r.headers.items())
self.assertEqual(len(headers), 1)
self.assertEqual(headers[0], ("foo", "bar"))
r = HttpResponse()
del r.headers["Content-Type"]
r.headers[b"foo"] = "bar"
headers = list(r.headers.items())
self.assertEqual(len(headers), 1)
self.assertEqual(headers[0], ("foo", "bar"))
self.assertIsInstance(headers[0][0], str)
r = HttpResponse()
with self.assertRaises(UnicodeError):
r.headers.__setitem__("føø", "bar")
with self.assertRaises(UnicodeError):
r.headers.__setitem__("føø".encode(), "bar")
def test_long_line(self):
# Bug #20889: long lines trigger newlines to be added to headers
# (which is not allowed due to bug #10188)
h = HttpResponse()
f = b"zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz a\xcc\x88"
f = f.decode("utf-8")
h.headers["Content-Disposition"] = 'attachment; filename="%s"' % f
        # This one triggers https://bugs.python.org/issue20747: Python itself
        # will insert a newline in the header.
h.headers[
"Content-Disposition"
] = 'attachment; filename="EdelRot_Blu\u0308te (3)-0.JPG"'
def test_newlines_in_headers(self):
# Bug #10188: Do not allow newlines in headers (CR or LF)
r = HttpResponse()
with self.assertRaises(BadHeaderError):
r.headers.__setitem__("test\rstr", "test")
with self.assertRaises(BadHeaderError):
r.headers.__setitem__("test\nstr", "test")
def test_encoded_with_newlines_in_headers(self):
"""
Keys & values which throw a UnicodeError when encoding/decoding should
still be checked for newlines and re-raised as a BadHeaderError.
These specifically would still throw BadHeaderError after decoding
successfully, because the newlines are sandwiched in the middle of the
string and email.Header leaves those as they are.
"""
r = HttpResponse()
pairs = (
("†\nother", "test"),
("test", "†\nother"),
(b"\xe2\x80\xa0\nother", "test"),
("test", b"\xe2\x80\xa0\nother"),
)
msg = "Header values can't contain newlines"
for key, value in pairs:
with self.subTest(key=key, value=value):
with self.assertRaisesMessage(BadHeaderError, msg):
r[key] = value
def test_dict_behavior(self):
"""
Test for bug #14020: Make HttpResponse.get work like dict.get
"""
r = HttpResponse()
self.assertIsNone(r.get("test"))
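    # An illustrative sketch (an addition, not part of the original suite):
    # like dict.get(), HttpResponse.get() accepts a fallback default.
    def test_dict_get_default_sketch(self):
        r = HttpResponse()
        self.assertEqual(r.get("test", "fallback"), "fallback")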
def test_non_string_content(self):
# Bug 16494: HttpResponse should behave consistently with non-strings
r = HttpResponse(12345)
self.assertEqual(r.content, b"12345")
# test content via property
r = HttpResponse()
r.content = 12345
self.assertEqual(r.content, b"12345")
def test_memoryview_content(self):
r = HttpResponse(memoryview(b"memoryview"))
self.assertEqual(r.content, b"memoryview")
def test_iter_content(self):
r = HttpResponse(["abc", "def", "ghi"])
self.assertEqual(r.content, b"abcdefghi")
# test iter content via property
r = HttpResponse()
r.content = ["idan", "alex", "jacob"]
self.assertEqual(r.content, b"idanalexjacob")
r = HttpResponse()
r.content = [1, 2, 3]
self.assertEqual(r.content, b"123")
# test odd inputs
r = HttpResponse()
r.content = ["1", "2", 3, "\u079e"]
# '\xde\x9e' == unichr(1950).encode()
self.assertEqual(r.content, b"123\xde\x9e")
# .content can safely be accessed multiple times.
r = HttpResponse(iter(["hello", "world"]))
self.assertEqual(r.content, r.content)
self.assertEqual(r.content, b"helloworld")
# __iter__ can safely be called multiple times (#20187).
self.assertEqual(b"".join(r), b"helloworld")
self.assertEqual(b"".join(r), b"helloworld")
# Accessing .content still works.
self.assertEqual(r.content, b"helloworld")
# Accessing .content also works if the response was iterated first.
r = HttpResponse(iter(["hello", "world"]))
self.assertEqual(b"".join(r), b"helloworld")
self.assertEqual(r.content, b"helloworld")
# Additional content can be written to the response.
r = HttpResponse(iter(["hello", "world"]))
self.assertEqual(r.content, b"helloworld")
r.write("!")
self.assertEqual(r.content, b"helloworld!")
def test_iterator_isnt_rewound(self):
# Regression test for #13222
r = HttpResponse("abc")
i = iter(r)
self.assertEqual(list(i), [b"abc"])
self.assertEqual(list(i), [])
def test_lazy_content(self):
r = HttpResponse(lazystr("helloworld"))
self.assertEqual(r.content, b"helloworld")
def test_file_interface(self):
r = HttpResponse()
r.write(b"hello")
self.assertEqual(r.tell(), 5)
r.write("привет")
self.assertEqual(r.tell(), 17)
r = HttpResponse(["abc"])
r.write("def")
self.assertEqual(r.tell(), 6)
self.assertEqual(r.content, b"abcdef")
# with Content-Encoding header
r = HttpResponse()
r.headers["Content-Encoding"] = "winning"
r.write(b"abc")
r.write(b"def")
self.assertEqual(r.content, b"abcdef")
def test_stream_interface(self):
r = HttpResponse("asdf")
self.assertEqual(r.getvalue(), b"asdf")
r = HttpResponse()
self.assertIs(r.writable(), True)
r.writelines(["foo\n", "bar\n", "baz\n"])
self.assertEqual(r.content, b"foo\nbar\nbaz\n")
def test_unsafe_redirect(self):
bad_urls = [
'data:text/html,<script>window.alert("xss")</script>',
"mailto:[email protected]",
"file:///etc/passwd",
]
for url in bad_urls:
with self.assertRaises(DisallowedRedirect):
HttpResponseRedirect(url)
with self.assertRaises(DisallowedRedirect):
HttpResponsePermanentRedirect(url)
def test_header_deletion(self):
r = HttpResponse("hello")
r.headers["X-Foo"] = "foo"
del r.headers["X-Foo"]
self.assertNotIn("X-Foo", r.headers)
# del doesn't raise a KeyError on nonexistent headers.
del r.headers["X-Foo"]
def test_instantiate_with_headers(self):
r = HttpResponse("hello", headers={"X-Foo": "foo"})
self.assertEqual(r.headers["X-Foo"], "foo")
self.assertEqual(r.headers["x-foo"], "foo")
def test_content_type(self):
r = HttpResponse("hello", content_type="application/json")
self.assertEqual(r.headers["Content-Type"], "application/json")
def test_content_type_headers(self):
r = HttpResponse("hello", headers={"Content-Type": "application/json"})
self.assertEqual(r.headers["Content-Type"], "application/json")
def test_content_type_mutually_exclusive(self):
msg = (
"'headers' must not contain 'Content-Type' when the "
"'content_type' parameter is provided."
)
with self.assertRaisesMessage(ValueError, msg):
HttpResponse(
"hello",
content_type="application/json",
headers={"Content-Type": "text/csv"},
)
class HttpResponseSubclassesTests(SimpleTestCase):
def test_redirect(self):
response = HttpResponseRedirect("/redirected/")
self.assertEqual(response.status_code, 302)
# Standard HttpResponse init args can be used
response = HttpResponseRedirect(
"/redirected/",
content="The resource has temporarily moved",
)
self.assertContains(
response, "The resource has temporarily moved", status_code=302
)
self.assertEqual(response.url, response.headers["Location"])
def test_redirect_lazy(self):
"""Make sure HttpResponseRedirect works with lazy strings."""
r = HttpResponseRedirect(lazystr("/redirected/"))
self.assertEqual(r.url, "/redirected/")
def test_redirect_repr(self):
response = HttpResponseRedirect("/redirected/")
expected = (
'<HttpResponseRedirect status_code=302, "text/html; charset=utf-8", '
'url="/redirected/">'
)
self.assertEqual(repr(response), expected)
def test_invalid_redirect_repr(self):
"""
If HttpResponseRedirect raises DisallowedRedirect, its __repr__()
should work (in the debug view, for example).
"""
response = HttpResponseRedirect.__new__(HttpResponseRedirect)
with self.assertRaisesMessage(
DisallowedRedirect, "Unsafe redirect to URL with protocol 'ssh'"
):
HttpResponseRedirect.__init__(response, "ssh://foo")
expected = (
'<HttpResponseRedirect status_code=302, "text/html; charset=utf-8", '
'url="ssh://foo">'
)
self.assertEqual(repr(response), expected)
def test_not_modified(self):
response = HttpResponseNotModified()
self.assertEqual(response.status_code, 304)
# 304 responses should not have content/content-type
with self.assertRaises(AttributeError):
response.content = "Hello dear"
self.assertNotIn("content-type", response)
def test_not_modified_repr(self):
response = HttpResponseNotModified()
self.assertEqual(repr(response), "<HttpResponseNotModified status_code=304>")
def test_not_allowed(self):
response = HttpResponseNotAllowed(["GET"])
self.assertEqual(response.status_code, 405)
# Standard HttpResponse init args can be used
response = HttpResponseNotAllowed(
["GET"], content="Only the GET method is allowed"
)
self.assertContains(response, "Only the GET method is allowed", status_code=405)
def test_not_allowed_repr(self):
response = HttpResponseNotAllowed(["GET", "OPTIONS"], content_type="text/plain")
expected = (
'<HttpResponseNotAllowed [GET, OPTIONS] status_code=405, "text/plain">'
)
self.assertEqual(repr(response), expected)
def test_not_allowed_repr_no_content_type(self):
response = HttpResponseNotAllowed(("GET", "POST"))
del response.headers["Content-Type"]
self.assertEqual(
repr(response), "<HttpResponseNotAllowed [GET, POST] status_code=405>"
)
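    # An illustrative sketch (an addition, not part of the original suite):
    # HttpResponseNotAllowed advertises the permitted methods in the Allow
    # header.
    def test_not_allowed_allow_header_sketch(self):
        response = HttpResponseNotAllowed(["GET", "POST"])
        self.assertEqual(response.headers["Allow"], "GET, POST")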
class JsonResponseTests(SimpleTestCase):
def test_json_response_non_ascii(self):
data = {"key": "łóżko"}
response = JsonResponse(data)
self.assertEqual(json.loads(response.content.decode()), data)
def test_json_response_raises_type_error_with_default_setting(self):
with self.assertRaisesMessage(
TypeError,
"In order to allow non-dict objects to be serialized set the "
"safe parameter to False",
):
JsonResponse([1, 2, 3])
def test_json_response_text(self):
response = JsonResponse("foobar", safe=False)
self.assertEqual(json.loads(response.content.decode()), "foobar")
def test_json_response_list(self):
response = JsonResponse(["foo", "bar"], safe=False)
self.assertEqual(json.loads(response.content.decode()), ["foo", "bar"])
def test_json_response_uuid(self):
u = uuid.uuid4()
response = JsonResponse(u, safe=False)
self.assertEqual(json.loads(response.content.decode()), str(u))
def test_json_response_custom_encoder(self):
class CustomDjangoJSONEncoder(DjangoJSONEncoder):
def encode(self, o):
return json.dumps({"foo": "bar"})
response = JsonResponse({}, encoder=CustomDjangoJSONEncoder)
self.assertEqual(json.loads(response.content.decode()), {"foo": "bar"})
def test_json_response_passing_arguments_to_json_dumps(self):
response = JsonResponse({"foo": "bar"}, json_dumps_params={"indent": 2})
self.assertEqual(response.content.decode(), '{\n "foo": "bar"\n}')
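    # An illustrative sketch (an addition, not part of the original suite):
    # JsonResponse defaults to the application/json content type.
    def test_json_response_default_content_type_sketch(self):
        response = JsonResponse({"foo": "bar"})
        self.assertEqual(response.headers["Content-Type"], "application/json")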
class StreamingHttpResponseTests(SimpleTestCase):
def test_streaming_response(self):
r = StreamingHttpResponse(iter(["hello", "world"]))
# iterating over the response itself yields bytestring chunks.
chunks = list(r)
self.assertEqual(chunks, [b"hello", b"world"])
for chunk in chunks:
self.assertIsInstance(chunk, bytes)
# and the response can only be iterated once.
self.assertEqual(list(r), [])
# even when a sequence that can be iterated many times, like a list,
# is given as content.
r = StreamingHttpResponse(["abc", "def"])
self.assertEqual(list(r), [b"abc", b"def"])
self.assertEqual(list(r), [])
# iterating over strings still yields bytestring chunks.
r.streaming_content = iter(["hello", "café"])
chunks = list(r)
# '\xc3\xa9' == unichr(233).encode()
self.assertEqual(chunks, [b"hello", b"caf\xc3\xa9"])
for chunk in chunks:
self.assertIsInstance(chunk, bytes)
# streaming responses don't have a `content` attribute.
self.assertFalse(hasattr(r, "content"))
# and you can't accidentally assign to a `content` attribute.
with self.assertRaises(AttributeError):
r.content = "xyz"
# but they do have a `streaming_content` attribute.
self.assertTrue(hasattr(r, "streaming_content"))
# that exists so we can check if a response is streaming, and wrap or
# replace the content iterator.
r.streaming_content = iter(["abc", "def"])
r.streaming_content = (chunk.upper() for chunk in r.streaming_content)
self.assertEqual(list(r), [b"ABC", b"DEF"])
# coercing a streaming response to bytes doesn't return a complete HTTP
# message like a regular response does. it only gives us the headers.
r = StreamingHttpResponse(iter(["hello", "world"]))
self.assertEqual(bytes(r), b"Content-Type: text/html; charset=utf-8")
# and this won't consume its content.
self.assertEqual(list(r), [b"hello", b"world"])
# additional content cannot be written to the response.
r = StreamingHttpResponse(iter(["hello", "world"]))
with self.assertRaises(Exception):
r.write("!")
# and we can't tell the current position.
with self.assertRaises(Exception):
r.tell()
r = StreamingHttpResponse(iter(["hello", "world"]))
self.assertEqual(r.getvalue(), b"helloworld")
def test_repr(self):
r = StreamingHttpResponse(iter(["hello", "café"]))
self.assertEqual(
repr(r),
'<StreamingHttpResponse status_code=200, "text/html; charset=utf-8">',
)
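    # An illustrative sketch (an addition, not part of the original suite):
    # map() is another common way to wrap or replace streaming_content, and
    # the chunks it yields are already bytes.
    def test_wrapped_streaming_content_sketch(self):
        r = StreamingHttpResponse(iter(["hello", "world"]))
        r.streaming_content = map(lambda chunk: chunk.upper(), r.streaming_content)
        self.assertEqual(list(r), [b"HELLO", b"WORLD"])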
class FileCloseTests(SimpleTestCase):
def setUp(self):
# Disable the request_finished signal during this test
# to avoid interfering with the database connection.
request_finished.disconnect(close_old_connections)
def tearDown(self):
request_finished.connect(close_old_connections)
def test_response(self):
filename = os.path.join(os.path.dirname(__file__), "abc.txt")
        # HttpResponse consumes the content immediately, so the file is
        # closed as soon as it's assigned.
file1 = open(filename)
r = HttpResponse(file1)
self.assertTrue(file1.closed)
r.close()
        # when multiple files are assigned as content, make sure they are all
        # closed with the response.
file1 = open(filename)
file2 = open(filename)
r = HttpResponse(file1)
r.content = file2
self.assertTrue(file1.closed)
self.assertTrue(file2.closed)
def test_streaming_response(self):
filename = os.path.join(os.path.dirname(__file__), "abc.txt")
# file isn't closed until we close the response.
file1 = open(filename)
r = StreamingHttpResponse(file1)
self.assertFalse(file1.closed)
r.close()
self.assertTrue(file1.closed)
        # when multiple files are assigned as content, make sure they are all
        # closed with the response.
file1 = open(filename)
file2 = open(filename)
r = StreamingHttpResponse(file1)
r.streaming_content = file2
self.assertFalse(file1.closed)
self.assertFalse(file2.closed)
r.close()
self.assertTrue(file1.closed)
self.assertTrue(file2.closed)
class CookieTests(unittest.TestCase):
def test_encode(self):
"""Semicolons and commas are encoded."""
c = SimpleCookie()
c["test"] = "An,awkward;value"
self.assertNotIn(";", c.output().rstrip(";")) # IE compat
self.assertNotIn(",", c.output().rstrip(";")) # Safari compat
def test_decode(self):
"""Semicolons and commas are decoded."""
c = SimpleCookie()
c["test"] = "An,awkward;value"
c2 = SimpleCookie()
c2.load(c.output()[12:])
self.assertEqual(c["test"].value, c2["test"].value)
c3 = parse_cookie(c.output()[12:])
self.assertEqual(c["test"].value, c3["test"])
def test_nonstandard_keys(self):
"""
A single non-standard cookie name doesn't affect all cookies (#13007).
"""
self.assertIn("good_cookie", parse_cookie("good_cookie=yes;bad:cookie=yes"))
def test_repeated_nonstandard_keys(self):
"""
A repeated non-standard name doesn't affect all cookies (#15852).
"""
self.assertIn("good_cookie", parse_cookie("a:=b; a:=c; good_cookie=yes"))
def test_python_cookies(self):
"""
Test cases copied from Python's Lib/test/test_http_cookies.py
"""
self.assertEqual(
parse_cookie("chips=ahoy; vienna=finger"),
{"chips": "ahoy", "vienna": "finger"},
)
# Here parse_cookie() differs from Python's cookie parsing in that it
# treats all semicolons as delimiters, even within quotes.
self.assertEqual(
parse_cookie('keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"'),
{"keebler": '"E=mc2', "L": '\\"Loves\\"', "fudge": "\\012", "": '"'},
)
# Illegal cookies that have an '=' char in an unquoted value.
self.assertEqual(parse_cookie("keebler=E=mc2"), {"keebler": "E=mc2"})
# Cookies with ':' character in their name.
self.assertEqual(
parse_cookie("key:term=value:term"), {"key:term": "value:term"}
)
# Cookies with '[' and ']'.
self.assertEqual(
parse_cookie("a=b; c=[; d=r; f=h"), {"a": "b", "c": "[", "d": "r", "f": "h"}
)
def test_cookie_edgecases(self):
# Cookies that RFC 6265 allows.
self.assertEqual(
parse_cookie("a=b; Domain=example.com"), {"a": "b", "Domain": "example.com"}
)
# parse_cookie() has historically kept only the last cookie with the
# same name.
self.assertEqual(parse_cookie("a=b; h=i; a=c"), {"a": "c", "h": "i"})
def test_invalid_cookies(self):
"""
Cookie strings that go against RFC 6265 but browsers will send if set
via document.cookie.
"""
# Chunks without an equals sign appear as unnamed values per
# https://bugzilla.mozilla.org/show_bug.cgi?id=169091
self.assertIn(
"django_language", parse_cookie("abc=def; unnamed; django_language=en")
)
# Even a double quote may be an unnamed value.
self.assertEqual(parse_cookie('a=b; "; c=d'), {"a": "b", "": '"', "c": "d"})
# Spaces in names and values, and an equals sign in values.
self.assertEqual(
parse_cookie("a b c=d e = f; gh=i"), {"a b c": "d e = f", "gh": "i"}
)
# More characters the spec forbids.
self.assertEqual(
parse_cookie('a b,c<>@:/[]?{}=d " =e,f g'),
{"a b,c<>@:/[]?{}": 'd " =e,f g'},
)
# Unicode characters. The spec only allows ASCII.
self.assertEqual(
parse_cookie("saint=André Bessette"), {"saint": "André Bessette"}
)
# Browsers don't send extra whitespace or semicolons in Cookie headers,
# but parse_cookie() should parse whitespace the same way
# document.cookie parses whitespace.
self.assertEqual(
parse_cookie(" = b ; ; = ; c = ; "), {"": "b", "c": ""}
)
def test_samesite(self):
c = SimpleCookie("name=value; samesite=lax; httponly")
self.assertEqual(c["name"]["samesite"], "lax")
self.assertIn("SameSite=lax", c.output())
def test_httponly_after_load(self):
c = SimpleCookie()
c.load("name=val")
c["name"]["httponly"] = True
self.assertTrue(c["name"]["httponly"])
def test_load_dict(self):
c = SimpleCookie()
c.load({"name": "val"})
self.assertEqual(c["name"].value, "val")
def test_pickle(self):
rawdata = 'Customer="WILE_E_COYOTE"; Path=/acme; Version=1'
expected_output = "Set-Cookie: %s" % rawdata
C = SimpleCookie()
C.load(rawdata)
self.assertEqual(C.output(), expected_output)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
C1 = pickle.loads(pickle.dumps(C, protocol=proto))
self.assertEqual(C1.output(), expected_output)
class HttpResponseHeadersTestCase(SimpleTestCase):
"""Headers by treating HttpResponse like a dictionary."""
def test_headers(self):
response = HttpResponse()
response["X-Foo"] = "bar"
self.assertEqual(response["X-Foo"], "bar")
self.assertEqual(response.headers["X-Foo"], "bar")
self.assertIn("X-Foo", response)
self.assertIs(response.has_header("X-Foo"), True)
del response["X-Foo"]
self.assertNotIn("X-Foo", response)
self.assertNotIn("X-Foo", response.headers)
# del doesn't raise a KeyError on nonexistent headers.
del response["X-Foo"]
def test_headers_as_iterable_of_tuple_pairs(self):
response = HttpResponse(headers=(("X-Foo", "bar"),))
self.assertEqual(response["X-Foo"], "bar")
def test_headers_bytestring(self):
response = HttpResponse()
response["X-Foo"] = b"bar"
self.assertEqual(response["X-Foo"], "bar")
self.assertEqual(response.headers["X-Foo"], "bar")
def test_newlines_in_headers(self):
response = HttpResponse()
with self.assertRaises(BadHeaderError):
response["test\rstr"] = "test"
with self.assertRaises(BadHeaderError):
response["test\nstr"] = "test"
|
1b4f2d76fdd35f2dd6c618e761cfd64cb8b60237125d7f602e4cb9930576904f | import datetime
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import models
from django.forms import CharField, FileField, Form, ModelForm
from django.forms.models import ModelFormMetaclass
from django.test import SimpleTestCase, TestCase
from ..models import (
BoundaryModel,
ChoiceFieldModel,
ChoiceModel,
ChoiceOptionModel,
Defaults,
FileModel,
OptionalMultiChoiceModel,
)
from . import jinja2_tests
class ChoiceFieldForm(ModelForm):
class Meta:
model = ChoiceFieldModel
fields = "__all__"
class OptionalMultiChoiceModelForm(ModelForm):
class Meta:
model = OptionalMultiChoiceModel
fields = "__all__"
class ChoiceFieldExclusionForm(ModelForm):
multi_choice = CharField(max_length=50)
class Meta:
exclude = ["multi_choice"]
model = ChoiceFieldModel
class EmptyCharLabelChoiceForm(ModelForm):
class Meta:
model = ChoiceModel
fields = ["name", "choice"]
class EmptyIntegerLabelChoiceForm(ModelForm):
class Meta:
model = ChoiceModel
fields = ["name", "choice_integer"]
class EmptyCharLabelNoneChoiceForm(ModelForm):
class Meta:
model = ChoiceModel
fields = ["name", "choice_string_w_none"]
class FileForm(Form):
file1 = FileField()
class TestTicket14567(TestCase):
"""
    The return values of ModelMultipleChoiceFields are QuerySets (#14567).
"""
def test_empty_queryset_return(self):
"""
If a model's ManyToManyField has blank=True and is saved with no data,
a queryset is returned.
"""
option = ChoiceOptionModel.objects.create(name="default")
form = OptionalMultiChoiceModelForm(
{"multi_choice_optional": "", "multi_choice": [option.pk]}
)
self.assertTrue(form.is_valid())
# The empty value is a QuerySet
self.assertIsInstance(
form.cleaned_data["multi_choice_optional"], models.query.QuerySet
)
# While we're at it, test whether a QuerySet is returned if there *is* a value.
self.assertIsInstance(form.cleaned_data["multi_choice"], models.query.QuerySet)
class ModelFormCallableModelDefault(TestCase):
def test_no_empty_option(self):
"""
If a model's ForeignKey has blank=False and a default, no empty option
is created.
"""
option = ChoiceOptionModel.objects.create(name="default")
choices = list(ChoiceFieldForm().fields["choice"].choices)
self.assertEqual(len(choices), 1)
self.assertEqual(choices[0], (option.pk, str(option)))
def test_callable_initial_value(self):
"""
The initial value for a callable default returning a queryset is the
pk.
"""
ChoiceOptionModel.objects.create(id=1, name="default")
ChoiceOptionModel.objects.create(id=2, name="option 2")
ChoiceOptionModel.objects.create(id=3, name="option 3")
self.assertHTMLEqual(
ChoiceFieldForm().as_p(),
"""
<p><label for="id_choice">Choice:</label>
<select name="choice" id="id_choice">
<option value="1" selected>ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select>
<input type="hidden" name="initial-choice" value="1" id="initial-id_choice">
</p>
<p><label for="id_choice_int">Choice int:</label>
<select name="choice_int" id="id_choice_int">
<option value="1" selected>ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select>
<input type="hidden" name="initial-choice_int" value="1"
id="initial-id_choice_int">
</p>
<p><label for="id_multi_choice">Multi choice:</label>
<select multiple name="multi_choice" id="id_multi_choice" required>
<option value="1" selected>ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select>
<input type="hidden" name="initial-multi_choice" value="1"
id="initial-id_multi_choice_0">
</p>
<p><label for="id_multi_choice_int">Multi choice int:</label>
<select multiple name="multi_choice_int" id="id_multi_choice_int" required>
<option value="1" selected>ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select>
<input type="hidden" name="initial-multi_choice_int" value="1"
id="initial-id_multi_choice_int_0">
</p>
""",
)
def test_initial_instance_value(self):
"Initial instances for model fields may also be instances (refs #7287)"
ChoiceOptionModel.objects.create(id=1, name="default")
obj2 = ChoiceOptionModel.objects.create(id=2, name="option 2")
obj3 = ChoiceOptionModel.objects.create(id=3, name="option 3")
self.assertHTMLEqual(
ChoiceFieldForm(
initial={
"choice": obj2,
"choice_int": obj2,
"multi_choice": [obj2, obj3],
"multi_choice_int": ChoiceOptionModel.objects.exclude(
name="default"
),
}
).as_p(),
"""
<p><label for="id_choice">Choice:</label>
<select name="choice" id="id_choice">
<option value="1">ChoiceOption 1</option>
<option value="2" selected>ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select>
<input type="hidden" name="initial-choice" value="2" id="initial-id_choice">
</p>
<p><label for="id_choice_int">Choice int:</label>
<select name="choice_int" id="id_choice_int">
<option value="1">ChoiceOption 1</option>
<option value="2" selected>ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select>
<input type="hidden" name="initial-choice_int" value="2"
id="initial-id_choice_int">
</p>
<p><label for="id_multi_choice">Multi choice:</label>
<select multiple name="multi_choice" id="id_multi_choice" required>
<option value="1">ChoiceOption 1</option>
<option value="2" selected>ChoiceOption 2</option>
<option value="3" selected>ChoiceOption 3</option>
</select>
<input type="hidden" name="initial-multi_choice" value="2"
id="initial-id_multi_choice_0">
<input type="hidden" name="initial-multi_choice" value="3"
id="initial-id_multi_choice_1">
</p>
<p><label for="id_multi_choice_int">Multi choice int:</label>
<select multiple name="multi_choice_int" id="id_multi_choice_int" required>
<option value="1">ChoiceOption 1</option>
<option value="2" selected>ChoiceOption 2</option>
<option value="3" selected>ChoiceOption 3</option>
</select>
<input type="hidden" name="initial-multi_choice_int" value="2"
id="initial-id_multi_choice_int_0">
<input type="hidden" name="initial-multi_choice_int" value="3"
id="initial-id_multi_choice_int_1">
</p>
""",
)
def test_callable_default_hidden_widget_value_not_overridden(self):
class FieldWithCallableDefaultsModel(models.Model):
int_field = models.IntegerField(default=lambda: 1)
json_field = models.JSONField(default=dict)
class FieldWithCallableDefaultsModelForm(ModelForm):
class Meta:
model = FieldWithCallableDefaultsModel
fields = "__all__"
form = FieldWithCallableDefaultsModelForm(
data={
"initial-int_field": "1",
"int_field": "1000",
"initial-json_field": "{}",
"json_field": '{"key": "val"}',
}
)
form_html = form.as_p()
self.assertHTMLEqual(
form_html,
"""
<p>
<label for="id_int_field">Int field:</label>
<input type="number" name="int_field" value="1000"
required id="id_int_field">
<input type="hidden" name="initial-int_field" value="1"
id="initial-id_int_field">
</p>
<p>
<label for="id_json_field">Json field:</label>
<textarea cols="40" id="id_json_field" name="json_field" required rows="10">
{"key": "val"}
</textarea>
<input id="initial-id_json_field" name="initial-json_field" type="hidden"
value="{}">
</p>
""",
)
class FormsModelTestCase(TestCase):
def test_unicode_filename(self):
# FileModel with Unicode filename and data #########################
file1 = SimpleUploadedFile(
"我隻氣墊船裝滿晒鱔.txt", "मेरी मँडराने वाली नाव सर्पमीनों से भरी ह".encode()
)
f = FileForm(data={}, files={"file1": file1}, auto_id=False)
self.assertTrue(f.is_valid())
self.assertIn("file1", f.cleaned_data)
m = FileModel.objects.create(file=f.cleaned_data["file1"])
self.assertEqual(
m.file.name,
"tests/\u6211\u96bb\u6c23\u588a\u8239\u88dd\u6eff\u6652\u9c54.txt",
)
m.delete()
def test_boundary_conditions(self):
# Boundary conditions on a PositiveIntegerField #########################
class BoundaryForm(ModelForm):
class Meta:
model = BoundaryModel
fields = "__all__"
f = BoundaryForm({"positive_integer": 100})
self.assertTrue(f.is_valid())
f = BoundaryForm({"positive_integer": 0})
self.assertTrue(f.is_valid())
f = BoundaryForm({"positive_integer": -100})
self.assertFalse(f.is_valid())
def test_formfield_initial(self):
# If the model has default values for some fields, they are used as the
# formfield initial values.
class DefaultsForm(ModelForm):
class Meta:
model = Defaults
fields = "__all__"
self.assertEqual(DefaultsForm().fields["name"].initial, "class default value")
self.assertEqual(
DefaultsForm().fields["def_date"].initial, datetime.date(1980, 1, 1)
)
self.assertEqual(DefaultsForm().fields["value"].initial, 42)
r1 = DefaultsForm()["callable_default"].as_widget()
r2 = DefaultsForm()["callable_default"].as_widget()
self.assertNotEqual(r1, r2)
# In a ModelForm that is passed an instance, the initial values come from the
# instance's values, not the model's defaults.
foo_instance = Defaults(
name="instance value", def_date=datetime.date(1969, 4, 4), value=12
)
instance_form = DefaultsForm(instance=foo_instance)
self.assertEqual(instance_form.initial["name"], "instance value")
self.assertEqual(instance_form.initial["def_date"], datetime.date(1969, 4, 4))
self.assertEqual(instance_form.initial["value"], 12)
from django.forms import CharField
class ExcludingForm(ModelForm):
name = CharField(max_length=255)
class Meta:
model = Defaults
exclude = ["name", "callable_default"]
f = ExcludingForm(
{"name": "Hello", "value": 99, "def_date": datetime.date(1999, 3, 2)}
)
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data["name"], "Hello")
obj = f.save()
self.assertEqual(obj.name, "class default value")
self.assertEqual(obj.value, 99)
self.assertEqual(obj.def_date, datetime.date(1999, 3, 2))
class RelatedModelFormTests(SimpleTestCase):
def test_invalid_loading_order(self):
"""
Test for issue 10405
"""
class A(models.Model):
ref = models.ForeignKey("B", models.CASCADE)
class Meta:
model = A
fields = "__all__"
msg = (
"Cannot create form field for 'ref' yet, because "
"its related model 'B' has not been loaded yet"
)
with self.assertRaisesMessage(ValueError, msg):
ModelFormMetaclass("Form", (ModelForm,), {"Meta": Meta})
class B(models.Model):
pass
def test_valid_loading_order(self):
"""
Test for issue 10405
"""
class C(models.Model):
ref = models.ForeignKey("D", models.CASCADE)
class D(models.Model):
pass
class Meta:
model = C
fields = "__all__"
self.assertTrue(
issubclass(
ModelFormMetaclass("Form", (ModelForm,), {"Meta": Meta}), ModelForm
)
)
class ManyToManyExclusionTestCase(TestCase):
def test_m2m_field_exclusion(self):
# Issue 12337. save_instance should honor the passed-in exclude keyword.
opt1 = ChoiceOptionModel.objects.create(id=1, name="default")
opt2 = ChoiceOptionModel.objects.create(id=2, name="option 2")
opt3 = ChoiceOptionModel.objects.create(id=3, name="option 3")
initial = {
"choice": opt1,
"choice_int": opt1,
}
data = {
"choice": opt2.pk,
"choice_int": opt2.pk,
"multi_choice": "string data!",
"multi_choice_int": [opt1.pk],
}
instance = ChoiceFieldModel.objects.create(**initial)
instance.multi_choice.set([opt2, opt3])
instance.multi_choice_int.set([opt2, opt3])
form = ChoiceFieldExclusionForm(data=data, instance=instance)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data["multi_choice"], data["multi_choice"])
form.save()
self.assertEqual(form.instance.choice.pk, data["choice"])
self.assertEqual(form.instance.choice_int.pk, data["choice_int"])
self.assertEqual(list(form.instance.multi_choice.all()), [opt2, opt3])
self.assertEqual(
[obj.pk for obj in form.instance.multi_choice_int.all()],
data["multi_choice_int"],
)
class EmptyLabelTestCase(TestCase):
def test_empty_field_char(self):
f = EmptyCharLabelChoiceForm()
self.assertHTMLEqual(
f.as_p(),
"""
<p><label for="id_name">Name:</label>
<input id="id_name" maxlength="10" name="name" type="text" required></p>
<p><label for="id_choice">Choice:</label>
<select id="id_choice" name="choice">
<option value="" selected>No Preference</option>
<option value="f">Foo</option>
<option value="b">Bar</option>
</select></p>
""",
)
def test_empty_field_char_none(self):
f = EmptyCharLabelNoneChoiceForm()
self.assertHTMLEqual(
f.as_p(),
"""
<p><label for="id_name">Name:</label>
<input id="id_name" maxlength="10" name="name" type="text" required></p>
<p><label for="id_choice_string_w_none">Choice string w none:</label>
<select id="id_choice_string_w_none" name="choice_string_w_none">
<option value="" selected>No Preference</option>
<option value="f">Foo</option>
<option value="b">Bar</option>
</select></p>
""",
)
def test_save_empty_label_forms(self):
# Saving a form with a blank choice results in the expected
# value being stored in the database.
tests = [
(EmptyCharLabelNoneChoiceForm, "choice_string_w_none", None),
(EmptyIntegerLabelChoiceForm, "choice_integer", None),
(EmptyCharLabelChoiceForm, "choice", ""),
]
for form, key, expected in tests:
with self.subTest(form=form):
f = form({"name": "some-key", key: ""})
self.assertTrue(f.is_valid())
m = f.save()
self.assertEqual(expected, getattr(m, key))
self.assertEqual(
"No Preference", getattr(m, "get_{}_display".format(key))()
)
def test_empty_field_integer(self):
f = EmptyIntegerLabelChoiceForm()
self.assertHTMLEqual(
f.as_p(),
"""
<p><label for="id_name">Name:</label>
<input id="id_name" maxlength="10" name="name" type="text" required></p>
<p><label for="id_choice_integer">Choice integer:</label>
<select id="id_choice_integer" name="choice_integer">
<option value="" selected>No Preference</option>
<option value="1">Foo</option>
<option value="2">Bar</option>
</select></p>
""",
)
def test_get_display_value_on_none(self):
m = ChoiceModel.objects.create(name="test", choice="", choice_integer=None)
self.assertIsNone(m.choice_integer)
self.assertEqual("No Preference", m.get_choice_integer_display())
def test_html_rendering_of_prepopulated_models(self):
none_model = ChoiceModel(name="none-test", choice_integer=None)
f = EmptyIntegerLabelChoiceForm(instance=none_model)
self.assertHTMLEqual(
f.as_p(),
"""
<p><label for="id_name">Name:</label>
<input id="id_name" maxlength="10" name="name" type="text"
value="none-test" required>
</p>
<p><label for="id_choice_integer">Choice integer:</label>
<select id="id_choice_integer" name="choice_integer">
<option value="" selected>No Preference</option>
<option value="1">Foo</option>
<option value="2">Bar</option>
</select></p>
""",
)
foo_model = ChoiceModel(name="foo-test", choice_integer=1)
f = EmptyIntegerLabelChoiceForm(instance=foo_model)
self.assertHTMLEqual(
f.as_p(),
"""
<p><label for="id_name">Name:</label>
<input id="id_name" maxlength="10" name="name" type="text"
value="foo-test" required>
</p>
<p><label for="id_choice_integer">Choice integer:</label>
<select id="id_choice_integer" name="choice_integer">
<option value="">No Preference</option>
<option value="1" selected>Foo</option>
<option value="2">Bar</option>
</select></p>
""",
)
@jinja2_tests
class Jinja2EmptyLabelTestCase(EmptyLabelTestCase):
pass
import copy
import datetime
import json
import uuid
from django.core.exceptions import NON_FIELD_ERRORS
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.validators import MaxValueValidator, RegexValidator
from django.forms import (
BooleanField,
CharField,
CheckboxSelectMultiple,
ChoiceField,
DateField,
DateTimeField,
EmailField,
FileField,
FileInput,
FloatField,
Form,
HiddenInput,
ImageField,
IntegerField,
MultipleChoiceField,
MultipleHiddenInput,
MultiValueField,
MultiWidget,
NullBooleanField,
PasswordInput,
RadioSelect,
Select,
SplitDateTimeField,
SplitHiddenDateTimeWidget,
Textarea,
TextInput,
TimeField,
ValidationError,
forms,
)
from django.forms.renderers import DjangoTemplates, get_default_renderer
from django.forms.utils import ErrorList
from django.http import QueryDict
from django.template import Context, Template
from django.test import SimpleTestCase
from django.test.utils import isolate_lru_cache, override_settings
from django.utils.datastructures import MultiValueDict
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.safestring import mark_safe
from . import jinja2_tests
class FrameworkForm(Form):
name = CharField()
    language = ChoiceField(
        choices=[("P", "Python"), ("J", "Java")], widget=RadioSelect
    )
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
class PersonNew(Form):
first_name = CharField(widget=TextInput(attrs={"id": "first_name_id"}))
last_name = CharField()
birthday = DateField()
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(
choices=[("J", "John Lennon"), ("P", "Paul McCartney")],
widget=CheckboxSelectMultiple,
)
class MultiValueDictLike(dict):
def getlist(self, key):
return [self[key]]
class FormsTestCase(SimpleTestCase):
# A Form is a collection of Fields. It knows how to validate a set of data and it
# knows how to render itself in a couple of default ways (e.g., an HTML table).
# You can pass it data in __init__(), as a dictionary.
def test_form(self):
# Pass a dictionary to a Form's __init__().
p = Person(
{"first_name": "John", "last_name": "Lennon", "birthday": "1940-10-9"}
)
self.assertTrue(p.is_bound)
self.assertEqual(p.errors, {})
self.assertIsInstance(p.errors, dict)
self.assertTrue(p.is_valid())
self.assertHTMLEqual(p.errors.as_ul(), "")
self.assertEqual(p.errors.as_text(), "")
self.assertEqual(p.cleaned_data["first_name"], "John")
self.assertEqual(p.cleaned_data["last_name"], "Lennon")
self.assertEqual(p.cleaned_data["birthday"], datetime.date(1940, 10, 9))
self.assertHTMLEqual(
str(p["first_name"]),
'<input type="text" name="first_name" value="John" id="id_first_name" '
"required>",
)
self.assertHTMLEqual(
str(p["last_name"]),
'<input type="text" name="last_name" value="Lennon" id="id_last_name" '
"required>",
)
self.assertHTMLEqual(
str(p["birthday"]),
'<input type="text" name="birthday" value="1940-10-9" id="id_birthday" '
"required>",
)
msg = (
"Key 'nonexistentfield' not found in 'Person'. Choices are: birthday, "
"first_name, last_name."
)
with self.assertRaisesMessage(KeyError, msg):
p["nonexistentfield"]
form_output = []
for boundfield in p:
form_output.append(str(boundfield))
self.assertHTMLEqual(
"\n".join(form_output),
'<input type="text" name="first_name" value="John" id="id_first_name" '
"required>"
'<input type="text" name="last_name" value="Lennon" id="id_last_name" '
"required>"
'<input type="text" name="birthday" value="1940-10-9" id="id_birthday" '
"required>",
)
form_output = []
for boundfield in p:
form_output.append([boundfield.label, boundfield.data])
self.assertEqual(
form_output,
[
["First name", "John"],
["Last name", "Lennon"],
["Birthday", "1940-10-9"],
],
)
self.assertHTMLEqual(
str(p),
'<div><label for="id_first_name">First name:</label><input type="text" '
'name="first_name" value="John" required id="id_first_name"></div><div>'
'<label for="id_last_name">Last name:</label><input type="text" '
'name="last_name" value="Lennon" required id="id_last_name"></div><div>'
'<label for="id_birthday">Birthday:</label><input type="text" '
'name="birthday" value="1940-10-9" required id="id_birthday"></div>',
)
self.assertHTMLEqual(
p.as_div(),
'<div><label for="id_first_name">First name:</label><input type="text" '
'name="first_name" value="John" required id="id_first_name"></div><div>'
'<label for="id_last_name">Last name:</label><input type="text" '
'name="last_name" value="Lennon" required id="id_last_name"></div><div>'
'<label for="id_birthday">Birthday:</label><input type="text" '
'name="birthday" value="1940-10-9" required id="id_birthday"></div>',
)
def test_empty_dict(self):
# Empty dictionaries are valid, too.
p = Person({})
self.assertTrue(p.is_bound)
self.assertEqual(p.errors["first_name"], ["This field is required."])
self.assertEqual(p.errors["last_name"], ["This field is required."])
self.assertEqual(p.errors["birthday"], ["This field is required."])
self.assertFalse(p.is_valid())
self.assertEqual(p.cleaned_data, {})
self.assertHTMLEqual(
str(p),
'<div><label for="id_first_name">First name:</label>'
'<ul class="errorlist"><li>This field is required.</li></ul>'
'<input type="text" name="first_name" required id="id_first_name"></div>'
'<div><label for="id_last_name">Last name:</label>'
'<ul class="errorlist"><li>This field is required.</li></ul>'
'<input type="text" name="last_name" required id="id_last_name"></div><div>'
'<label for="id_birthday">Birthday:</label>'
'<ul class="errorlist"><li>This field is required.</li></ul>'
'<input type="text" name="birthday" required id="id_birthday"></div>',
)
self.assertHTMLEqual(
p.as_table(),
"""<tr><th><label for="id_first_name">First name:</label></th><td>
<ul class="errorlist"><li>This field is required.</li></ul>
<input type="text" name="first_name" id="id_first_name" required></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th>
<td><ul class="errorlist"><li>This field is required.</li></ul>
<input type="text" name="last_name" id="id_last_name" required></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th>
<td><ul class="errorlist"><li>This field is required.</li></ul>
<input type="text" name="birthday" id="id_birthday" required></td></tr>""",
)
self.assertHTMLEqual(
p.as_ul(),
"""<li><ul class="errorlist"><li>This field is required.</li></ul>
<label for="id_first_name">First name:</label>
<input type="text" name="first_name" id="id_first_name" required></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
<label for="id_last_name">Last name:</label>
<input type="text" name="last_name" id="id_last_name" required></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
<label for="id_birthday">Birthday:</label>
<input type="text" name="birthday" id="id_birthday" required></li>""",
)
self.assertHTMLEqual(
p.as_p(),
"""<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_first_name">First name:</label>
<input type="text" name="first_name" id="id_first_name" required></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_last_name">Last name:</label>
<input type="text" name="last_name" id="id_last_name" required></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p><label for="id_birthday">Birthday:</label>
<input type="text" name="birthday" id="id_birthday" required></p>""",
)
self.assertHTMLEqual(
p.as_div(),
'<div><label for="id_first_name">First name:</label>'
'<ul class="errorlist"><li>This field is required.</li></ul>'
'<input type="text" name="first_name" required id="id_first_name"></div>'
'<div><label for="id_last_name">Last name:</label>'
'<ul class="errorlist"><li>This field is required.</li></ul>'
'<input type="text" name="last_name" required id="id_last_name"></div><div>'
'<label for="id_birthday">Birthday:</label>'
'<ul class="errorlist"><li>This field is required.</li></ul>'
'<input type="text" name="birthday" required id="id_birthday"></div>',
)
def test_empty_querydict_args(self):
data = QueryDict()
files = QueryDict()
p = Person(data, files)
self.assertIs(p.data, data)
self.assertIs(p.files, files)
def test_unbound_form(self):
# If you don't pass any values to the Form's __init__(), or if you pass None,
# the Form will be considered unbound and won't do any validation. Form.errors
# will be an empty dictionary *but* Form.is_valid() will return False.
p = Person()
self.assertFalse(p.is_bound)
self.assertEqual(p.errors, {})
self.assertFalse(p.is_valid())
with self.assertRaises(AttributeError):
p.cleaned_data
self.assertHTMLEqual(
str(p),
'<div><label for="id_first_name">First name:</label><input type="text" '
'name="first_name" id="id_first_name" required></div><div><label '
'for="id_last_name">Last name:</label><input type="text" name="last_name" '
'id="id_last_name" required></div><div><label for="id_birthday">'
'Birthday:</label><input type="text" name="birthday" id="id_birthday" '
"required></div>",
)
self.assertHTMLEqual(
p.as_table(),
"""<tr><th><label for="id_first_name">First name:</label></th><td>
<input type="text" name="first_name" id="id_first_name" required></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td>
<input type="text" name="last_name" id="id_last_name" required></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td>
<input type="text" name="birthday" id="id_birthday" required></td></tr>""",
)
self.assertHTMLEqual(
p.as_ul(),
"""<li><label for="id_first_name">First name:</label>
<input type="text" name="first_name" id="id_first_name" required></li>
<li><label for="id_last_name">Last name:</label>
<input type="text" name="last_name" id="id_last_name" required></li>
<li><label for="id_birthday">Birthday:</label>
<input type="text" name="birthday" id="id_birthday" required></li>""",
)
self.assertHTMLEqual(
p.as_p(),
"""<p><label for="id_first_name">First name:</label>
<input type="text" name="first_name" id="id_first_name" required></p>
<p><label for="id_last_name">Last name:</label>
<input type="text" name="last_name" id="id_last_name" required></p>
<p><label for="id_birthday">Birthday:</label>
<input type="text" name="birthday" id="id_birthday" required></p>""",
)
self.assertHTMLEqual(
p.as_div(),
'<div><label for="id_first_name">First name:</label><input type="text" '
'name="first_name" id="id_first_name" required></div><div><label '
'for="id_last_name">Last name:</label><input type="text" name="last_name" '
'id="id_last_name" required></div><div><label for="id_birthday">'
'Birthday:</label><input type="text" name="birthday" id="id_birthday" '
"required></div>",
)
def test_unicode_values(self):
# Unicode values are handled properly.
p = Person(
{
"first_name": "John",
"last_name": "\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111",
"birthday": "1940-10-9",
}
)
self.assertHTMLEqual(
p.as_table(),
'<tr><th><label for="id_first_name">First name:</label></th><td>'
'<input type="text" name="first_name" value="John" id="id_first_name" '
"required></td></tr>\n"
'<tr><th><label for="id_last_name">Last name:</label>'
'</th><td><input type="text" name="last_name" '
'value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111"'
'id="id_last_name" required></td></tr>\n'
'<tr><th><label for="id_birthday">Birthday:</label></th><td>'
'<input type="text" name="birthday" value="1940-10-9" id="id_birthday" '
"required></td></tr>",
)
self.assertHTMLEqual(
p.as_ul(),
'<li><label for="id_first_name">First name:</label> '
'<input type="text" name="first_name" value="John" id="id_first_name" '
"required></li>\n"
'<li><label for="id_last_name">Last name:</label> '
'<input type="text" name="last_name" '
'value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" '
'id="id_last_name" required></li>\n'
'<li><label for="id_birthday">Birthday:</label> '
'<input type="text" name="birthday" value="1940-10-9" id="id_birthday" '
"required></li>",
)
self.assertHTMLEqual(
p.as_p(),
'<p><label for="id_first_name">First name:</label> '
'<input type="text" name="first_name" value="John" id="id_first_name" '
"required></p>\n"
'<p><label for="id_last_name">Last name:</label> '
'<input type="text" name="last_name" '
'value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" '
'id="id_last_name" required></p>\n'
'<p><label for="id_birthday">Birthday:</label> '
'<input type="text" name="birthday" value="1940-10-9" id="id_birthday" '
"required></p>",
)
self.assertHTMLEqual(
p.as_div(),
'<div><label for="id_first_name">First name:</label>'
'<input type="text" name="first_name" value="John" id="id_first_name" '
'required></div><div><label for="id_last_name">Last name:</label>'
'<input type="text" name="last_name"'
'value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" '
'id="id_last_name" required></div><div><label for="id_birthday">'
'Birthday:</label><input type="text" name="birthday" value="1940-10-9" '
'id="id_birthday" required></div>',
)
p = Person({"last_name": "Lennon"})
self.assertEqual(p.errors["first_name"], ["This field is required."])
self.assertEqual(p.errors["birthday"], ["This field is required."])
self.assertFalse(p.is_valid())
self.assertEqual(
p.errors,
{
"birthday": ["This field is required."],
"first_name": ["This field is required."],
},
)
self.assertEqual(p.cleaned_data, {"last_name": "Lennon"})
self.assertEqual(p["first_name"].errors, ["This field is required."])
self.assertHTMLEqual(
p["first_name"].errors.as_ul(),
'<ul class="errorlist"><li>This field is required.</li></ul>',
)
self.assertEqual(p["first_name"].errors.as_text(), "* This field is required.")
p = Person()
self.assertHTMLEqual(
str(p["first_name"]),
'<input type="text" name="first_name" id="id_first_name" required>',
)
self.assertHTMLEqual(
str(p["last_name"]),
'<input type="text" name="last_name" id="id_last_name" required>',
)
self.assertHTMLEqual(
str(p["birthday"]),
'<input type="text" name="birthday" id="id_birthday" required>',
)
def test_cleaned_data_only_fields(self):
# cleaned_data will always *only* contain a key for fields defined in the
        # Form, even if you pass extra data when you instantiate the Form. In this
# example, we pass a bunch of extra fields to the form constructor,
# but cleaned_data contains only the form's fields.
data = {
"first_name": "John",
"last_name": "Lennon",
"birthday": "1940-10-9",
"extra1": "hello",
"extra2": "hello",
}
p = Person(data)
self.assertTrue(p.is_valid())
self.assertEqual(p.cleaned_data["first_name"], "John")
self.assertEqual(p.cleaned_data["last_name"], "Lennon")
self.assertEqual(p.cleaned_data["birthday"], datetime.date(1940, 10, 9))
def test_optional_data(self):
# cleaned_data will include a key and value for *all* fields defined in
# the Form, even if the Form's data didn't include a value for fields
# that are not required. In this example, the data dictionary doesn't
# include a value for the "nick_name" field, but cleaned_data includes
# it. For CharFields, it's set to the empty string.
class OptionalPersonForm(Form):
first_name = CharField()
last_name = CharField()
nick_name = CharField(required=False)
data = {"first_name": "John", "last_name": "Lennon"}
f = OptionalPersonForm(data)
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data["nick_name"], "")
self.assertEqual(f.cleaned_data["first_name"], "John")
self.assertEqual(f.cleaned_data["last_name"], "Lennon")
# For DateFields, it's set to None.
class OptionalPersonForm(Form):
first_name = CharField()
last_name = CharField()
birth_date = DateField(required=False)
data = {"first_name": "John", "last_name": "Lennon"}
f = OptionalPersonForm(data)
self.assertTrue(f.is_valid())
self.assertIsNone(f.cleaned_data["birth_date"])
self.assertEqual(f.cleaned_data["first_name"], "John")
self.assertEqual(f.cleaned_data["last_name"], "Lennon")
def test_auto_id(self):
# "auto_id" tells the Form to add an "id" attribute to each form
# element. If it's a string that contains '%s', Django will use that as
# a format string into which the field's name will be inserted. It will
# also put a <label> around the human-readable labels for a field.
p = Person(auto_id="%s_id")
self.assertHTMLEqual(
p.as_table(),
"""<tr><th><label for="first_name_id">First name:</label></th><td>
<input type="text" name="first_name" id="first_name_id" required></td></tr>
<tr><th><label for="last_name_id">Last name:</label></th><td>
<input type="text" name="last_name" id="last_name_id" required></td></tr>
<tr><th><label for="birthday_id">Birthday:</label></th><td>
<input type="text" name="birthday" id="birthday_id" required></td></tr>""",
)
self.assertHTMLEqual(
p.as_ul(),
"""<li><label for="first_name_id">First name:</label>
<input type="text" name="first_name" id="first_name_id" required></li>
<li><label for="last_name_id">Last name:</label>
<input type="text" name="last_name" id="last_name_id" required></li>
<li><label for="birthday_id">Birthday:</label>
<input type="text" name="birthday" id="birthday_id" required></li>""",
)
self.assertHTMLEqual(
p.as_p(),
"""<p><label for="first_name_id">First name:</label>
<input type="text" name="first_name" id="first_name_id" required></p>
<p><label for="last_name_id">Last name:</label>
<input type="text" name="last_name" id="last_name_id" required></p>
<p><label for="birthday_id">Birthday:</label>
<input type="text" name="birthday" id="birthday_id" required></p>""",
)
self.assertHTMLEqual(
p.as_div(),
'<div><label for="first_name_id">First name:</label><input type="text" '
'name="first_name" id="first_name_id" required></div><div><label '
'for="last_name_id">Last name:</label><input type="text" '
'name="last_name" id="last_name_id" required></div><div><label '
'for="birthday_id">Birthday:</label><input type="text" name="birthday" '
'id="birthday_id" required></div>',
)
def test_auto_id_true(self):
# If auto_id is any True value whose str() does not contain '%s', the "id"
# attribute will be the name of the field.
p = Person(auto_id=True)
self.assertHTMLEqual(
p.as_ul(),
"""<li><label for="first_name">First name:</label>
<input type="text" name="first_name" id="first_name" required></li>
<li><label for="last_name">Last name:</label>
<input type="text" name="last_name" id="last_name" required></li>
<li><label for="birthday">Birthday:</label>
<input type="text" name="birthday" id="birthday" required></li>""",
)
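        # The rule generalizes beyond True itself: any truthy auto_id whose
        # str() lacks "%s" also falls back to the field's name (an extra
        # illustrative check, not part of the original assertions).
        p = Person(auto_id="x")
        self.assertHTMLEqual(
            str(p["first_name"]),
            '<input type="text" name="first_name" id="first_name" required>',
        )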
def test_auto_id_false(self):
# If auto_id is any False value, an "id" attribute won't be output unless it
# was manually entered.
p = Person(auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li>First name: <input type="text" name="first_name" required></li>
<li>Last name: <input type="text" name="last_name" required></li>
<li>Birthday: <input type="text" name="birthday" required></li>""",
)
def test_id_on_field(self):
# In this example, auto_id is False, but the "id" attribute for the "first_name"
# field is given. Also note that field gets a <label>, while the others don't.
p = PersonNew(auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li><label for="first_name_id">First name:</label>
<input type="text" id="first_name_id" name="first_name" required></li>
<li>Last name: <input type="text" name="last_name" required></li>
<li>Birthday: <input type="text" name="birthday" required></li>""",
)
def test_auto_id_on_form_and_field(self):
# If the "id" attribute is specified in the Form and auto_id is True, the "id"
# attribute in the Form gets precedence.
p = PersonNew(auto_id=True)
self.assertHTMLEqual(
p.as_ul(),
"""<li><label for="first_name_id">First name:</label>
<input type="text" id="first_name_id" name="first_name" required></li>
<li><label for="last_name">Last name:</label>
<input type="text" name="last_name" id="last_name" required></li>
<li><label for="birthday">Birthday:</label>
<input type="text" name="birthday" id="birthday" required></li>""",
)
def test_various_boolean_values(self):
class SignupForm(Form):
email = EmailField()
get_spam = BooleanField()
f = SignupForm(auto_id=False)
self.assertHTMLEqual(
str(f["email"]), '<input type="email" name="email" required>'
)
self.assertHTMLEqual(
str(f["get_spam"]), '<input type="checkbox" name="get_spam" required>'
)
f = SignupForm({"email": "[email protected]", "get_spam": True}, auto_id=False)
self.assertHTMLEqual(
str(f["email"]),
'<input type="email" name="email" value="[email protected]" required>',
)
self.assertHTMLEqual(
str(f["get_spam"]),
'<input checked type="checkbox" name="get_spam" required>',
)
# 'True' or 'true' should be rendered without a value attribute
f = SignupForm({"email": "[email protected]", "get_spam": "True"}, auto_id=False)
self.assertHTMLEqual(
str(f["get_spam"]),
'<input checked type="checkbox" name="get_spam" required>',
)
f = SignupForm({"email": "[email protected]", "get_spam": "true"}, auto_id=False)
self.assertHTMLEqual(
str(f["get_spam"]),
'<input checked type="checkbox" name="get_spam" required>',
)
# A value of 'False' or 'false' should be rendered unchecked
f = SignupForm(
{"email": "[email protected]", "get_spam": "False"}, auto_id=False
)
self.assertHTMLEqual(
str(f["get_spam"]), '<input type="checkbox" name="get_spam" required>'
)
f = SignupForm(
{"email": "[email protected]", "get_spam": "false"}, auto_id=False
)
self.assertHTMLEqual(
str(f["get_spam"]), '<input type="checkbox" name="get_spam" required>'
)
# A value of '0' should be interpreted as a True value (#16820)
f = SignupForm({"email": "[email protected]", "get_spam": "0"})
self.assertTrue(f.is_valid())
self.assertTrue(f.cleaned_data.get("get_spam"))
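        # The cleaned value is the boolean True rather than the raw string
        # "0" (illustrative extra check of the normalization noted above).
        self.assertIs(f.cleaned_data["get_spam"], True)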
def test_widget_output(self):
# Any Field can have a Widget class passed to its constructor:
class ContactForm(Form):
subject = CharField()
message = CharField(widget=Textarea)
f = ContactForm(auto_id=False)
self.assertHTMLEqual(
str(f["subject"]), '<input type="text" name="subject" required>'
)
self.assertHTMLEqual(
str(f["message"]),
'<textarea name="message" rows="10" cols="40" required></textarea>',
)
# as_textarea(), as_text() and as_hidden() are shortcuts for changing the output
# widget type:
self.assertHTMLEqual(
f["subject"].as_textarea(),
'<textarea name="subject" rows="10" cols="40" required></textarea>',
)
self.assertHTMLEqual(
f["message"].as_text(), '<input type="text" name="message" required>'
)
self.assertHTMLEqual(
f["message"].as_hidden(), '<input type="hidden" name="message">'
)
# The 'widget' parameter to a Field can also be an instance:
class ContactForm(Form):
subject = CharField()
message = CharField(widget=Textarea(attrs={"rows": 80, "cols": 20}))
f = ContactForm(auto_id=False)
self.assertHTMLEqual(
str(f["message"]),
'<textarea name="message" rows="80" cols="20" required></textarea>',
)
# Instance-level attrs are *not* carried over to as_textarea(), as_text() and
# as_hidden():
self.assertHTMLEqual(
f["message"].as_text(), '<input type="text" name="message" required>'
)
f = ContactForm({"subject": "Hello", "message": "I love you."}, auto_id=False)
self.assertHTMLEqual(
f["subject"].as_textarea(),
'<textarea rows="10" cols="40" name="subject" required>Hello</textarea>',
)
self.assertHTMLEqual(
f["message"].as_text(),
'<input type="text" name="message" value="I love you." required>',
)
self.assertHTMLEqual(
f["message"].as_hidden(),
'<input type="hidden" name="message" value="I love you.">',
)
def test_forms_with_choices(self):
# For a form with a <select>, use ChoiceField:
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(choices=[("P", "Python"), ("J", "Java")])
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(
str(f["language"]),
"""<select name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""",
)
f = FrameworkForm({"name": "Django", "language": "P"}, auto_id=False)
self.assertHTMLEqual(
str(f["language"]),
"""<select name="language">
<option value="P" selected>Python</option>
<option value="J">Java</option>
</select>""",
)
# A subtlety: If one of the choices' value is the empty string and the form is
# unbound, then the <option> for the empty-string choice will get selected.
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(
choices=[("", "------"), ("P", "Python"), ("J", "Java")]
)
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(
str(f["language"]),
"""<select name="language" required>
<option value="" selected>------</option>
<option value="P">Python</option>
<option value="J">Java</option>
</select>""",
)
# You can specify widget attributes in the Widget constructor.
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(
choices=[("P", "Python"), ("J", "Java")],
widget=Select(attrs={"class": "foo"}),
)
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(
str(f["language"]),
"""<select class="foo" name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""",
)
f = FrameworkForm({"name": "Django", "language": "P"}, auto_id=False)
self.assertHTMLEqual(
str(f["language"]),
"""<select class="foo" name="language">
<option value="P" selected>Python</option>
<option value="J">Java</option>
</select>""",
)
# When passing a custom widget instance to ChoiceField, note that setting
# 'choices' on the widget is meaningless. The widget will use the choices
# defined on the Field, not the ones defined on the Widget.
class FrameworkForm(Form):
name = CharField()
language = ChoiceField(
choices=[("P", "Python"), ("J", "Java")],
widget=Select(
choices=[("R", "Ruby"), ("P", "Perl")], attrs={"class": "foo"}
),
)
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(
str(f["language"]),
"""<select class="foo" name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""",
)
f = FrameworkForm({"name": "Django", "language": "P"}, auto_id=False)
self.assertHTMLEqual(
str(f["language"]),
"""<select class="foo" name="language">
<option value="P" selected>Python</option>
<option value="J">Java</option>
</select>""",
)
# You can set a ChoiceField's choices after the fact.
class FrameworkForm(Form):
name = CharField()
language = ChoiceField()
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(
str(f["language"]),
"""<select name="language">
</select>""",
)
f.fields["language"].choices = [("P", "Python"), ("J", "Java")]
self.assertHTMLEqual(
str(f["language"]),
"""<select name="language">
<option value="P">Python</option>
<option value="J">Java</option>
</select>""",
)
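        # Assigning Field.choices also mirrors them onto the bound widget;
        # ChoiceField keeps the two in sync (an illustrative extra check of
        # that mechanism, not part of the original assertions).
        self.assertEqual(
            f.fields["language"].widget.choices, [("P", "Python"), ("J", "Java")]
        )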
def test_forms_with_radio(self):
# Add widget=RadioSelect to use that widget with a ChoiceField.
f = FrameworkForm(auto_id=False)
self.assertHTMLEqual(
str(f["language"]),
"""<div>
<div><label><input type="radio" name="language" value="P" required> Python</label></div>
<div><label><input type="radio" name="language" value="J" required> Java</label></div>
</div>""",
)
self.assertHTMLEqual(
f.as_table(),
"""<tr><th>Name:</th><td><input type="text" name="name" required></td></tr>
<tr><th>Language:</th><td><div>
<div><label><input type="radio" name="language" value="P" required> Python</label></div>
<div><label><input type="radio" name="language" value="J" required> Java</label></div>
</div></td></tr>""",
)
self.assertHTMLEqual(
f.as_ul(),
"""<li>Name: <input type="text" name="name" required></li>
<li>Language: <div>
<div><label><input type="radio" name="language" value="P" required> Python</label></div>
<div><label><input type="radio" name="language" value="J" required> Java</label></div>
</div></li>""",
)
        # Without an auto_id, no <legend> is generated for the fieldset.
self.assertHTMLEqual(
f.render(f.template_name_div),
'<div> Name: <input type="text" name="name" required></div><div><fieldset>'
'Language:<div><div><label><input type="radio" name="language" value="P" '
'required> Python</label></div><div><label><input type="radio" '
'name="language" value="J" required> Java</label></div></div></fieldset>'
"</div>",
)
# Regarding auto_id and <label>, RadioSelect is a special case. Each
# radio button gets a distinct ID, formed by appending an underscore
# plus the button's zero-based index.
f = FrameworkForm(auto_id="id_%s")
self.assertHTMLEqual(
str(f["language"]),
"""
<div id="id_language">
<div><label for="id_language_0">
<input type="radio" id="id_language_0" value="P" name="language" required>
Python</label></div>
<div><label for="id_language_1">
<input type="radio" id="id_language_1" value="J" name="language" required>
Java</label></div>
</div>""",
)
# When RadioSelect is used with auto_id, and the whole form is printed
# using either as_table() or as_ul(), the label for the RadioSelect
# will **not** point to the ID of the *first* radio button to improve
# accessibility for screen reader users.
self.assertHTMLEqual(
f.as_table(),
"""
<tr><th><label for="id_name">Name:</label></th><td>
<input type="text" name="name" id="id_name" required></td></tr>
<tr><th><label>Language:</label></th><td><div id="id_language">
<div><label for="id_language_0">
<input type="radio" id="id_language_0" value="P" name="language" required>
Python</label></div>
<div><label for="id_language_1">
<input type="radio" id="id_language_1" value="J" name="language" required>
Java</label></div>
</div></td></tr>""",
)
self.assertHTMLEqual(
f.as_ul(),
"""
<li><label for="id_name">Name:</label>
<input type="text" name="name" id="id_name" required></li>
<li><label>Language:</label> <div id="id_language">
<div><label for="id_language_0">
<input type="radio" id="id_language_0" value="P" name="language" required>
Python</label></div>
<div><label for="id_language_1">
<input type="radio" id="id_language_1" value="J" name="language" required>
Java</label></div>
</div></li>
""",
)
self.assertHTMLEqual(
f.as_p(),
"""
<p><label for="id_name">Name:</label>
<input type="text" name="name" id="id_name" required></p>
<p><label>Language:</label> <div id="id_language">
<div><label for="id_language_0">
<input type="radio" id="id_language_0" value="P" name="language" required>
Python</label></div>
<div><label for="id_language_1">
<input type="radio" id="id_language_1" value="J" name="language" required>
Java</label></div>
</div></p>
""",
)
self.assertHTMLEqual(
f.render(f.template_name_div),
'<div><label for="id_name">Name:</label><input type="text" name="name" '
'required id="id_name"></div><div><fieldset><legend>Language:</legend>'
'<div id="id_language"><div><label for="id_language_0"><input '
'type="radio" name="language" value="P" required id="id_language_0">'
'Python</label></div><div><label for="id_language_1"><input type="radio" '
'name="language" value="J" required id="id_language_1">Java</label></div>'
"</div></fieldset></div>",
)
def test_form_with_iterable_boundfield(self):
class BeatleForm(Form):
name = ChoiceField(
choices=[
("john", "John"),
("paul", "Paul"),
("george", "George"),
("ringo", "Ringo"),
],
widget=RadioSelect,
)
f = BeatleForm(auto_id=False)
self.assertHTMLEqual(
"\n".join(str(bf) for bf in f["name"]),
'<label><input type="radio" name="name" value="john" required> John</label>'
'<label><input type="radio" name="name" value="paul" required> Paul</label>'
'<label><input type="radio" name="name" value="george" required> George'
"</label>"
'<label><input type="radio" name="name" value="ringo" required> Ringo'
"</label>",
)
self.assertHTMLEqual(
"\n".join("<div>%s</div>" % bf for bf in f["name"]),
"""
<div><label>
<input type="radio" name="name" value="john" required> John</label></div>
<div><label>
<input type="radio" name="name" value="paul" required> Paul</label></div>
<div><label>
<input type="radio" name="name" value="george" required> George
</label></div>
<div><label>
<input type="radio" name="name" value="ringo" required> Ringo</label></div>
""",
)
def test_form_with_iterable_boundfield_id(self):
class BeatleForm(Form):
name = ChoiceField(
choices=[
("john", "John"),
("paul", "Paul"),
("george", "George"),
("ringo", "Ringo"),
],
widget=RadioSelect,
)
fields = list(BeatleForm()["name"])
self.assertEqual(len(fields), 4)
self.assertEqual(fields[0].id_for_label, "id_name_0")
self.assertEqual(fields[0].choice_label, "John")
self.assertHTMLEqual(
fields[0].tag(),
'<input type="radio" name="name" value="john" id="id_name_0" required>',
)
self.assertHTMLEqual(
str(fields[0]),
'<label for="id_name_0"><input type="radio" name="name" '
'value="john" id="id_name_0" required> John</label>',
)
self.assertEqual(fields[1].id_for_label, "id_name_1")
self.assertEqual(fields[1].choice_label, "Paul")
self.assertHTMLEqual(
fields[1].tag(),
'<input type="radio" name="name" value="paul" id="id_name_1" required>',
)
self.assertHTMLEqual(
str(fields[1]),
'<label for="id_name_1"><input type="radio" name="name" '
'value="paul" id="id_name_1" required> Paul</label>',
)
def test_iterable_boundfield_select(self):
class BeatleForm(Form):
name = ChoiceField(
choices=[
("john", "John"),
("paul", "Paul"),
("george", "George"),
("ringo", "Ringo"),
]
)
fields = list(BeatleForm(auto_id=False)["name"])
self.assertEqual(len(fields), 4)
self.assertIsNone(fields[0].id_for_label)
self.assertEqual(fields[0].choice_label, "John")
self.assertHTMLEqual(fields[0].tag(), '<option value="john">John</option>')
self.assertHTMLEqual(str(fields[0]), '<option value="john">John</option>')
def test_form_with_noniterable_boundfield(self):
# You can iterate over any BoundField, not just those with widget=RadioSelect.
class BeatleForm(Form):
name = CharField()
f = BeatleForm(auto_id=False)
self.assertHTMLEqual(
"\n".join(str(bf) for bf in f["name"]),
'<input type="text" name="name" required>',
)
def test_boundfield_slice(self):
class BeatleForm(Form):
name = ChoiceField(
choices=[
("john", "John"),
("paul", "Paul"),
("george", "George"),
("ringo", "Ringo"),
],
widget=RadioSelect,
)
f = BeatleForm()
bf = f["name"]
self.assertEqual(
[str(item) for item in bf[1:]],
[str(bf[1]), str(bf[2]), str(bf[3])],
)
def test_boundfield_invalid_index(self):
class TestForm(Form):
name = ChoiceField(choices=[])
field = TestForm()["name"]
msg = "BoundField indices must be integers or slices, not str."
with self.assertRaisesMessage(TypeError, msg):
field["foo"]
def test_boundfield_bool(self):
"""BoundField without any choices (subwidgets) evaluates to True."""
class TestForm(Form):
name = ChoiceField(choices=[])
self.assertIs(bool(TestForm()["name"]), True)
def test_forms_with_multiple_choice(self):
# MultipleChoiceField is a special case, as its data is required to be a list:
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField()
f = SongForm(auto_id=False)
self.assertHTMLEqual(
str(f["composers"]),
"""<select multiple name="composers" required>
</select>""",
)
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(
choices=[("J", "John Lennon"), ("P", "Paul McCartney")]
)
f = SongForm(auto_id=False)
self.assertHTMLEqual(
str(f["composers"]),
"""<select multiple name="composers" required>
<option value="J">John Lennon</option>
<option value="P">Paul McCartney</option>
</select>""",
)
f = SongForm({"name": "Yesterday", "composers": ["P"]}, auto_id=False)
self.assertHTMLEqual(
str(f["name"]), '<input type="text" name="name" value="Yesterday" required>'
)
self.assertHTMLEqual(
str(f["composers"]),
"""<select multiple name="composers" required>
<option value="J">John Lennon</option>
<option value="P" selected>Paul McCartney</option>
</select>""",
)
f = SongForm()
self.assertHTMLEqual(
f.as_table(),
'<tr><th><label for="id_name">Name:</label></th>'
'<td><input type="text" name="name" required id="id_name"></td>'
'</tr><tr><th><label for="id_composers">Composers:</label></th>'
'<td><select name="composers" required id="id_composers" multiple>'
'<option value="J">John Lennon</option>'
'<option value="P">Paul McCartney</option>'
"</select></td></tr>",
)
self.assertHTMLEqual(
f.as_ul(),
'<li><label for="id_name">Name:</label>'
'<input type="text" name="name" required id="id_name"></li>'
'<li><label for="id_composers">Composers:</label>'
'<select name="composers" required id="id_composers" multiple>'
'<option value="J">John Lennon</option>'
'<option value="P">Paul McCartney</option>'
"</select></li>",
)
self.assertHTMLEqual(
f.as_p(),
'<p><label for="id_name">Name:</label>'
'<input type="text" name="name" required id="id_name"></p>'
'<p><label for="id_composers">Composers:</label>'
'<select name="composers" required id="id_composers" multiple>'
'<option value="J">John Lennon</option>'
'<option value="P">Paul McCartney</option>'
"</select></p>",
)
self.assertHTMLEqual(
f.render(f.template_name_div),
'<div><label for="id_name">Name:</label><input type="text" name="name" '
'required id="id_name"></div><div><label for="id_composers">Composers:'
'</label><select name="composers" required id="id_composers" multiple>'
'<option value="J">John Lennon</option><option value="P">Paul McCartney'
"</option></select></div>",
)
def test_multiple_checkbox_render(self):
f = SongForm()
self.assertHTMLEqual(
f.as_table(),
'<tr><th><label for="id_name">Name:</label></th><td>'
'<input type="text" name="name" required id="id_name"></td></tr>'
'<tr><th><label>Composers:</label></th><td><div id="id_composers">'
'<div><label for="id_composers_0">'
'<input type="checkbox" name="composers" value="J" '
'id="id_composers_0">John Lennon</label></div>'
'<div><label for="id_composers_1">'
'<input type="checkbox" name="composers" value="P" '
'id="id_composers_1">Paul McCartney</label></div>'
"</div></td></tr>",
)
self.assertHTMLEqual(
f.as_ul(),
'<li><label for="id_name">Name:</label>'
'<input type="text" name="name" required id="id_name"></li>'
'<li><label>Composers:</label><div id="id_composers">'
'<div><label for="id_composers_0">'
'<input type="checkbox" name="composers" value="J" '
'id="id_composers_0">John Lennon</label></div>'
'<div><label for="id_composers_1">'
'<input type="checkbox" name="composers" value="P" '
'id="id_composers_1">Paul McCartney</label></div>'
"</div></li>",
)
self.assertHTMLEqual(
f.as_p(),
'<p><label for="id_name">Name:</label>'
'<input type="text" name="name" required id="id_name"></p>'
'<p><label>Composers:</label><div id="id_composers">'
'<div><label for="id_composers_0">'
'<input type="checkbox" name="composers" value="J" '
'id="id_composers_0">John Lennon</label></div>'
'<div><label for="id_composers_1">'
'<input type="checkbox" name="composers" value="P" '
'id="id_composers_1">Paul McCartney</label></div>'
"</div></p>",
)
self.assertHTMLEqual(
f.render(f.template_name_div),
'<div><label for="id_name">Name:</label><input type="text" name="name" '
'required id="id_name"></div><div><fieldset><legend>Composers:</legend>'
'<div id="id_composers"><div><label for="id_composers_0"><input '
'type="checkbox" name="composers" value="J" id="id_composers_0">'
'John Lennon</label></div><div><label for="id_composers_1"><input '
'type="checkbox" name="composers" value="P" id="id_composers_1">'
"Paul McCartney</label></div></div></fieldset></div>",
)
def test_form_with_disabled_fields(self):
class PersonForm(Form):
name = CharField()
birthday = DateField(disabled=True)
class PersonFormFieldInitial(Form):
name = CharField()
birthday = DateField(disabled=True, initial=datetime.date(1974, 8, 16))
# Disabled fields are generally not transmitted by user agents.
# The value from the form's initial data is used.
f1 = PersonForm(
{"name": "John Doe"}, initial={"birthday": datetime.date(1974, 8, 16)}
)
f2 = PersonFormFieldInitial({"name": "John Doe"})
for form in (f1, f2):
self.assertTrue(form.is_valid())
self.assertEqual(
form.cleaned_data,
{"birthday": datetime.date(1974, 8, 16), "name": "John Doe"},
)
# Values provided in the form's data are ignored.
data = {"name": "John Doe", "birthday": "1984-11-10"}
f1 = PersonForm(data, initial={"birthday": datetime.date(1974, 8, 16)})
f2 = PersonFormFieldInitial(data)
for form in (f1, f2):
self.assertTrue(form.is_valid())
self.assertEqual(
form.cleaned_data,
{"birthday": datetime.date(1974, 8, 16), "name": "John Doe"},
)
# Initial data remains present on invalid forms.
data = {}
f1 = PersonForm(data, initial={"birthday": datetime.date(1974, 8, 16)})
f2 = PersonFormFieldInitial(data)
for form in (f1, f2):
self.assertFalse(form.is_valid())
self.assertEqual(form["birthday"].value(), datetime.date(1974, 8, 16))
def test_hidden_data(self):
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(
choices=[("J", "John Lennon"), ("P", "Paul McCartney")]
)
# MultipleChoiceField rendered as_hidden() is a special case. Because it can
# have multiple values, its as_hidden() renders multiple <input type="hidden">
# tags.
f = SongForm({"name": "Yesterday", "composers": ["P"]}, auto_id=False)
self.assertHTMLEqual(
f["composers"].as_hidden(),
'<input type="hidden" name="composers" value="P">',
)
f = SongForm({"name": "From Me To You", "composers": ["P", "J"]}, auto_id=False)
self.assertHTMLEqual(
f["composers"].as_hidden(),
"""<input type="hidden" name="composers" value="P">
<input type="hidden" name="composers" value="J">""",
)
        # SplitDateTimeField rendered as_hidden() is special too.
class MessageForm(Form):
when = SplitDateTimeField()
f = MessageForm({"when_0": "1992-01-01", "when_1": "01:01"})
self.assertTrue(f.is_valid())
self.assertHTMLEqual(
str(f["when"]),
'<input type="text" name="when_0" value="1992-01-01" id="id_when_0" '
"required>"
'<input type="text" name="when_1" value="01:01" id="id_when_1" required>',
)
self.assertHTMLEqual(
f["when"].as_hidden(),
'<input type="hidden" name="when_0" value="1992-01-01" id="id_when_0">'
'<input type="hidden" name="when_1" value="01:01" id="id_when_1">',
)
def test_multiple_choice_checkbox(self):
# MultipleChoiceField can also be used with the CheckboxSelectMultiple widget.
f = SongForm(auto_id=False)
self.assertHTMLEqual(
str(f["composers"]),
"""
<div>
<div><label><input type="checkbox" name="composers" value="J">
John Lennon</label></div>
<div><label><input type="checkbox" name="composers" value="P">
Paul McCartney</label></div>
</div>
""",
)
f = SongForm({"composers": ["J"]}, auto_id=False)
self.assertHTMLEqual(
str(f["composers"]),
"""
<div>
<div><label><input checked type="checkbox" name="composers" value="J">
John Lennon</label></div>
<div><label><input type="checkbox" name="composers" value="P">
Paul McCartney</label></div>
</div>
""",
)
f = SongForm({"composers": ["J", "P"]}, auto_id=False)
self.assertHTMLEqual(
str(f["composers"]),
"""
<div>
<div><label><input checked type="checkbox" name="composers" value="J">
John Lennon</label></div>
<div><label><input checked type="checkbox" name="composers" value="P">
Paul McCartney</label></div>
</div>
""",
)
def test_checkbox_auto_id(self):
# Regarding auto_id, CheckboxSelectMultiple is a special case. Each checkbox
# gets a distinct ID, formed by appending an underscore plus the checkbox's
# zero-based index.
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(
choices=[("J", "John Lennon"), ("P", "Paul McCartney")],
widget=CheckboxSelectMultiple,
)
f = SongForm(auto_id="%s_id")
self.assertHTMLEqual(
str(f["composers"]),
"""
<div id="composers_id">
<div><label for="composers_id_0">
<input type="checkbox" name="composers" value="J" id="composers_id_0">
John Lennon</label></div>
<div><label for="composers_id_1">
<input type="checkbox" name="composers" value="P" id="composers_id_1">
Paul McCartney</label></div>
</div>
""",
)
def test_multiple_choice_list_data(self):
# Data for a MultipleChoiceField should be a list. QueryDict and
# MultiValueDict conveniently work with this.
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(
choices=[("J", "John Lennon"), ("P", "Paul McCartney")],
widget=CheckboxSelectMultiple,
)
data = {"name": "Yesterday", "composers": ["J", "P"]}
f = SongForm(data)
self.assertEqual(f.errors, {})
data = QueryDict("name=Yesterday&composers=J&composers=P")
f = SongForm(data)
self.assertEqual(f.errors, {})
data = MultiValueDict({"name": ["Yesterday"], "composers": ["J", "P"]})
f = SongForm(data)
self.assertEqual(f.errors, {})
# SelectMultiple uses ducktyping so that MultiValueDictLike.getlist()
# is called.
f = SongForm(MultiValueDictLike({"name": "Yesterday", "composers": "J"}))
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data["composers"], ["J"])
def test_multiple_hidden(self):
class SongForm(Form):
name = CharField()
composers = MultipleChoiceField(
choices=[("J", "John Lennon"), ("P", "Paul McCartney")],
widget=CheckboxSelectMultiple,
)
# The MultipleHiddenInput widget renders multiple values as hidden fields.
class SongFormHidden(Form):
name = CharField()
composers = MultipleChoiceField(
choices=[("J", "John Lennon"), ("P", "Paul McCartney")],
widget=MultipleHiddenInput,
)
f = SongFormHidden(
MultiValueDict({"name": ["Yesterday"], "composers": ["J", "P"]}),
auto_id=False,
)
self.assertHTMLEqual(
f.as_ul(),
"""<li>Name: <input type="text" name="name" value="Yesterday" required>
<input type="hidden" name="composers" value="J">
<input type="hidden" name="composers" value="P"></li>""",
)
        # When using CheckboxSelectMultiple, the framework expects the
        # submitted data to be a list of values and returns a list in
        # cleaned_data.
f = SongForm({"name": "Yesterday"}, auto_id=False)
self.assertEqual(f.errors["composers"], ["This field is required."])
f = SongForm({"name": "Yesterday", "composers": ["J"]}, auto_id=False)
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data["composers"], ["J"])
self.assertEqual(f.cleaned_data["name"], "Yesterday")
f = SongForm({"name": "Yesterday", "composers": ["J", "P"]}, auto_id=False)
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data["composers"], ["J", "P"])
self.assertEqual(f.cleaned_data["name"], "Yesterday")
# MultipleHiddenInput uses ducktyping so that
# MultiValueDictLike.getlist() is called.
f = SongForm(MultiValueDictLike({"name": "Yesterday", "composers": "J"}))
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data["composers"], ["J"])
def test_escaping(self):
# Validation errors are HTML-escaped when output as HTML.
class EscapingForm(Form):
special_name = CharField(label="<em>Special</em> Field")
special_safe_name = CharField(label=mark_safe("<em>Special</em> Field"))
def clean_special_name(self):
raise ValidationError(
"Something's wrong with '%s'" % self.cleaned_data["special_name"]
)
def clean_special_safe_name(self):
raise ValidationError(
mark_safe(
"'<b>%s</b>' is a safe string"
% self.cleaned_data["special_safe_name"]
)
)
f = EscapingForm(
{
"special_name": "Nothing to escape",
"special_safe_name": "Nothing to escape",
},
auto_id=False,
)
self.assertHTMLEqual(
f.as_table(),
"""
<tr><th><em>Special</em> Field:</th><td>
<ul class="errorlist">
<li>Something's wrong with 'Nothing to escape'</li></ul>
<input type="text" name="special_name" value="Nothing to escape" required>
</td></tr>
<tr><th><em>Special</em> Field:</th><td>
<ul class="errorlist">
<li>'<b>Nothing to escape</b>' is a safe string</li></ul>
<input type="text" name="special_safe_name" value="Nothing to escape"
required></td></tr>
""",
)
f = EscapingForm(
{
"special_name": "Should escape < & > and <script>alert('xss')</script>",
"special_safe_name": "<i>Do not escape</i>",
},
auto_id=False,
)
self.assertHTMLEqual(
f.as_table(),
"<tr><th><em>Special</em> Field:</th><td>"
'<ul class="errorlist"><li>'
"Something's wrong with 'Should escape < & > and "
"<script>alert('xss')</script>'</li></ul>"
'<input type="text" name="special_name" value="Should escape < & '
'> and <script>alert('xss')</script>" required>'
"</td></tr>"
"<tr><th><em>Special</em> Field:</th><td>"
'<ul class="errorlist">'
"<li>'<b><i>Do not escape</i></b>' is a safe string</li></ul>"
'<input type="text" name="special_safe_name" '
'value="<i>Do not escape</i>" required></td></tr>',
)
def test_validating_multiple_fields(self):
# There are a couple of ways to do multiple-field validation. If you
# want the validation message to be associated with a particular field,
# implement the clean_XXX() method on the Form, where XXX is the field
# name. As in Field.clean(), the clean_XXX() method should return the
# cleaned value. In the clean_XXX() method, you have access to
# self.cleaned_data, which is a dictionary of all the data that has
# been cleaned *so far*, in order by the fields, including the current
# field (e.g., the field XXX if you're in clean_XXX()).
class UserRegistration(Form):
username = CharField(max_length=10)
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput)
def clean_password2(self):
if (
self.cleaned_data.get("password1")
and self.cleaned_data.get("password2")
and self.cleaned_data["password1"] != self.cleaned_data["password2"]
):
raise ValidationError("Please make sure your passwords match.")
return self.cleaned_data["password2"]
f = UserRegistration(auto_id=False)
self.assertEqual(f.errors, {})
f = UserRegistration({}, auto_id=False)
self.assertEqual(f.errors["username"], ["This field is required."])
self.assertEqual(f.errors["password1"], ["This field is required."])
self.assertEqual(f.errors["password2"], ["This field is required."])
f = UserRegistration(
{"username": "adrian", "password1": "foo", "password2": "bar"},
auto_id=False,
)
self.assertEqual(
f.errors["password2"], ["Please make sure your passwords match."]
)
f = UserRegistration(
{"username": "adrian", "password1": "foo", "password2": "foo"},
auto_id=False,
)
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data["username"], "adrian")
self.assertEqual(f.cleaned_data["password1"], "foo")
self.assertEqual(f.cleaned_data["password2"], "foo")
        # Another way of doing multiple-field validation is by implementing the
        # Form's clean() method. Usually a ValidationError raised by that
        # method will not be associated with a particular field and will have a
        # special-case association with the field named '__all__'. It's
        # possible to associate errors with a particular field using the
        # Form.add_error() method or by passing a dictionary that maps each
        # field to one or more errors.
        #
        # Note that in Form.clean(), you have access to self.cleaned_data, a
        # dictionary of all the fields/values that have *not* raised a
        # ValidationError. Also note that Form.clean() should return the full
        # dictionary of cleaned data (returning None leaves self.cleaned_data
        # unchanged).
class UserRegistration(Form):
username = CharField(max_length=10)
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput)
def clean(self):
# Test raising a ValidationError as NON_FIELD_ERRORS.
if (
self.cleaned_data.get("password1")
and self.cleaned_data.get("password2")
and self.cleaned_data["password1"] != self.cleaned_data["password2"]
):
raise ValidationError("Please make sure your passwords match.")
# Test raising ValidationError that targets multiple fields.
errors = {}
if self.cleaned_data.get("password1") == "FORBIDDEN_VALUE":
errors["password1"] = "Forbidden value."
if self.cleaned_data.get("password2") == "FORBIDDEN_VALUE":
errors["password2"] = ["Forbidden value."]
if errors:
raise ValidationError(errors)
# Test Form.add_error()
if self.cleaned_data.get("password1") == "FORBIDDEN_VALUE2":
self.add_error(None, "Non-field error 1.")
self.add_error("password1", "Forbidden value 2.")
if self.cleaned_data.get("password2") == "FORBIDDEN_VALUE2":
self.add_error("password2", "Forbidden value 2.")
raise ValidationError("Non-field error 2.")
return self.cleaned_data
f = UserRegistration(auto_id=False)
self.assertEqual(f.errors, {})
f = UserRegistration({}, auto_id=False)
self.assertHTMLEqual(
f.as_table(),
"""<tr><th>Username:</th><td>
<ul class="errorlist"><li>This field is required.</li></ul>
<input type="text" name="username" maxlength="10" required></td></tr>
<tr><th>Password1:</th><td><ul class="errorlist"><li>This field is required.</li></ul>
<input type="password" name="password1" required></td></tr>
<tr><th>Password2:</th><td><ul class="errorlist"><li>This field is required.</li></ul>
<input type="password" name="password2" required></td></tr>""",
)
self.assertEqual(f.errors["username"], ["This field is required."])
self.assertEqual(f.errors["password1"], ["This field is required."])
self.assertEqual(f.errors["password2"], ["This field is required."])
f = UserRegistration(
{"username": "adrian", "password1": "foo", "password2": "bar"},
auto_id=False,
)
self.assertEqual(
f.errors["__all__"], ["Please make sure your passwords match."]
)
self.assertHTMLEqual(
f.as_table(),
"""
<tr><td colspan="2">
<ul class="errorlist nonfield">
<li>Please make sure your passwords match.</li></ul></td></tr>
<tr><th>Username:</th><td>
<input type="text" name="username" value="adrian" maxlength="10" required>
</td></tr>
<tr><th>Password1:</th><td>
<input type="password" name="password1" required></td></tr>
<tr><th>Password2:</th><td>
<input type="password" name="password2" required></td></tr>
""",
)
self.assertHTMLEqual(
f.as_ul(),
"""
<li><ul class="errorlist nonfield">
<li>Please make sure your passwords match.</li></ul></li>
<li>Username:
<input type="text" name="username" value="adrian" maxlength="10" required>
</li>
<li>Password1: <input type="password" name="password1" required></li>
<li>Password2: <input type="password" name="password2" required></li>
""",
)
self.assertHTMLEqual(
f.render(f.template_name_div),
'<ul class="errorlist nonfield"><li>Please make sure your passwords match.'
'</li></ul><div>Username: <input type="text" name="username" '
'value="adrian" maxlength="10" required></div><div>Password1: <input '
'type="password" name="password1" required></div><div>Password2: <input '
'type="password" name="password2" required></div>',
)
f = UserRegistration(
{"username": "adrian", "password1": "foo", "password2": "foo"},
auto_id=False,
)
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data["username"], "adrian")
self.assertEqual(f.cleaned_data["password1"], "foo")
self.assertEqual(f.cleaned_data["password2"], "foo")
f = UserRegistration(
{
"username": "adrian",
"password1": "FORBIDDEN_VALUE",
"password2": "FORBIDDEN_VALUE",
},
auto_id=False,
)
self.assertEqual(f.errors["password1"], ["Forbidden value."])
self.assertEqual(f.errors["password2"], ["Forbidden value."])
f = UserRegistration(
{
"username": "adrian",
"password1": "FORBIDDEN_VALUE2",
"password2": "FORBIDDEN_VALUE2",
},
auto_id=False,
)
self.assertEqual(
f.errors["__all__"], ["Non-field error 1.", "Non-field error 2."]
)
self.assertEqual(f.errors["password1"], ["Forbidden value 2."])
self.assertEqual(f.errors["password2"], ["Forbidden value 2."])
with self.assertRaisesMessage(ValueError, "has no field named"):
f.add_error("missing_field", "Some error.")
def test_update_error_dict(self):
class CodeForm(Form):
code = CharField(max_length=10)
def clean(self):
try:
raise ValidationError({"code": [ValidationError("Code error 1.")]})
except ValidationError as e:
self._errors = e.update_error_dict(self._errors)
try:
raise ValidationError({"code": [ValidationError("Code error 2.")]})
except ValidationError as e:
self._errors = e.update_error_dict(self._errors)
try:
raise ValidationError({"code": forms.ErrorList(["Code error 3."])})
except ValidationError as e:
self._errors = e.update_error_dict(self._errors)
try:
raise ValidationError("Non-field error 1.")
except ValidationError as e:
self._errors = e.update_error_dict(self._errors)
try:
raise ValidationError([ValidationError("Non-field error 2.")])
except ValidationError as e:
self._errors = e.update_error_dict(self._errors)
                # Ensure the newly added lists of errors are ErrorList instances.
for field, error_list in self._errors.items():
if not isinstance(error_list, self.error_class):
self._errors[field] = self.error_class(error_list)
form = CodeForm({"code": "hello"})
# Trigger validation.
self.assertFalse(form.is_valid())
# update_error_dict didn't lose track of the ErrorDict type.
self.assertIsInstance(form._errors, forms.ErrorDict)
self.assertEqual(
dict(form.errors),
{
"code": ["Code error 1.", "Code error 2.", "Code error 3."],
NON_FIELD_ERRORS: ["Non-field error 1.", "Non-field error 2."],
},
)
def test_has_error(self):
class UserRegistration(Form):
username = CharField(max_length=10)
password1 = CharField(widget=PasswordInput, min_length=5)
password2 = CharField(widget=PasswordInput)
def clean(self):
if (
self.cleaned_data.get("password1")
and self.cleaned_data.get("password2")
and self.cleaned_data["password1"] != self.cleaned_data["password2"]
):
raise ValidationError(
"Please make sure your passwords match.",
code="password_mismatch",
)
f = UserRegistration(data={})
self.assertTrue(f.has_error("password1"))
self.assertTrue(f.has_error("password1", "required"))
self.assertFalse(f.has_error("password1", "anything"))
f = UserRegistration(data={"password1": "Hi", "password2": "Hi"})
self.assertTrue(f.has_error("password1"))
self.assertTrue(f.has_error("password1", "min_length"))
self.assertFalse(f.has_error("password1", "anything"))
self.assertFalse(f.has_error("password2"))
self.assertFalse(f.has_error("password2", "anything"))
f = UserRegistration(data={"password1": "Bonjour", "password2": "Hello"})
self.assertFalse(f.has_error("password1"))
self.assertFalse(f.has_error("password1", "required"))
self.assertTrue(f.has_error(NON_FIELD_ERRORS))
self.assertTrue(f.has_error(NON_FIELD_ERRORS, "password_mismatch"))
self.assertFalse(f.has_error(NON_FIELD_ERRORS, "anything"))
def test_html_output_with_hidden_input_field_errors(self):
class TestForm(Form):
hidden_input = CharField(widget=HiddenInput)
def clean(self):
self.add_error(None, "Form error")
f = TestForm(data={})
error_dict = {
"hidden_input": ["This field is required."],
"__all__": ["Form error"],
}
self.assertEqual(f.errors, error_dict)
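        # Rendering the form should not modify the error dict.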
f.as_table()
self.assertEqual(f.errors, error_dict)
self.assertHTMLEqual(
f.as_table(),
'<tr><td colspan="2"><ul class="errorlist nonfield"><li>Form error</li>'
"<li>(Hidden field hidden_input) This field is required.</li></ul>"
'<input type="hidden" name="hidden_input" id="id_hidden_input"></td></tr>',
)
self.assertHTMLEqual(
f.as_ul(),
'<li><ul class="errorlist nonfield"><li>Form error</li>'
"<li>(Hidden field hidden_input) This field is required.</li></ul>"
'<input type="hidden" name="hidden_input" id="id_hidden_input"></li>',
)
self.assertHTMLEqual(
f.as_p(),
'<ul class="errorlist nonfield"><li>Form error</li>'
"<li>(Hidden field hidden_input) This field is required.</li></ul>"
'<p><input type="hidden" name="hidden_input" id="id_hidden_input"></p>',
)
self.assertHTMLEqual(
f.render(f.template_name_div),
'<ul class="errorlist nonfield"><li>Form error</li>'
"<li>(Hidden field hidden_input) This field is required.</li></ul>"
'<div><input type="hidden" name="hidden_input" id="id_hidden_input"></div>',
)
def test_dynamic_construction(self):
# It's possible to construct a Form dynamically by adding to the self.fields
# dictionary in __init__(). Don't forget to call Form.__init__() within the
# subclass' __init__().
class Person(Form):
first_name = CharField()
last_name = CharField()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["birthday"] = DateField()
p = Person(auto_id=False)
self.assertHTMLEqual(
p.as_table(),
"""
<tr><th>First name:</th><td>
<input type="text" name="first_name" required></td></tr>
<tr><th>Last name:</th><td>
<input type="text" name="last_name" required></td></tr>
<tr><th>Birthday:</th><td>
<input type="text" name="birthday" required></td></tr>
""",
)
# Instances of a dynamic Form do not persist fields from one Form instance to
# the next.
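        # (Each Form instance operates on self.fields, a deep copy of the
        # class's base_fields, so per-instance changes never leak back.)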
class MyForm(Form):
def __init__(self, data=None, auto_id=False, field_list=[]):
Form.__init__(self, data, auto_id=auto_id)
for field in field_list:
self.fields[field[0]] = field[1]
field_list = [("field1", CharField()), ("field2", CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(
my_form.as_table(),
"""
<tr><th>Field1:</th><td><input type="text" name="field1" required></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" required></td></tr>
""",
)
field_list = [("field3", CharField()), ("field4", CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(
my_form.as_table(),
"""
<tr><th>Field3:</th><td><input type="text" name="field3" required></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" required></td></tr>
""",
)
class MyForm(Form):
default_field_1 = CharField()
default_field_2 = CharField()
def __init__(self, data=None, auto_id=False, field_list=[]):
Form.__init__(self, data, auto_id=auto_id)
for field in field_list:
self.fields[field[0]] = field[1]
field_list = [("field1", CharField()), ("field2", CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(
my_form.as_table(),
"""
<tr><th>Default field 1:</th><td>
<input type="text" name="default_field_1" required></td></tr>
<tr><th>Default field 2:</th><td>
<input type="text" name="default_field_2" required></td></tr>
<tr><th>Field1:</th><td><input type="text" name="field1" required></td></tr>
<tr><th>Field2:</th><td><input type="text" name="field2" required></td></tr>
""",
)
field_list = [("field3", CharField()), ("field4", CharField())]
my_form = MyForm(field_list=field_list)
self.assertHTMLEqual(
my_form.as_table(),
"""
<tr><th>Default field 1:</th><td>
<input type="text" name="default_field_1" required></td></tr>
<tr><th>Default field 2:</th><td>
<input type="text" name="default_field_2" required></td></tr>
<tr><th>Field3:</th><td><input type="text" name="field3" required></td></tr>
<tr><th>Field4:</th><td><input type="text" name="field4" required></td></tr>
""",
)
# Similarly, changes to field attributes do not persist from one Form instance
# to the next.
class Person(Form):
first_name = CharField(required=False)
last_name = CharField(required=False)
def __init__(self, names_required=False, *args, **kwargs):
super().__init__(*args, **kwargs)
if names_required:
self.fields["first_name"].required = True
self.fields["first_name"].widget.attrs["class"] = "required"
self.fields["last_name"].required = True
self.fields["last_name"].widget.attrs["class"] = "required"
f = Person(names_required=False)
self.assertEqual(
f["first_name"].field.required,
f["last_name"].field.required,
(False, False),
)
self.assertEqual(
f["first_name"].field.widget.attrs,
f["last_name"].field.widget.attrs,
({}, {}),
)
f = Person(names_required=True)
self.assertEqual(
f["first_name"].field.required, f["last_name"].field.required, (True, True)
)
self.assertEqual(
f["first_name"].field.widget.attrs,
f["last_name"].field.widget.attrs,
({"class": "reuired"}, {"class": "required"}),
)
f = Person(names_required=False)
self.assertEqual(
f["first_name"].field.required,
f["last_name"].field.required,
(False, False),
)
self.assertEqual(
f["first_name"].field.widget.attrs,
f["last_name"].field.widget.attrs,
({}, {}),
)
class Person(Form):
first_name = CharField(max_length=30)
last_name = CharField(max_length=30)
def __init__(self, name_max_length=None, *args, **kwargs):
super().__init__(*args, **kwargs)
if name_max_length:
self.fields["first_name"].max_length = name_max_length
self.fields["last_name"].max_length = name_max_length
f = Person(name_max_length=None)
self.assertEqual(
f["first_name"].field.max_length, f["last_name"].field.max_length, (30, 30)
)
f = Person(name_max_length=20)
self.assertEqual(
f["first_name"].field.max_length, f["last_name"].field.max_length, (20, 20)
)
f = Person(name_max_length=None)
self.assertEqual(
f["first_name"].field.max_length, f["last_name"].field.max_length, (30, 30)
)
# Similarly, choices do not persist from one Form instance to the next.
# Refs #15127.
class Person(Form):
first_name = CharField(required=False)
last_name = CharField(required=False)
gender = ChoiceField(choices=(("f", "Female"), ("m", "Male")))
def __init__(self, allow_unspec_gender=False, *args, **kwargs):
super().__init__(*args, **kwargs)
if allow_unspec_gender:
self.fields["gender"].choices += (("u", "Unspecified"),)
f = Person()
self.assertEqual(f["gender"].field.choices, [("f", "Female"), ("m", "Male")])
f = Person(allow_unspec_gender=True)
self.assertEqual(
f["gender"].field.choices,
[("f", "Female"), ("m", "Male"), ("u", "Unspecified")],
)
f = Person()
self.assertEqual(f["gender"].field.choices, [("f", "Female"), ("m", "Male")])
def test_validators_independence(self):
"""
The list of form field validators can be modified without polluting
other forms.
"""
class MyForm(Form):
myfield = CharField(max_length=25)
f1 = MyForm()
f2 = MyForm()
f1.fields["myfield"].validators[0] = MaxValueValidator(12)
self.assertNotEqual(
f1.fields["myfield"].validators[0], f2.fields["myfield"].validators[0]
)
def test_hidden_widget(self):
        # HiddenInput widgets are displayed differently in the as_table(), as_ul(),
# and as_p() output of a Form -- their verbose names are not displayed, and a
# separate row is not displayed. They're displayed in the last row of the
# form, directly after that row's form element.
class Person(Form):
first_name = CharField()
last_name = CharField()
hidden_text = CharField(widget=HiddenInput)
birthday = DateField()
p = Person(auto_id=False)
self.assertHTMLEqual(
p.as_table(),
"""
<tr><th>First name:</th><td><input type="text" name="first_name" required>
</td></tr>
<tr><th>Last name:</th><td><input type="text" name="last_name" required>
</td></tr>
<tr><th>Birthday:</th>
<td><input type="text" name="birthday" required>
<input type="hidden" name="hidden_text"></td></tr>
""",
)
self.assertHTMLEqual(
p.as_ul(),
"""
<li>First name: <input type="text" name="first_name" required></li>
<li>Last name: <input type="text" name="last_name" required></li>
<li>Birthday: <input type="text" name="birthday" required>
<input type="hidden" name="hidden_text"></li>
""",
)
self.assertHTMLEqual(
p.as_p(),
"""
<p>First name: <input type="text" name="first_name" required></p>
<p>Last name: <input type="text" name="last_name" required></p>
<p>Birthday: <input type="text" name="birthday" required>
<input type="hidden" name="hidden_text"></p>
""",
)
self.assertHTMLEqual(
p.as_div(),
'<div>First name: <input type="text" name="first_name" required></div>'
'<div>Last name: <input type="text" name="last_name" required></div><div>'
'Birthday: <input type="text" name="birthday" required><input '
'type="hidden" name="hidden_text"></div>',
)
# With auto_id set, a HiddenInput still gets an ID, but it doesn't get a label.
p = Person(auto_id="id_%s")
self.assertHTMLEqual(
p.as_table(),
"""<tr><th><label for="id_first_name">First name:</label></th><td>
<input type="text" name="first_name" id="id_first_name" required></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td>
<input type="text" name="last_name" id="id_last_name" required></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th><td>
<input type="text" name="birthday" id="id_birthday" required>
<input type="hidden" name="hidden_text" id="id_hidden_text"></td></tr>""",
)
self.assertHTMLEqual(
p.as_ul(),
"""<li><label for="id_first_name">First name:</label>
<input type="text" name="first_name" id="id_first_name" required></li>
<li><label for="id_last_name">Last name:</label>
<input type="text" name="last_name" id="id_last_name" required></li>
<li><label for="id_birthday">Birthday:</label>
<input type="text" name="birthday" id="id_birthday" required>
<input type="hidden" name="hidden_text" id="id_hidden_text"></li>""",
)
self.assertHTMLEqual(
p.as_p(),
"""<p><label for="id_first_name">First name:</label>
<input type="text" name="first_name" id="id_first_name" required></p>
<p><label for="id_last_name">Last name:</label>
<input type="text" name="last_name" id="id_last_name" required></p>
<p><label for="id_birthday">Birthday:</label>
<input type="text" name="birthday" id="id_birthday" required>
<input type="hidden" name="hidden_text" id="id_hidden_text"></p>""",
)
self.assertHTMLEqual(
p.as_div(),
'<div><label for="id_first_name">First name:</label><input type="text" '
'name="first_name" id="id_first_name" required></div><div><label '
'for="id_last_name">Last name:</label><input type="text" name="last_name" '
'id="id_last_name" required></div><div><label for="id_birthday">Birthday:'
'</label><input type="text" name="birthday" id="id_birthday" required>'
'<input type="hidden" name="hidden_text" id="id_hidden_text"></div>',
)
# If a field with a HiddenInput has errors, the as_table() and as_ul() output
# will include the error message(s) with the text "(Hidden field [fieldname]) "
# prepended. This message is displayed at the top of the output, regardless of
# its field's order in the form.
p = Person(
{"first_name": "John", "last_name": "Lennon", "birthday": "1940-10-9"},
auto_id=False,
)
self.assertHTMLEqual(
p.as_table(),
"""
<tr><td colspan="2">
<ul class="errorlist nonfield"><li>
(Hidden field hidden_text) This field is required.</li></ul></td></tr>
<tr><th>First name:</th><td>
<input type="text" name="first_name" value="John" required></td></tr>
<tr><th>Last name:</th><td>
<input type="text" name="last_name" value="Lennon" required></td></tr>
<tr><th>Birthday:</th><td>
<input type="text" name="birthday" value="1940-10-9" required>
<input type="hidden" name="hidden_text"></td></tr>
""",
)
self.assertHTMLEqual(
p.as_ul(),
"""
<li><ul class="errorlist nonfield"><li>
(Hidden field hidden_text) This field is required.</li></ul></li>
<li>First name: <input type="text" name="first_name" value="John" required>
</li>
<li>Last name: <input type="text" name="last_name" value="Lennon" required>
</li>
<li>Birthday: <input type="text" name="birthday" value="1940-10-9" required>
<input type="hidden" name="hidden_text"></li>
""",
)
self.assertHTMLEqual(
p.as_p(),
"""
<ul class="errorlist nonfield"><li>
(Hidden field hidden_text) This field is required.</li></ul>
<p>First name: <input type="text" name="first_name" value="John" required>
</p>
<p>Last name: <input type="text" name="last_name" value="Lennon" required>
</p>
<p>Birthday: <input type="text" name="birthday" value="1940-10-9" required>
<input type="hidden" name="hidden_text"></p>
""",
)
self.assertHTMLEqual(
p.as_div(),
'<ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field '
'is required.</li></ul><div>First name: <input type="text" '
'name="first_name" value="John" required></div><div>Last name: <input '
'type="text" name="last_name" value="Lennon" required></div><div>'
'Birthday: <input type="text" name="birthday" value="1940-10-9" required>'
'<input type="hidden" name="hidden_text"></div>',
)
# A corner case: It's possible for a form to have only HiddenInputs.
class TestForm(Form):
foo = CharField(widget=HiddenInput)
bar = CharField(widget=HiddenInput)
p = TestForm(auto_id=False)
self.assertHTMLEqual(
p.as_table(),
'<input type="hidden" name="foo"><input type="hidden" name="bar">',
)
self.assertHTMLEqual(
p.as_ul(),
'<input type="hidden" name="foo"><input type="hidden" name="bar">',
)
self.assertHTMLEqual(
p.as_p(), '<input type="hidden" name="foo"><input type="hidden" name="bar">'
)
def test_field_order(self):
# A Form's fields are displayed in the same order in which they were defined.
class TestForm(Form):
field1 = CharField()
field2 = CharField()
field3 = CharField()
field4 = CharField()
field5 = CharField()
field6 = CharField()
field7 = CharField()
field8 = CharField()
field9 = CharField()
field10 = CharField()
field11 = CharField()
field12 = CharField()
field13 = CharField()
field14 = CharField()
p = TestForm(auto_id=False)
self.assertHTMLEqual(
p.as_table(),
"".join(
f"<tr><th>Field{i}:</th><td>"
f'<input type="text" name="field{i}" required></td></tr>'
for i in range(1, 15)
),
)
def test_explicit_field_order(self):
class TestFormParent(Form):
field1 = CharField()
field2 = CharField()
field4 = CharField()
field5 = CharField()
field6 = CharField()
field_order = ["field6", "field5", "field4", "field2", "field1"]
class TestForm(TestFormParent):
field3 = CharField()
field_order = ["field2", "field4", "field3", "field5", "field6"]
class TestFormRemove(TestForm):
field1 = None
class TestFormMissing(TestForm):
field_order = ["field2", "field4", "field3", "field5", "field6", "field1"]
field1 = None
class TestFormInit(TestFormParent):
field3 = CharField()
field_order = None
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.order_fields(field_order=TestForm.field_order)
p = TestFormParent()
self.assertEqual(list(p.fields), TestFormParent.field_order)
p = TestFormRemove()
self.assertEqual(list(p.fields), TestForm.field_order)
p = TestFormMissing()
self.assertEqual(list(p.fields), TestForm.field_order)
p = TestForm()
self.assertEqual(list(p.fields), TestFormMissing.field_order)
p = TestFormInit()
order = [*TestForm.field_order, "field1"]
self.assertEqual(list(p.fields), order)
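        # Unknown names in field_order are ignored; the remaining fields keep
        # their definition order.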
TestForm.field_order = ["unknown"]
p = TestForm()
self.assertEqual(
list(p.fields), ["field1", "field2", "field4", "field5", "field6", "field3"]
)
def test_form_html_attributes(self):
# Some Field classes have an effect on the HTML attributes of their associated
# Widget. If you set max_length in a CharField and its associated widget is
# either a TextInput or PasswordInput, then the widget's rendered HTML will
# include the "maxlength" attribute.
class UserRegistration(Form):
username = CharField(max_length=10) # uses TextInput by default
password = CharField(max_length=10, widget=PasswordInput)
realname = CharField(
max_length=10, widget=TextInput
) # redundantly define widget, just to test
address = CharField() # no max_length defined here
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""
<li>Username: <input type="text" name="username" maxlength="10" required>
</li>
<li>Password: <input type="password" name="password" maxlength="10"
required></li>
<li>Realname: <input type="text" name="realname" maxlength="10" required>
</li>
<li>Address: <input type="text" name="address" required></li>
""",
)
# If you specify a custom "attrs" that includes the "maxlength"
# attribute, the Field's max_length attribute will override whatever
# "maxlength" you specify in "attrs".
class UserRegistration(Form):
username = CharField(
max_length=10, widget=TextInput(attrs={"maxlength": 20})
)
password = CharField(max_length=10, widget=PasswordInput)
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
'<li>Username: <input type="text" name="username" maxlength="10" required>'
"</li>"
'<li>Password: <input type="password" name="password" maxlength="10" '
"required></li>",
)
def test_specifying_labels(self):
# You can specify the label for a field by using the 'label' argument to a Field
# class. If you don't specify 'label', Django will use the field name with
# underscores converted to spaces, and the initial letter capitalized.
class UserRegistration(Form):
username = CharField(max_length=10, label="Your username")
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput, label="Contraseña (de nuevo)")
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""
<li>Your username:
<input type="text" name="username" maxlength="10" required></li>
<li>Password1: <input type="password" name="password1" required></li>
<li>Contraseña (de nuevo):
<input type="password" name="password2" required></li>
""",
)
# Labels for as_* methods will only end in a colon if they don't end in other
# punctuation already.
class Questions(Form):
q1 = CharField(label="The first question")
q2 = CharField(label="What is your name?")
q3 = CharField(label="The answer to life is:")
q4 = CharField(label="Answer this question!")
q5 = CharField(label="The last question. Period.")
self.assertHTMLEqual(
Questions(auto_id=False).as_p(),
"""<p>The first question: <input type="text" name="q1" required></p>
<p>What is your name? <input type="text" name="q2" required></p>
<p>The answer to life is: <input type="text" name="q3" required></p>
<p>Answer this question! <input type="text" name="q4" required></p>
<p>The last question. Period. <input type="text" name="q5" required></p>""",
)
self.assertHTMLEqual(
Questions().as_p(),
"""
<p><label for="id_q1">The first question:</label>
<input type="text" name="q1" id="id_q1" required></p>
<p><label for="id_q2">What is your name?</label>
<input type="text" name="q2" id="id_q2" required></p>
<p><label for="id_q3">The answer to life is:</label>
<input type="text" name="q3" id="id_q3" required></p>
<p><label for="id_q4">Answer this question!</label>
<input type="text" name="q4" id="id_q4" required></p>
<p><label for="id_q5">The last question. Period.</label>
<input type="text" name="q5" id="id_q5" required></p>
""",
)
# If a label is set to the empty string for a field, that field won't
# get a label.
class UserRegistration(Form):
username = CharField(max_length=10, label="")
password = CharField(widget=PasswordInput)
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li> <input type="text" name="username" maxlength="10" required></li>
<li>Password: <input type="password" name="password" required></li>""",
)
p = UserRegistration(auto_id="id_%s")
self.assertHTMLEqual(
p.as_ul(),
"""
<li>
<input id="id_username" type="text" name="username" maxlength="10" required>
</li>
<li><label for="id_password">Password:</label>
<input type="password" name="password" id="id_password" required></li>
""",
)
        # If label is None, Django will auto-create the label from the field
        # name. This is the default behavior.
class UserRegistration(Form):
username = CharField(max_length=10, label=None)
password = CharField(widget=PasswordInput)
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
'<li>Username: <input type="text" name="username" maxlength="10" required>'
"</li>"
'<li>Password: <input type="password" name="password" required></li>',
)
p = UserRegistration(auto_id="id_%s")
self.assertHTMLEqual(
p.as_ul(),
"""<li><label for="id_username">Username:</label>
<input id="id_username" type="text" name="username" maxlength="10" required></li>
<li><label for="id_password">Password:</label>
<input type="password" name="password" id="id_password" required></li>""",
)
def test_label_suffix(self):
# You can specify the 'label_suffix' argument to a Form class to modify
# the punctuation symbol used at the end of a label. By default, the
# colon (:) is used, and is only appended to the label if the label
# doesn't already end with a punctuation symbol: ., !, ? or :. If you
# specify a different suffix, it will be appended regardless of the
# last character of the label.
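        # A label_suffix set on an individual field (like 'answer' below)
        # takes precedence over the form's label_suffix.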
class FavoriteForm(Form):
color = CharField(label="Favorite color?")
animal = CharField(label="Favorite animal")
answer = CharField(label="Secret answer", label_suffix=" =")
f = FavoriteForm(auto_id=False)
self.assertHTMLEqual(
f.as_ul(),
"""<li>Favorite color? <input type="text" name="color" required></li>
<li>Favorite animal: <input type="text" name="animal" required></li>
<li>Secret answer = <input type="text" name="answer" required></li>""",
)
f = FavoriteForm(auto_id=False, label_suffix="?")
self.assertHTMLEqual(
f.as_ul(),
"""<li>Favorite color? <input type="text" name="color" required></li>
<li>Favorite animal? <input type="text" name="animal" required></li>
<li>Secret answer = <input type="text" name="answer" required></li>""",
)
f = FavoriteForm(auto_id=False, label_suffix="")
self.assertHTMLEqual(
f.as_ul(),
"""<li>Favorite color? <input type="text" name="color" required></li>
<li>Favorite animal <input type="text" name="animal" required></li>
<li>Secret answer = <input type="text" name="answer" required></li>""",
)
f = FavoriteForm(auto_id=False, label_suffix="\u2192")
self.assertHTMLEqual(
f.as_ul(),
'<li>Favorite color? <input type="text" name="color" required></li>\n'
"<li>Favorite animal\u2192 "
'<input type="text" name="animal" required></li>\n'
'<li>Secret answer = <input type="text" name="answer" required></li>',
)
def test_initial_data(self):
# You can specify initial data for a field by using the 'initial' argument to a
# Field class. This initial data is displayed when a Form is rendered with *no*
# data. It is not displayed when a Form is rendered with any data (including an
# empty dictionary). Also, the initial value is *not* used if data for a
# particular required field isn't provided.
class UserRegistration(Form):
username = CharField(max_length=10, initial="django")
password = CharField(widget=PasswordInput)
        # Here, we're not submitting any data, so the initial value will be displayed.
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""
<li>Username: <input type="text" name="username" value="django"
maxlength="10" required></li>
<li>Password: <input type="password" name="password" required></li>
""",
)
# Here, we're submitting data, so the initial value will *not* be displayed.
p = UserRegistration({}, auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li><ul class="errorlist"><li>This field is required.</li></ul>
Username: <input type="text" name="username" maxlength="10" required></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Password: <input type="password" name="password" required></li>""",
)
p = UserRegistration({"username": ""}, auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li><ul class="errorlist"><li>This field is required.</li></ul>
Username: <input type="text" name="username" maxlength="10" required></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Password: <input type="password" name="password" required></li>""",
)
p = UserRegistration({"username": "foo"}, auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""
<li>Username: <input type="text" name="username" value="foo" maxlength="10"
required></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Password: <input type="password" name="password" required></li>
""",
)
# An 'initial' value is *not* used as a fallback if data is not
# provided. In this example, we don't provide a value for 'username',
# and the form raises a validation error rather than using the initial
# value for 'username'.
p = UserRegistration({"password": "secret"})
self.assertEqual(p.errors["username"], ["This field is required."])
self.assertFalse(p.is_valid())
def test_dynamic_initial_data(self):
# The previous technique dealt with "hard-coded" initial data, but it's also
# possible to specify initial data after you've already created the Form class
# (i.e., at runtime). Use the 'initial' parameter to the Form constructor. This
# should be a dictionary containing initial values for one or more fields in the
# form, keyed by field name.
class UserRegistration(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
        # Here, we're not submitting any data, so the initial value will be displayed.
p = UserRegistration(initial={"username": "django"}, auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""
<li>Username: <input type="text" name="username" value="django"
maxlength="10" required></li>
<li>Password: <input type="password" name="password" required></li>
""",
)
p = UserRegistration(initial={"username": "stephane"}, auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""
<li>Username: <input type="text" name="username" value="stephane"
maxlength="10" required></li>
<li>Password: <input type="password" name="password" required></li>
""",
)
# The 'initial' parameter is meaningless if you pass data.
p = UserRegistration({}, initial={"username": "django"}, auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li><ul class="errorlist"><li>This field is required.</li></ul>
Username: <input type="text" name="username" maxlength="10" required></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Password: <input type="password" name="password" required></li>""",
)
p = UserRegistration(
{"username": ""}, initial={"username": "django"}, auto_id=False
)
self.assertHTMLEqual(
p.as_ul(),
"""<li><ul class="errorlist"><li>This field is required.</li></ul>
Username: <input type="text" name="username" maxlength="10" required></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Password: <input type="password" name="password" required></li>""",
)
p = UserRegistration(
{"username": "foo"}, initial={"username": "django"}, auto_id=False
)
self.assertHTMLEqual(
p.as_ul(),
"""
<li>Username: <input type="text" name="username" value="foo" maxlength="10"
required></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Password: <input type="password" name="password" required></li>
""",
)
# A dynamic 'initial' value is *not* used as a fallback if data is not provided.
# In this example, we don't provide a value for 'username', and the
# form raises a validation error rather than using the initial value
# for 'username'.
p = UserRegistration({"password": "secret"}, initial={"username": "django"})
self.assertEqual(p.errors["username"], ["This field is required."])
self.assertFalse(p.is_valid())
# If a Form defines 'initial' *and* 'initial' is passed as a parameter
# to Form(), then the latter will get precedence.
class UserRegistration(Form):
username = CharField(max_length=10, initial="django")
password = CharField(widget=PasswordInput)
p = UserRegistration(initial={"username": "babik"}, auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""
<li>Username: <input type="text" name="username" value="babik"
maxlength="10" required></li>
<li>Password: <input type="password" name="password" required></li>
""",
)
def test_callable_initial_data(self):
# The previous technique dealt with raw values as initial data, but it's also
# possible to specify callable data.
class UserRegistration(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
options = MultipleChoiceField(
choices=[("f", "foo"), ("b", "bar"), ("w", "whiz")]
)
        # We need to define functions that get called later.
def initial_django():
return "django"
def initial_stephane():
return "stephane"
def initial_options():
return ["f", "b"]
def initial_other_options():
return ["b", "w"]
        # Here, we're not submitting any data, so the initial value will be displayed.
p = UserRegistration(
initial={"username": initial_django, "options": initial_options},
auto_id=False,
)
self.assertHTMLEqual(
p.as_ul(),
"""
<li>Username: <input type="text" name="username" value="django"
maxlength="10" required></li>
<li>Password: <input type="password" name="password" required></li>
<li>Options: <select multiple name="options" required>
<option value="f" selected>foo</option>
<option value="b" selected>bar</option>
<option value="w">whiz</option>
</select></li>
""",
)
# The 'initial' parameter is meaningless if you pass data.
p = UserRegistration(
{},
initial={"username": initial_django, "options": initial_options},
auto_id=False,
)
self.assertHTMLEqual(
p.as_ul(),
"""<li><ul class="errorlist"><li>This field is required.</li></ul>
Username: <input type="text" name="username" maxlength="10" required></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Password: <input type="password" name="password" required></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Options: <select multiple name="options" required>
<option value="f">foo</option>
<option value="b">bar</option>
<option value="w">whiz</option>
</select></li>""",
)
p = UserRegistration(
{"username": ""}, initial={"username": initial_django}, auto_id=False
)
self.assertHTMLEqual(
p.as_ul(),
"""<li><ul class="errorlist"><li>This field is required.</li></ul>
Username: <input type="text" name="username" maxlength="10" required></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Password: <input type="password" name="password" required></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Options: <select multiple name="options" required>
<option value="f">foo</option>
<option value="b">bar</option>
<option value="w">whiz</option>
</select></li>""",
)
p = UserRegistration(
{"username": "foo", "options": ["f", "b"]},
initial={"username": initial_django},
auto_id=False,
)
self.assertHTMLEqual(
p.as_ul(),
"""
<li>Username: <input type="text" name="username" value="foo" maxlength="10"
required></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>
Password: <input type="password" name="password" required></li>
<li>Options: <select multiple name="options" required>
<option value="f" selected>foo</option>
<option value="b" selected>bar</option>
<option value="w">whiz</option>
</select></li>
""",
)
# A callable 'initial' value is *not* used as a fallback if data is not
# provided. In this example, we don't provide a value for 'username',
# and the form raises a validation error rather than using the initial
# value for 'username'.
p = UserRegistration(
{"password": "secret"},
initial={"username": initial_django, "options": initial_options},
)
self.assertEqual(p.errors["username"], ["This field is required."])
self.assertFalse(p.is_valid())
# If a Form defines 'initial' *and* 'initial' is passed as a parameter
# to Form(), then the latter will get precedence.
class UserRegistration(Form):
username = CharField(max_length=10, initial=initial_django)
password = CharField(widget=PasswordInput)
options = MultipleChoiceField(
choices=[("f", "foo"), ("b", "bar"), ("w", "whiz")],
initial=initial_other_options,
)
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""
<li>Username: <input type="text" name="username" value="django"
maxlength="10" required></li>
<li>Password: <input type="password" name="password" required></li>
<li>Options: <select multiple name="options" required>
<option value="f">foo</option>
<option value="b" selected>bar</option>
<option value="w" selected>whiz</option>
</select></li>
""",
)
p = UserRegistration(
initial={"username": initial_stephane, "options": initial_options},
auto_id=False,
)
self.assertHTMLEqual(
p.as_ul(),
"""
<li>Username: <input type="text" name="username" value="stephane"
maxlength="10" required></li>
<li>Password: <input type="password" name="password" required></li>
<li>Options: <select multiple name="options" required>
<option value="f" selected>foo</option>
<option value="b" selected>bar</option>
<option value="w">whiz</option>
</select></li>
""",
)
def test_get_initial_for_field(self):
now = datetime.datetime(2006, 10, 25, 14, 30, 45, 123456)
class PersonForm(Form):
first_name = CharField(initial="John")
last_name = CharField(initial="Doe")
age = IntegerField()
occupation = CharField(initial=lambda: "Unknown")
dt_fixed = DateTimeField(initial=now)
dt_callable = DateTimeField(initial=lambda: now)
form = PersonForm(initial={"first_name": "Jane"})
cases = [
("age", None),
("last_name", "Doe"),
# Form.initial overrides Field.initial.
("first_name", "Jane"),
# Callables are evaluated.
("occupation", "Unknown"),
# Microseconds are removed from datetimes.
("dt_fixed", datetime.datetime(2006, 10, 25, 14, 30, 45)),
("dt_callable", datetime.datetime(2006, 10, 25, 14, 30, 45)),
]
for field_name, expected in cases:
with self.subTest(field_name=field_name):
field = form.fields[field_name]
actual = form.get_initial_for_field(field, field_name)
self.assertEqual(actual, expected)
def test_changed_data(self):
class Person(Form):
first_name = CharField(initial="Hans")
last_name = CharField(initial="Greatel")
birthday = DateField(initial=datetime.date(1974, 8, 16))
p = Person(
data={"first_name": "Hans", "last_name": "Scrmbl", "birthday": "1974-08-16"}
)
self.assertTrue(p.is_valid())
self.assertNotIn("first_name", p.changed_data)
self.assertIn("last_name", p.changed_data)
self.assertNotIn("birthday", p.changed_data)
# A field raising ValidationError is always in changed_data
class PedanticField(forms.Field):
def to_python(self, value):
raise ValidationError("Whatever")
class Person2(Person):
pedantic = PedanticField(initial="whatever", show_hidden_initial=True)
p = Person2(
data={
"first_name": "Hans",
"last_name": "Scrmbl",
"birthday": "1974-08-16",
"initial-pedantic": "whatever",
}
)
self.assertFalse(p.is_valid())
self.assertIn("pedantic", p.changed_data)
def test_boundfield_values(self):
        # It's possible to get the value that would be used for rendering
        # a field's widget by using the BoundField's value() method.
class UserRegistration(Form):
username = CharField(max_length=10, initial="djangonaut")
password = CharField(widget=PasswordInput)
unbound = UserRegistration()
bound = UserRegistration({"password": "foo"})
self.assertIsNone(bound["username"].value())
self.assertEqual(unbound["username"].value(), "djangonaut")
self.assertEqual(bound["password"].value(), "foo")
self.assertIsNone(unbound["password"].value())
def test_boundfield_initial_called_once(self):
"""
Multiple calls to BoundField().value() in an unbound form should return
the same result each time (#24391).
"""
class MyForm(Form):
name = CharField(max_length=10, initial=uuid.uuid4)
form = MyForm()
name = form["name"]
self.assertEqual(name.value(), name.value())
# BoundField is also cached
self.assertIs(form["name"], name)
def test_boundfield_value_disabled_callable_initial(self):
class PersonForm(Form):
name = CharField(initial=lambda: "John Doe", disabled=True)
# Without form data.
form = PersonForm()
self.assertEqual(form["name"].value(), "John Doe")
# With form data. As the field is disabled, the value should not be
# affected by the form data.
form = PersonForm({})
self.assertEqual(form["name"].value(), "John Doe")
def test_custom_boundfield(self):
class CustomField(CharField):
def get_bound_field(self, form, name):
return (form, name)
class SampleForm(Form):
name = CustomField()
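        # Overriding Field.get_bound_field() controls what form[name] returns.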
f = SampleForm()
self.assertEqual(f["name"], (f, "name"))
def test_initial_datetime_values(self):
now = datetime.datetime.now()
# Nix microseconds (since they should be ignored). #22502
now_no_ms = now.replace(microsecond=0)
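        # Ensure a nonzero microsecond component so that stripping it is
        # observable below.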
if now == now_no_ms:
now = now.replace(microsecond=1)
def delayed_now():
return now
def delayed_now_time():
return now.time()
class HiddenInputWithoutMicrosec(HiddenInput):
supports_microseconds = False
class TextInputWithoutMicrosec(TextInput):
supports_microseconds = False
class DateTimeForm(Form):
# Test a non-callable.
fixed = DateTimeField(initial=now)
auto_timestamp = DateTimeField(initial=delayed_now)
auto_time_only = TimeField(initial=delayed_now_time)
supports_microseconds = DateTimeField(initial=delayed_now, widget=TextInput)
hi_default_microsec = DateTimeField(initial=delayed_now, widget=HiddenInput)
hi_without_microsec = DateTimeField(
initial=delayed_now, widget=HiddenInputWithoutMicrosec
)
ti_without_microsec = DateTimeField(
initial=delayed_now, widget=TextInputWithoutMicrosec
)
unbound = DateTimeForm()
cases = [
("fixed", now_no_ms),
("auto_timestamp", now_no_ms),
("auto_time_only", now_no_ms.time()),
("supports_microseconds", now),
("hi_default_microsec", now),
("hi_without_microsec", now_no_ms),
("ti_without_microsec", now_no_ms),
]
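        # Microseconds survive only when the widget's supports_microseconds is
        # True: plain TextInput and HiddenInput keep them, while DateTimeInput
        # and TimeInput (the default widgets) strip them.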
for field_name, expected in cases:
with self.subTest(field_name=field_name):
actual = unbound[field_name].value()
self.assertEqual(actual, expected)
# Also check get_initial_for_field().
field = unbound.fields[field_name]
actual = unbound.get_initial_for_field(field, field_name)
self.assertEqual(actual, expected)
def get_datetime_form_with_callable_initial(self, disabled, microseconds=0):
class FakeTime:
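            # Each call to now() advances the fake clock by one second, so a
            # callable initial evaluated more than once would yield different
            # values.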
def __init__(self):
self.elapsed_seconds = 0
def now(self):
self.elapsed_seconds += 1
return datetime.datetime(
2006,
10,
25,
14,
30,
45 + self.elapsed_seconds,
microseconds,
)
class DateTimeForm(forms.Form):
dt = DateTimeField(initial=FakeTime().now, disabled=disabled)
return DateTimeForm({})
def test_datetime_clean_disabled_callable_initial_microseconds(self):
"""
Cleaning a form with a disabled DateTimeField and callable initial
removes microseconds.
"""
form = self.get_datetime_form_with_callable_initial(
disabled=True,
microseconds=123456,
)
self.assertEqual(form.errors, {})
self.assertEqual(
form.cleaned_data,
{
"dt": datetime.datetime(2006, 10, 25, 14, 30, 46),
},
)
def test_datetime_clean_disabled_callable_initial_bound_field(self):
"""
The cleaned value for a form with a disabled DateTimeField and callable
initial matches the bound field's cached initial value.
"""
form = self.get_datetime_form_with_callable_initial(disabled=True)
self.assertEqual(form.errors, {})
cleaned = form.cleaned_data["dt"]
self.assertEqual(cleaned, datetime.datetime(2006, 10, 25, 14, 30, 46))
bf = form["dt"]
self.assertEqual(cleaned, bf.initial)
def test_datetime_changed_data_callable_with_microseconds(self):
class DateTimeForm(forms.Form):
dt = DateTimeField(
initial=lambda: datetime.datetime(2006, 10, 25, 14, 30, 45, 123456),
disabled=True,
)
form = DateTimeForm({"dt": "2006-10-25 14:30:45"})
self.assertEqual(form.changed_data, [])
def test_help_text(self):
# You can specify descriptive text for a field by using the 'help_text'
# argument.
class UserRegistration(Form):
username = CharField(max_length=10, help_text="e.g., [email protected]")
password = CharField(
widget=PasswordInput, help_text="Wählen Sie mit Bedacht."
)
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li>Username: <input type="text" name="username" maxlength="10" required>
<span class="helptext">e.g., [email protected]</span></li>
<li>Password: <input type="password" name="password" required>
<span class="helptext">Wählen Sie mit Bedacht.</span></li>""",
)
self.assertHTMLEqual(
p.as_p(),
"""<p>Username: <input type="text" name="username" maxlength="10" required>
<span class="helptext">e.g., [email protected]</span></p>
<p>Password: <input type="password" name="password" required>
<span class="helptext">Wählen Sie mit Bedacht.</span></p>""",
)
self.assertHTMLEqual(
p.as_table(),
"""
<tr><th>Username:</th><td>
<input type="text" name="username" maxlength="10" required><br>
<span class="helptext">e.g., [email protected]</span></td></tr>
<tr><th>Password:</th><td><input type="password" name="password" required>
<br>
<span class="helptext">Wählen Sie mit Bedacht.</span></td></tr>""",
)
self.assertHTMLEqual(
p.as_div(),
'<div>Username: <div class="helptext">e.g., [email protected]</div>'
'<input type="text" name="username" maxlength="10" required></div>'
'<div>Password: <div class="helptext">Wählen Sie mit Bedacht.</div>'
'<input type="password" name="password" required></div>',
)
# The help text is displayed whether or not data is provided for the form.
p = UserRegistration({"username": "foo"}, auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
'<li>Username: <input type="text" name="username" value="foo" '
'maxlength="10" required>'
'<span class="helptext">e.g., [email protected]</span></li>'
'<li><ul class="errorlist"><li>This field is required.</li></ul>'
'Password: <input type="password" name="password" required>'
'<span class="helptext">Wählen Sie mit Bedacht.</span></li>',
)
# help_text is not displayed for hidden fields. It can be used for documentation
# purposes, though.
class UserRegistration(Form):
username = CharField(max_length=10, help_text="e.g., [email protected]")
password = CharField(widget=PasswordInput)
next = CharField(
widget=HiddenInput, initial="/", help_text="Redirect destination"
)
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li>Username: <input type="text" name="username" maxlength="10" required>
<span class="helptext">e.g., [email protected]</span></li>
<li>Password: <input type="password" name="password" required>
<input type="hidden" name="next" value="/"></li>""",
)
def test_help_text_html_safe(self):
"""help_text should not be escaped."""
class UserRegistration(Form):
username = CharField(max_length=10, help_text="e.g., [email protected]")
password = CharField(
widget=PasswordInput,
help_text="Help text is <strong>escaped</strong>.",
)
p = UserRegistration(auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
'<li>Username: <input type="text" name="username" maxlength="10" required>'
'<span class="helptext">e.g., [email protected]</span></li>'
'<li>Password: <input type="password" name="password" required>'
'<span class="helptext">Help text is <strong>escaped</strong>.</span></li>',
)
self.assertHTMLEqual(
p.as_p(),
'<p>Username: <input type="text" name="username" maxlength="10" required>'
'<span class="helptext">e.g., [email protected]</span></p>'
'<p>Password: <input type="password" name="password" required>'
'<span class="helptext">Help text is <strong>escaped</strong>.</span></p>',
)
self.assertHTMLEqual(
p.as_table(),
"<tr><th>Username:</th><td>"
'<input type="text" name="username" maxlength="10" required><br>'
'<span class="helptext">e.g., [email protected]</span></td></tr>'
"<tr><th>Password:</th><td>"
'<input type="password" name="password" required><br>'
'<span class="helptext">Help text is <strong>escaped</strong>.</span>'
"</td></tr>",
)
def test_subclassing_forms(self):
# You can subclass a Form to add fields. The resulting form subclass will have
# all of the fields of the parent Form, plus whichever fields you define in the
# subclass.
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
class Musician(Person):
instrument = CharField()
p = Person(auto_id=False)
self.assertHTMLEqual(
p.as_ul(),
"""<li>First name: <input type="text" name="first_name" required></li>
<li>Last name: <input type="text" name="last_name" required></li>
<li>Birthday: <input type="text" name="birthday" required></li>""",
)
m = Musician(auto_id=False)
self.assertHTMLEqual(
m.as_ul(),
"""<li>First name: <input type="text" name="first_name" required></li>
<li>Last name: <input type="text" name="last_name" required></li>
<li>Birthday: <input type="text" name="birthday" required></li>
<li>Instrument: <input type="text" name="instrument" required></li>""",
)
        # Yes, you can subclass multiple forms. Fields are collected in
        # reverse MRO order, so the last-listed parent's fields come first,
        # as the rendering below shows.
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
class Instrument(Form):
instrument = CharField()
class Beatle(Person, Instrument):
haircut_type = CharField()
b = Beatle(auto_id=False)
self.assertHTMLEqual(
b.as_ul(),
"""<li>Instrument: <input type="text" name="instrument" required></li>
<li>First name: <input type="text" name="first_name" required></li>
<li>Last name: <input type="text" name="last_name" required></li>
<li>Birthday: <input type="text" name="birthday" required></li>
<li>Haircut type: <input type="text" name="haircut_type" required></li>""",
)
def test_forms_with_prefixes(self):
# Sometimes it's necessary to have multiple forms display on the same
# HTML page, or multiple copies of the same form. We can accomplish
# this with form prefixes. Pass the keyword argument 'prefix' to the
# Form constructor to use this feature. This value will be prepended to
# each HTML form field name. One way to think about this is "namespaces
# for HTML forms". Notice that in the data argument, each field's key
# has the prefix, in this case 'person1', prepended to the actual field
# name.
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
data = {
"person1-first_name": "John",
"person1-last_name": "Lennon",
"person1-birthday": "1940-10-9",
}
p = Person(data, prefix="person1")
self.assertHTMLEqual(
p.as_ul(),
"""
<li><label for="id_person1-first_name">First name:</label>
<input type="text" name="person1-first_name" value="John"
id="id_person1-first_name" required></li>
<li><label for="id_person1-last_name">Last name:</label>
<input type="text" name="person1-last_name" value="Lennon"
id="id_person1-last_name" required></li>
<li><label for="id_person1-birthday">Birthday:</label>
<input type="text" name="person1-birthday" value="1940-10-9"
id="id_person1-birthday" required></li>
""",
)
self.assertHTMLEqual(
str(p["first_name"]),
'<input type="text" name="person1-first_name" value="John" '
'id="id_person1-first_name" required>',
)
self.assertHTMLEqual(
str(p["last_name"]),
'<input type="text" name="person1-last_name" value="Lennon" '
'id="id_person1-last_name" required>',
)
self.assertHTMLEqual(
str(p["birthday"]),
'<input type="text" name="person1-birthday" value="1940-10-9" '
'id="id_person1-birthday" required>',
)
self.assertEqual(p.errors, {})
self.assertTrue(p.is_valid())
self.assertEqual(p.cleaned_data["first_name"], "John")
self.assertEqual(p.cleaned_data["last_name"], "Lennon")
self.assertEqual(p.cleaned_data["birthday"], datetime.date(1940, 10, 9))
# Let's try submitting some bad data to make sure form.errors and field.errors
# work as expected.
data = {
"person1-first_name": "",
"person1-last_name": "",
"person1-birthday": "",
}
p = Person(data, prefix="person1")
self.assertEqual(p.errors["first_name"], ["This field is required."])
self.assertEqual(p.errors["last_name"], ["This field is required."])
self.assertEqual(p.errors["birthday"], ["This field is required."])
self.assertEqual(p["first_name"].errors, ["This field is required."])
# Accessing a nonexistent field.
with self.assertRaises(KeyError):
p["person1-first_name"].errors
# In this example, the data doesn't have a prefix, but the form requires it, so
# the form doesn't "see" the fields.
data = {"first_name": "John", "last_name": "Lennon", "birthday": "1940-10-9"}
p = Person(data, prefix="person1")
self.assertEqual(p.errors["first_name"], ["This field is required."])
self.assertEqual(p.errors["last_name"], ["This field is required."])
self.assertEqual(p.errors["birthday"], ["This field is required."])
# With prefixes, a single data dictionary can hold data for multiple instances
# of the same form.
data = {
"person1-first_name": "John",
"person1-last_name": "Lennon",
"person1-birthday": "1940-10-9",
"person2-first_name": "Jim",
"person2-last_name": "Morrison",
"person2-birthday": "1943-12-8",
}
p1 = Person(data, prefix="person1")
self.assertTrue(p1.is_valid())
self.assertEqual(p1.cleaned_data["first_name"], "John")
self.assertEqual(p1.cleaned_data["last_name"], "Lennon")
self.assertEqual(p1.cleaned_data["birthday"], datetime.date(1940, 10, 9))
p2 = Person(data, prefix="person2")
self.assertTrue(p2.is_valid())
self.assertEqual(p2.cleaned_data["first_name"], "Jim")
self.assertEqual(p2.cleaned_data["last_name"], "Morrison")
self.assertEqual(p2.cleaned_data["birthday"], datetime.date(1943, 12, 8))
# By default, forms append a hyphen between the prefix and the field name, but a
# form can alter that behavior by implementing the add_prefix() method. This
# method takes a field name and returns the prefixed field, according to
# self.prefix.
class Person(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
def add_prefix(self, field_name):
return (
"%s-prefix-%s" % (self.prefix, field_name)
if self.prefix
else field_name
)
p = Person(prefix="foo")
self.assertHTMLEqual(
p.as_ul(),
"""
<li><label for="id_foo-prefix-first_name">First name:</label>
<input type="text" name="foo-prefix-first_name"
id="id_foo-prefix-first_name" required></li>
<li><label for="id_foo-prefix-last_name">Last name:</label>
<input type="text" name="foo-prefix-last_name" id="id_foo-prefix-last_name"
required></li>
<li><label for="id_foo-prefix-birthday">Birthday:</label>
<input type="text" name="foo-prefix-birthday" id="id_foo-prefix-birthday"
required></li>
""",
)
data = {
"foo-prefix-first_name": "John",
"foo-prefix-last_name": "Lennon",
"foo-prefix-birthday": "1940-10-9",
}
p = Person(data, prefix="foo")
self.assertTrue(p.is_valid())
self.assertEqual(p.cleaned_data["first_name"], "John")
self.assertEqual(p.cleaned_data["last_name"], "Lennon")
self.assertEqual(p.cleaned_data["birthday"], datetime.date(1940, 10, 9))
def test_class_prefix(self):
# Prefix can be also specified at the class level.
class Person(Form):
first_name = CharField()
prefix = "foo"
p = Person()
self.assertEqual(p.prefix, "foo")
p = Person(prefix="bar")
self.assertEqual(p.prefix, "bar")
def test_forms_with_null_boolean(self):
# NullBooleanField is a bit of a special case because its presentation (widget)
# is different than its data. This is handled transparently, though.
class Person(Form):
name = CharField()
is_cool = NullBooleanField()
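        # NullBooleanSelect accepts the legacy "1"/"2"/"3" values, the
        # "unknown"/"true"/"false" values, and real booleans for
        # Unknown/Yes/No respectively.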
p = Person({"name": "Joe"}, auto_id=False)
self.assertHTMLEqual(
str(p["is_cool"]),
"""<select name="is_cool">
<option value="unknown" selected>Unknown</option>
<option value="true">Yes</option>
<option value="false">No</option>
</select>""",
)
p = Person({"name": "Joe", "is_cool": "1"}, auto_id=False)
self.assertHTMLEqual(
str(p["is_cool"]),
"""<select name="is_cool">
<option value="unknown" selected>Unknown</option>
<option value="true">Yes</option>
<option value="false">No</option>
</select>""",
)
p = Person({"name": "Joe", "is_cool": "2"}, auto_id=False)
self.assertHTMLEqual(
str(p["is_cool"]),
"""<select name="is_cool">
<option value="unknown">Unknown</option>
<option value="true" selected>Yes</option>
<option value="false">No</option>
</select>""",
)
p = Person({"name": "Joe", "is_cool": "3"}, auto_id=False)
self.assertHTMLEqual(
str(p["is_cool"]),
"""<select name="is_cool">
<option value="unknown">Unknown</option>
<option value="true">Yes</option>
<option value="false" selected>No</option>
</select>""",
)
p = Person({"name": "Joe", "is_cool": True}, auto_id=False)
self.assertHTMLEqual(
str(p["is_cool"]),
"""<select name="is_cool">
<option value="unknown">Unknown</option>
<option value="true" selected>Yes</option>
<option value="false">No</option>
</select>""",
)
p = Person({"name": "Joe", "is_cool": False}, auto_id=False)
self.assertHTMLEqual(
str(p["is_cool"]),
"""<select name="is_cool">
<option value="unknown">Unknown</option>
<option value="true">Yes</option>
<option value="false" selected>No</option>
</select>""",
)
p = Person({"name": "Joe", "is_cool": "unknown"}, auto_id=False)
self.assertHTMLEqual(
str(p["is_cool"]),
"""<select name="is_cool">
<option value="unknown" selected>Unknown</option>
<option value="true">Yes</option>
<option value="false">No</option>
</select>""",
)
p = Person({"name": "Joe", "is_cool": "true"}, auto_id=False)
self.assertHTMLEqual(
str(p["is_cool"]),
"""<select name="is_cool">
<option value="unknown">Unknown</option>
<option value="true" selected>Yes</option>
<option value="false">No</option>
</select>""",
)
p = Person({"name": "Joe", "is_cool": "false"}, auto_id=False)
self.assertHTMLEqual(
str(p["is_cool"]),
"""<select name="is_cool">
<option value="unknown">Unknown</option>
<option value="true">Yes</option>
<option value="false" selected>No</option>
</select>""",
)
def test_forms_with_file_fields(self):
# FileFields are a special case because they take their data from the
# request.FILES, not request.POST.
class FileForm(Form):
file1 = FileField()
f = FileForm(auto_id=False)
self.assertHTMLEqual(
f.as_table(),
"<tr><th>File1:</th><td>"
'<input type="file" name="file1" required></td></tr>',
)
f = FileForm(data={}, files={}, auto_id=False)
self.assertHTMLEqual(
f.as_table(),
"<tr><th>File1:</th><td>"
'<ul class="errorlist"><li>This field is required.</li></ul>'
'<input type="file" name="file1" required></td></tr>',
)
f = FileForm(
data={}, files={"file1": SimpleUploadedFile("name", b"")}, auto_id=False
)
self.assertHTMLEqual(
f.as_table(),
"<tr><th>File1:</th><td>"
'<ul class="errorlist"><li>The submitted file is empty.</li></ul>'
'<input type="file" name="file1" required></td></tr>',
)
f = FileForm(
data={}, files={"file1": "something that is not a file"}, auto_id=False
)
self.assertHTMLEqual(
f.as_table(),
"<tr><th>File1:</th><td>"
'<ul class="errorlist"><li>No file was submitted. Check the '
"encoding type on the form.</li></ul>"
'<input type="file" name="file1" required></td></tr>',
)
f = FileForm(
data={},
files={"file1": SimpleUploadedFile("name", b"some content")},
auto_id=False,
)
self.assertHTMLEqual(
f.as_table(),
"<tr><th>File1:</th><td>"
'<input type="file" name="file1" required></td></tr>',
)
self.assertTrue(f.is_valid())
file1 = SimpleUploadedFile(
"我隻氣墊船裝滿晒鱔.txt", "मेरी मँडराने वाली नाव सर्पमीनों से भरी ह".encode()
)
f = FileForm(data={}, files={"file1": file1}, auto_id=False)
self.assertHTMLEqual(
f.as_table(),
"<tr><th>File1:</th><td>"
'<input type="file" name="file1" required></td></tr>',
)
# A required file field with initial data should not contain the
# required HTML attribute. The file input is left blank by the user to
# keep the existing, initial value.
f = FileForm(initial={"file1": "resume.txt"}, auto_id=False)
self.assertHTMLEqual(
f.as_table(),
'<tr><th>File1:</th><td><input type="file" name="file1"></td></tr>',
)
def test_filefield_initial_callable(self):
class FileForm(forms.Form):
file1 = forms.FileField(initial=lambda: "resume.txt")
f = FileForm({})
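        # With no file submitted, FileField falls back to its initial value;
        # a callable initial is evaluated along the way.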
self.assertEqual(f.errors, {})
self.assertEqual(f.cleaned_data["file1"], "resume.txt")
def test_filefield_with_fileinput_required(self):
class FileForm(Form):
file1 = forms.FileField(widget=FileInput)
f = FileForm(auto_id=False)
self.assertHTMLEqual(
f.as_table(),
"<tr><th>File1:</th><td>"
'<input type="file" name="file1" required></td></tr>',
)
# A required file field with initial data doesn't contain the required
# HTML attribute. The file input is left blank by the user to keep the
# existing, initial value.
f = FileForm(initial={"file1": "resume.txt"}, auto_id=False)
self.assertHTMLEqual(
f.as_table(),
'<tr><th>File1:</th><td><input type="file" name="file1"></td></tr>',
)
def test_empty_permitted(self):
# Sometimes (pretty much in formsets) we want to allow a form to pass validation
# if it is completely empty. We can accomplish this by using the empty_permitted
# argument to a form constructor.
class SongForm(Form):
artist = CharField()
name = CharField()
        # First let's show what happens if empty_permitted=False (the default):
data = {"artist": "", "song": ""}
form = SongForm(data, empty_permitted=False)
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors,
{
"name": ["This field is required."],
"artist": ["This field is required."],
},
)
self.assertEqual(form.cleaned_data, {})
# Now let's show what happens when empty_permitted=True and the form is empty.
form = SongForm(data, empty_permitted=True, use_required_attribute=False)
self.assertTrue(form.is_valid())
self.assertEqual(form.errors, {})
self.assertEqual(form.cleaned_data, {})
# But if we fill in data for one of the fields, the form is no longer empty and
# the whole thing must pass validation.
data = {"artist": "The Doors", "song": ""}
form = SongForm(data, empty_permitted=False)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {"name": ["This field is required."]})
self.assertEqual(form.cleaned_data, {"artist": "The Doors"})
        # If a field is not given in the data then None is returned for its data.
        # Let's make sure that None is treated accordingly when checking for
        # empty_permitted.
data = {"artist": None, "song": ""}
form = SongForm(data, empty_permitted=True, use_required_attribute=False)
self.assertTrue(form.is_valid())
# However, we *really* need to be sure we are checking for None as any data in
# initial that returns False on a boolean call needs to be treated literally.
class PriceForm(Form):
amount = FloatField()
qty = IntegerField()
data = {"amount": "0.0", "qty": ""}
form = PriceForm(
data,
initial={"amount": 0.0},
empty_permitted=True,
use_required_attribute=False,
)
self.assertTrue(form.is_valid())
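    # Added sketch (not in the original suite): formsets are the usual source
    # of empty_permitted -- their extra forms may stay entirely empty. PetForm
    # is a made-up name for this example.
    def test_empty_permitted_sketch(self):
        class PetForm(Form):
            name = CharField()
        form = PetForm(
            {"name": ""}, empty_permitted=True, use_required_attribute=False
        )
        # A fully empty form validates and yields empty cleaned_data.
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data, {})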
def test_empty_permitted_and_use_required_attribute(self):
msg = (
"The empty_permitted and use_required_attribute arguments may not "
"both be True."
)
with self.assertRaisesMessage(ValueError, msg):
Person(empty_permitted=True, use_required_attribute=True)
def test_extracting_hidden_and_visible(self):
class SongForm(Form):
token = CharField(widget=HiddenInput)
artist = CharField()
name = CharField()
form = SongForm()
self.assertEqual([f.name for f in form.hidden_fields()], ["token"])
self.assertEqual([f.name for f in form.visible_fields()], ["artist", "name"])
def test_hidden_initial_gets_id(self):
class MyForm(Form):
field1 = CharField(max_length=50, show_hidden_initial=True)
self.assertHTMLEqual(
MyForm().as_table(),
'<tr><th><label for="id_field1">Field1:</label></th><td>'
'<input id="id_field1" type="text" name="field1" maxlength="50" required>'
'<input type="hidden" name="initial-field1" id="initial-id_field1">'
"</td></tr>",
)
def test_error_html_required_html_classes(self):
class Person(Form):
name = CharField()
is_cool = NullBooleanField()
email = EmailField(required=False)
age = IntegerField()
p = Person({})
p.error_css_class = "error"
p.required_css_class = "required"
self.assertHTMLEqual(
p.as_ul(),
"""
<li class="required error"><ul class="errorlist">
<li>This field is required.</li></ul>
<label class="required" for="id_name">Name:</label>
<input type="text" name="name" id="id_name" required></li>
<li class="required">
<label class="required" for="id_is_cool">Is cool:</label>
<select name="is_cool" id="id_is_cool">
<option value="unknown" selected>Unknown</option>
<option value="true">Yes</option>
<option value="false">No</option>
</select></li>
<li><label for="id_email">Email:</label>
<input type="email" name="email" id="id_email"></li>
<li class="required error"><ul class="errorlist">
<li>This field is required.</li></ul>
<label class="required" for="id_age">Age:</label>
<input type="number" name="age" id="id_age" required></li>""",
)
self.assertHTMLEqual(
p.as_p(),
"""
<ul class="errorlist"><li>This field is required.</li></ul>
<p class="required error">
<label class="required" for="id_name">Name:</label>
<input type="text" name="name" id="id_name" required></p>
<p class="required">
<label class="required" for="id_is_cool">Is cool:</label>
<select name="is_cool" id="id_is_cool">
<option value="unknown" selected>Unknown</option>
<option value="true">Yes</option>
<option value="false">No</option>
</select></p>
<p><label for="id_email">Email:</label>
<input type="email" name="email" id="id_email"></p>
<ul class="errorlist"><li>This field is required.</li></ul>
<p class="required error"><label class="required" for="id_age">Age:</label>
<input type="number" name="age" id="id_age" required></p>
""",
)
self.assertHTMLEqual(
p.as_table(),
"""<tr class="required error">
<th><label class="required" for="id_name">Name:</label></th>
<td><ul class="errorlist"><li>This field is required.</li></ul>
<input type="text" name="name" id="id_name" required></td></tr>
<tr class="required"><th><label class="required" for="id_is_cool">Is cool:</label></th>
<td><select name="is_cool" id="id_is_cool">
<option value="unknown" selected>Unknown</option>
<option value="true">Yes</option>
<option value="false">No</option>
</select></td></tr>
<tr><th><label for="id_email">Email:</label></th><td>
<input type="email" name="email" id="id_email"></td></tr>
<tr class="required error"><th><label class="required" for="id_age">Age:</label></th>
<td><ul class="errorlist"><li>This field is required.</li></ul>
<input type="number" name="age" id="id_age" required></td></tr>""",
)
self.assertHTMLEqual(
p.as_div(),
'<div class="required error"><label for="id_name" class="required">Name:'
'</label><ul class="errorlist"><li>This field is required.</li></ul>'
'<input type="text" name="name" required id="id_name" /></div>'
'<div class="required"><label for="id_is_cool" class="required">Is cool:'
'</label><select name="is_cool" id="id_is_cool">'
'<option value="unknown" selected>Unknown</option>'
'<option value="true">Yes</option><option value="false">No</option>'
'</select></div><div><label for="id_email">Email:</label>'
'<input type="email" name="email" id="id_email" /></div>'
'<div class="required error"><label for="id_age" class="required">Age:'
'</label><ul class="errorlist"><li>This field is required.</li></ul>'
'<input type="number" name="age" required id="id_age" /></div>',
)
def test_label_has_required_css_class(self):
"""
required_css_class is added to label_tag() and legend_tag() of required
fields.
"""
class SomeForm(Form):
required_css_class = "required"
field = CharField(max_length=10)
field2 = IntegerField(required=False)
f = SomeForm({"field": "test"})
self.assertHTMLEqual(
f["field"].label_tag(),
'<label for="id_field" class="required">Field:</label>',
)
self.assertHTMLEqual(
f["field"].legend_tag(),
'<legend for="id_field" class="required">Field:</legend>',
)
self.assertHTMLEqual(
f["field"].label_tag(attrs={"class": "foo"}),
'<label for="id_field" class="foo required">Field:</label>',
)
self.assertHTMLEqual(
f["field"].legend_tag(attrs={"class": "foo"}),
'<legend for="id_field" class="foo required">Field:</legend>',
)
self.assertHTMLEqual(
f["field2"].label_tag(), '<label for="id_field2">Field2:</label>'
)
self.assertHTMLEqual(
f["field2"].legend_tag(),
'<legend for="id_field2">Field2:</legend>',
)
def test_label_split_datetime_not_displayed(self):
class EventForm(Form):
happened_at = SplitDateTimeField(widget=SplitHiddenDateTimeWidget)
form = EventForm()
self.assertHTMLEqual(
form.as_ul(),
'<input type="hidden" name="happened_at_0" id="id_happened_at_0">'
'<input type="hidden" name="happened_at_1" id="id_happened_at_1">',
)
def test_multivalue_field_validation(self):
def bad_names(value):
if value == "bad value":
raise ValidationError("bad value not allowed")
class NameField(MultiValueField):
def __init__(self, fields=(), *args, **kwargs):
fields = (
CharField(label="First name", max_length=10),
CharField(label="Last name", max_length=10),
)
super().__init__(fields=fields, *args, **kwargs)
def compress(self, data_list):
return " ".join(data_list)
class NameForm(Form):
name = NameField(validators=[bad_names])
form = NameForm(data={"name": ["bad", "value"]})
form.full_clean()
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {"name": ["bad value not allowed"]})
form = NameForm(data={"name": ["should be overly", "long for the field names"]})
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors,
{
"name": [
"Ensure this value has at most 10 characters (it has 16).",
"Ensure this value has at most 10 characters (it has 24).",
],
},
)
form = NameForm(data={"name": ["fname", "lname"]})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {"name": "fname lname"})
def test_multivalue_deep_copy(self):
"""
#19298 -- MultiValueField needs to override the default as it needs
to deep-copy subfields:
"""
class ChoicesField(MultiValueField):
def __init__(self, fields=(), *args, **kwargs):
fields = (
ChoiceField(label="Rank", choices=((1, 1), (2, 2))),
CharField(label="Name", max_length=10),
)
super().__init__(fields=fields, *args, **kwargs)
field = ChoicesField()
field2 = copy.deepcopy(field)
self.assertIsInstance(field2, ChoicesField)
self.assertIsNot(field2.fields, field.fields)
self.assertIsNot(field2.fields[0].choices, field.fields[0].choices)
def test_multivalue_initial_data(self):
"""
        #23674 -- invalid initial data should not break form.changed_data
"""
class DateAgeField(MultiValueField):
def __init__(self, fields=(), *args, **kwargs):
fields = (DateField(label="Date"), IntegerField(label="Age"))
super().__init__(fields=fields, *args, **kwargs)
class DateAgeForm(Form):
date_age = DateAgeField()
data = {"date_age": ["1998-12-06", 16]}
form = DateAgeForm(data, initial={"date_age": ["200-10-10", 14]})
self.assertTrue(form.has_changed())
def test_multivalue_optional_subfields(self):
class PhoneField(MultiValueField):
def __init__(self, *args, **kwargs):
fields = (
CharField(
label="Country Code",
validators=[
RegexValidator(
r"^\+[0-9]{1,2}$", message="Enter a valid country code."
)
],
),
CharField(label="Phone Number"),
CharField(
label="Extension",
error_messages={"incomplete": "Enter an extension."},
),
CharField(
label="Label", required=False, help_text="E.g. home, work."
),
)
super().__init__(fields, *args, **kwargs)
def compress(self, data_list):
if data_list:
return "%s.%s ext. %s (label: %s)" % tuple(data_list)
return None
# An empty value for any field will raise a `required` error on a
# required `MultiValueField`.
f = PhoneField()
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean("")
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean([])
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(["+61"])
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(["+61", "287654321", "123"])
self.assertEqual(
"+61.287654321 ext. 123 (label: Home)",
f.clean(["+61", "287654321", "123", "Home"]),
)
with self.assertRaisesMessage(ValidationError, "'Enter a valid country code.'"):
f.clean(["61", "287654321", "123", "Home"])
# Empty values for fields will NOT raise a `required` error on an
# optional `MultiValueField`
f = PhoneField(required=False)
self.assertIsNone(f.clean(""))
self.assertIsNone(f.clean(None))
self.assertIsNone(f.clean([]))
self.assertEqual("+61. ext. (label: )", f.clean(["+61"]))
self.assertEqual(
"+61.287654321 ext. 123 (label: )", f.clean(["+61", "287654321", "123"])
)
self.assertEqual(
"+61.287654321 ext. 123 (label: Home)",
f.clean(["+61", "287654321", "123", "Home"]),
)
with self.assertRaisesMessage(ValidationError, "'Enter a valid country code.'"):
f.clean(["61", "287654321", "123", "Home"])
# For a required `MultiValueField` with `require_all_fields=False`, a
# `required` error will only be raised if all fields are empty. Fields
# can individually be required or optional. An empty value for any
# required field will raise an `incomplete` error.
f = PhoneField(require_all_fields=False)
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean("")
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean([])
with self.assertRaisesMessage(ValidationError, "'Enter a complete value.'"):
f.clean(["+61"])
self.assertEqual(
"+61.287654321 ext. 123 (label: )", f.clean(["+61", "287654321", "123"])
)
with self.assertRaisesMessage(
ValidationError, "'Enter a complete value.', 'Enter an extension.'"
):
f.clean(["", "", "", "Home"])
with self.assertRaisesMessage(ValidationError, "'Enter a valid country code.'"):
f.clean(["61", "287654321", "123", "Home"])
# For an optional `MultiValueField` with `require_all_fields=False`, we
# don't get any `required` error but we still get `incomplete` errors.
f = PhoneField(required=False, require_all_fields=False)
self.assertIsNone(f.clean(""))
self.assertIsNone(f.clean(None))
self.assertIsNone(f.clean([]))
with self.assertRaisesMessage(ValidationError, "'Enter a complete value.'"):
f.clean(["+61"])
self.assertEqual(
"+61.287654321 ext. 123 (label: )", f.clean(["+61", "287654321", "123"])
)
with self.assertRaisesMessage(
ValidationError, "'Enter a complete value.', 'Enter an extension.'"
):
f.clean(["", "", "", "Home"])
with self.assertRaisesMessage(ValidationError, "'Enter a valid country code.'"):
f.clean(["61", "287654321", "123", "Home"])
def test_multivalue_optional_subfields_rendering(self):
class PhoneWidget(MultiWidget):
def __init__(self, attrs=None):
widgets = [TextInput(), TextInput()]
super().__init__(widgets, attrs)
def decompress(self, value):
return [None, None]
class PhoneField(MultiValueField):
def __init__(self, *args, **kwargs):
fields = [CharField(), CharField(required=False)]
super().__init__(fields, *args, **kwargs)
class PhoneForm(Form):
phone1 = PhoneField(widget=PhoneWidget)
phone2 = PhoneField(widget=PhoneWidget, required=False)
phone3 = PhoneField(widget=PhoneWidget, require_all_fields=False)
phone4 = PhoneField(
widget=PhoneWidget,
required=False,
require_all_fields=False,
)
form = PhoneForm(auto_id=False)
self.assertHTMLEqual(
form.as_p(),
"""
<p>Phone1:<input type="text" name="phone1_0" required>
<input type="text" name="phone1_1" required></p>
<p>Phone2:<input type="text" name="phone2_0">
<input type="text" name="phone2_1"></p>
<p>Phone3:<input type="text" name="phone3_0" required>
<input type="text" name="phone3_1"></p>
<p>Phone4:<input type="text" name="phone4_0">
<input type="text" name="phone4_1"></p>
""",
)
def test_custom_empty_values(self):
"""
Form fields can customize what is considered as an empty value
for themselves (#19997).
"""
class CustomJSONField(CharField):
empty_values = [None, ""]
def to_python(self, value):
# Fake json.loads
if value == "{}":
return {}
return super().to_python(value)
class JSONForm(forms.Form):
json = CustomJSONField()
form = JSONForm(data={"json": "{}"})
form.full_clean()
self.assertEqual(form.cleaned_data, {"json": {}})
def test_boundfield_label_tag(self):
class SomeForm(Form):
field = CharField()
boundfield = SomeForm()["field"]
testcases = [ # (args, kwargs, expected)
# without anything: just print the <label>
((), {}, '<%(tag)s for="id_field">Field:</%(tag)s>'),
# passing just one argument: overrides the field's label
(("custom",), {}, '<%(tag)s for="id_field">custom:</%(tag)s>'),
# the overridden label is escaped
(("custom&",), {}, '<%(tag)s for="id_field">custom&:</%(tag)s>'),
((mark_safe("custom&"),), {}, '<%(tag)s for="id_field">custom&:</%(tag)s>'),
# Passing attrs to add extra attributes on the <label>
(
(),
{"attrs": {"class": "pretty"}},
'<%(tag)s for="id_field" class="pretty">Field:</%(tag)s>',
),
]
for args, kwargs, expected in testcases:
with self.subTest(args=args, kwargs=kwargs):
self.assertHTMLEqual(
boundfield.label_tag(*args, **kwargs),
expected % {"tag": "label"},
)
self.assertHTMLEqual(
boundfield.legend_tag(*args, **kwargs),
expected % {"tag": "legend"},
)
def test_boundfield_label_tag_no_id(self):
"""
        If a widget has no id, label_tag() and legend_tag() return the text
        with no surrounding <label> or <legend> tag.
"""
class SomeForm(Form):
field = CharField()
boundfield = SomeForm(auto_id="")["field"]
self.assertHTMLEqual(boundfield.label_tag(), "Field:")
self.assertHTMLEqual(boundfield.legend_tag(), "Field:")
self.assertHTMLEqual(boundfield.label_tag("Custom&"), "Custom&:")
self.assertHTMLEqual(boundfield.legend_tag("Custom&"), "Custom&:")
def test_boundfield_label_tag_custom_widget_id_for_label(self):
class CustomIdForLabelTextInput(TextInput):
def id_for_label(self, id):
return "custom_" + id
class EmptyIdForLabelTextInput(TextInput):
def id_for_label(self, id):
return None
class SomeForm(Form):
custom = CharField(widget=CustomIdForLabelTextInput)
empty = CharField(widget=EmptyIdForLabelTextInput)
form = SomeForm()
self.assertHTMLEqual(
form["custom"].label_tag(), '<label for="custom_id_custom">Custom:</label>'
)
self.assertHTMLEqual(
form["custom"].legend_tag(),
'<legend for="custom_id_custom">Custom:</legend>',
)
self.assertHTMLEqual(form["empty"].label_tag(), "<label>Empty:</label>")
self.assertHTMLEqual(form["empty"].legend_tag(), "<legend>Empty:</legend>")
def test_boundfield_empty_label(self):
class SomeForm(Form):
field = CharField(label="")
boundfield = SomeForm()["field"]
self.assertHTMLEqual(boundfield.label_tag(), '<label for="id_field"></label>')
self.assertHTMLEqual(
boundfield.legend_tag(),
'<legend for="id_field"></legend>',
)
def test_boundfield_id_for_label(self):
class SomeForm(Form):
field = CharField(label="")
self.assertEqual(SomeForm()["field"].id_for_label, "id_field")
def test_boundfield_id_for_label_override_by_attrs(self):
"""
If an id is provided in `Widget.attrs`, it overrides the generated ID,
unless it is `None`.
"""
class SomeForm(Form):
field = CharField(widget=TextInput(attrs={"id": "myCustomID"}))
field_none = CharField(widget=TextInput(attrs={"id": None}))
form = SomeForm()
self.assertEqual(form["field"].id_for_label, "myCustomID")
self.assertEqual(form["field_none"].id_for_label, "id_field_none")
def test_boundfield_subwidget_id_for_label(self):
"""
If auto_id is provided when initializing the form, the generated ID in
subwidgets must reflect that prefix.
"""
class SomeForm(Form):
field = MultipleChoiceField(
choices=[("a", "A"), ("b", "B")],
widget=CheckboxSelectMultiple,
)
form = SomeForm(auto_id="prefix_%s")
subwidgets = form["field"].subwidgets
self.assertEqual(subwidgets[0].id_for_label, "prefix_field_0")
self.assertEqual(subwidgets[1].id_for_label, "prefix_field_1")
def test_boundfield_widget_type(self):
class SomeForm(Form):
first_name = CharField()
birthday = SplitDateTimeField(widget=SplitHiddenDateTimeWidget)
f = SomeForm()
self.assertEqual(f["first_name"].widget_type, "text")
self.assertEqual(f["birthday"].widget_type, "splithiddendatetime")
def test_boundfield_css_classes(self):
form = Person()
field = form["first_name"]
self.assertEqual(field.css_classes(), "")
self.assertEqual(field.css_classes(extra_classes=""), "")
self.assertEqual(field.css_classes(extra_classes="test"), "test")
self.assertEqual(field.css_classes(extra_classes="test test"), "test")
def test_label_suffix_override(self):
"""
BoundField label_suffix (if provided) overrides Form label_suffix
"""
class SomeForm(Form):
field = CharField()
boundfield = SomeForm(label_suffix="!")["field"]
self.assertHTMLEqual(
boundfield.label_tag(label_suffix="$"),
'<label for="id_field">Field$</label>',
)
self.assertHTMLEqual(
boundfield.legend_tag(label_suffix="$"),
'<legend for="id_field">Field$</legend>',
)
def test_error_dict(self):
class MyForm(Form):
foo = CharField()
bar = CharField()
def clean(self):
raise ValidationError(
"Non-field error.", code="secret", params={"a": 1, "b": 2}
)
form = MyForm({})
self.assertIs(form.is_valid(), False)
errors = form.errors.as_text()
control = [
"* foo\n * This field is required.",
"* bar\n * This field is required.",
"* __all__\n * Non-field error.",
]
for error in control:
self.assertIn(error, errors)
errors = form.errors.as_ul()
control = [
'<li>foo<ul class="errorlist"><li>This field is required.</li></ul></li>',
'<li>bar<ul class="errorlist"><li>This field is required.</li></ul></li>',
'<li>__all__<ul class="errorlist nonfield"><li>Non-field error.</li></ul>'
"</li>",
]
for error in control:
self.assertInHTML(error, errors)
errors = form.errors.get_json_data()
control = {
"foo": [{"code": "required", "message": "This field is required."}],
"bar": [{"code": "required", "message": "This field is required."}],
"__all__": [{"code": "secret", "message": "Non-field error."}],
}
self.assertEqual(errors, control)
self.assertEqual(json.dumps(errors), form.errors.as_json())
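    # Sketch (added, hypothetical view code): get_json_data()/as_json() are
    # convenient for AJAX endpoints, e.g.:
    #   if not form.is_valid():
    #       return JsonResponse({"errors": form.errors.get_json_data()}, status=400)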
def test_error_dict_as_json_escape_html(self):
"""#21962 - adding html escape flag to ErrorDict"""
class MyForm(Form):
foo = CharField()
bar = CharField()
def clean(self):
raise ValidationError(
"<p>Non-field error.</p>",
code="secret",
params={"a": 1, "b": 2},
)
control = {
"foo": [{"code": "required", "message": "This field is required."}],
"bar": [{"code": "required", "message": "This field is required."}],
"__all__": [{"code": "secret", "message": "<p>Non-field error.</p>"}],
}
form = MyForm({})
self.assertFalse(form.is_valid())
errors = json.loads(form.errors.as_json())
self.assertEqual(errors, control)
escaped_error = "<p>Non-field error.</p>"
self.assertEqual(
form.errors.get_json_data(escape_html=True)["__all__"][0]["message"],
escaped_error,
)
errors = json.loads(form.errors.as_json(escape_html=True))
control["__all__"][0]["message"] = escaped_error
self.assertEqual(errors, control)
def test_error_list(self):
e = ErrorList()
e.append("Foo")
e.append(ValidationError("Foo%(bar)s", code="foobar", params={"bar": "bar"}))
self.assertIsInstance(e, list)
self.assertIn("Foo", e)
self.assertIn("Foo", ValidationError(e))
self.assertEqual(e.as_text(), "* Foo\n* Foobar")
self.assertEqual(
e.as_ul(), '<ul class="errorlist"><li>Foo</li><li>Foobar</li></ul>'
)
errors = e.get_json_data()
self.assertEqual(
errors,
[{"message": "Foo", "code": ""}, {"message": "Foobar", "code": "foobar"}],
)
self.assertEqual(json.dumps(errors), e.as_json())
def test_error_list_class_not_specified(self):
e = ErrorList()
e.append("Foo")
e.append(ValidationError("Foo%(bar)s", code="foobar", params={"bar": "bar"}))
self.assertEqual(
e.as_ul(), '<ul class="errorlist"><li>Foo</li><li>Foobar</li></ul>'
)
def test_error_list_class_has_one_class_specified(self):
e = ErrorList(error_class="foobar-error-class")
e.append("Foo")
e.append(ValidationError("Foo%(bar)s", code="foobar", params={"bar": "bar"}))
self.assertEqual(
e.as_ul(),
'<ul class="errorlist foobar-error-class"><li>Foo</li><li>Foobar</li></ul>',
)
def test_error_list_with_hidden_field_errors_has_correct_class(self):
class Person(Form):
first_name = CharField()
last_name = CharField(widget=HiddenInput)
p = Person({"first_name": "John"})
self.assertHTMLEqual(
p.as_ul(),
"""<li><ul class="errorlist nonfield">
<li>(Hidden field last_name) This field is required.</li></ul></li><li>
<label for="id_first_name">First name:</label>
<input id="id_first_name" name="first_name" type="text" value="John" required>
<input id="id_last_name" name="last_name" type="hidden"></li>""",
)
self.assertHTMLEqual(
p.as_p(),
"""
<ul class="errorlist nonfield">
<li>(Hidden field last_name) This field is required.</li></ul>
<p><label for="id_first_name">First name:</label>
<input id="id_first_name" name="first_name" type="text" value="John"
required>
<input id="id_last_name" name="last_name" type="hidden"></p>
""",
)
self.assertHTMLEqual(
p.as_table(),
"""<tr><td colspan="2"><ul class="errorlist nonfield">
<li>(Hidden field last_name) This field is required.</li></ul></td></tr>
<tr><th><label for="id_first_name">First name:</label></th><td>
<input id="id_first_name" name="first_name" type="text" value="John" required>
<input id="id_last_name" name="last_name" type="hidden"></td></tr>""",
)
self.assertHTMLEqual(
p.as_div(),
'<ul class="errorlist nonfield"><li>(Hidden field last_name) This field '
'is required.</li></ul><div><label for="id_first_name">First name:</label>'
'<input id="id_first_name" name="first_name" type="text" value="John" '
'required><input id="id_last_name" name="last_name" type="hidden"></div>',
)
def test_error_list_with_non_field_errors_has_correct_class(self):
class Person(Form):
first_name = CharField()
last_name = CharField()
def clean(self):
raise ValidationError("Generic validation error")
p = Person({"first_name": "John", "last_name": "Lennon"})
self.assertHTMLEqual(
str(p.non_field_errors()),
'<ul class="errorlist nonfield"><li>Generic validation error</li></ul>',
)
self.assertHTMLEqual(
p.as_ul(),
"""<li>
<ul class="errorlist nonfield"><li>Generic validation error</li></ul></li>
<li><label for="id_first_name">First name:</label>
<input id="id_first_name" name="first_name" type="text" value="John" required></li>
<li><label for="id_last_name">Last name:</label>
<input id="id_last_name" name="last_name" type="text" value="Lennon" required></li>""",
)
self.assertHTMLEqual(
p.non_field_errors().as_text(), "* Generic validation error"
)
self.assertHTMLEqual(
p.as_p(),
"""<ul class="errorlist nonfield"><li>Generic validation error</li></ul>
<p><label for="id_first_name">First name:</label>
<input id="id_first_name" name="first_name" type="text" value="John" required></p>
<p><label for="id_last_name">Last name:</label>
<input id="id_last_name" name="last_name" type="text" value="Lennon" required></p>""",
)
self.assertHTMLEqual(
p.as_table(),
"""
<tr><td colspan="2"><ul class="errorlist nonfield">
<li>Generic validation error</li></ul></td></tr>
<tr><th><label for="id_first_name">First name:</label></th><td>
<input id="id_first_name" name="first_name" type="text" value="John"
required>
</td></tr>
<tr><th><label for="id_last_name">Last name:</label></th><td>
<input id="id_last_name" name="last_name" type="text" value="Lennon"
required>
</td></tr>
""",
)
self.assertHTMLEqual(
p.as_div(),
'<ul class="errorlist nonfield"><li>Generic validation error</li></ul>'
'<div><label for="id_first_name">First name:</label><input '
'id="id_first_name" name="first_name" type="text" value="John" required>'
'</div><div><label for="id_last_name">Last name:</label><input '
'id="id_last_name" name="last_name" type="text" value="Lennon" required>'
"</div>",
)
def test_error_escaping(self):
class TestForm(Form):
hidden = CharField(widget=HiddenInput(), required=False)
visible = CharField()
def clean_hidden(self):
raise ValidationError('Foo & "bar"!')
clean_visible = clean_hidden
form = TestForm({"hidden": "a", "visible": "b"})
form.is_valid()
self.assertHTMLEqual(
form.as_ul(),
'<li><ul class="errorlist nonfield">'
"<li>(Hidden field hidden) Foo & "bar"!</li></ul></li>"
'<li><ul class="errorlist"><li>Foo & "bar"!</li></ul>'
'<label for="id_visible">Visible:</label> '
'<input type="text" name="visible" value="b" id="id_visible" required>'
'<input type="hidden" name="hidden" value="a" id="id_hidden"></li>',
)
def test_baseform_repr(self):
"""
BaseForm.__repr__() should contain some basic information about the
form.
"""
p = Person()
self.assertEqual(
repr(p),
"<Person bound=False, valid=Unknown, "
"fields=(first_name;last_name;birthday)>",
)
p = Person(
{"first_name": "John", "last_name": "Lennon", "birthday": "1940-10-9"}
)
self.assertEqual(
repr(p),
"<Person bound=True, valid=Unknown, "
"fields=(first_name;last_name;birthday)>",
)
p.is_valid()
self.assertEqual(
repr(p),
"<Person bound=True, valid=True, fields=(first_name;last_name;birthday)>",
)
p = Person(
{"first_name": "John", "last_name": "Lennon", "birthday": "fakedate"}
)
p.is_valid()
self.assertEqual(
repr(p),
"<Person bound=True, valid=False, fields=(first_name;last_name;birthday)>",
)
def test_baseform_repr_dont_trigger_validation(self):
"""
BaseForm.__repr__() shouldn't trigger the form validation.
"""
p = Person(
{"first_name": "John", "last_name": "Lennon", "birthday": "fakedate"}
)
repr(p)
with self.assertRaises(AttributeError):
p.cleaned_data
self.assertFalse(p.is_valid())
self.assertEqual(p.cleaned_data, {"first_name": "John", "last_name": "Lennon"})
def test_accessing_clean(self):
class UserForm(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
def clean(self):
data = self.cleaned_data
if not self.errors:
data["username"] = data["username"].lower()
return data
f = UserForm({"username": "SirRobin", "password": "blue"})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data["username"], "sirrobin")
def test_changing_cleaned_data_nothing_returned(self):
class UserForm(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
def clean(self):
self.cleaned_data["username"] = self.cleaned_data["username"].lower()
# don't return anything
f = UserForm({"username": "SirRobin", "password": "blue"})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data["username"], "sirrobin")
def test_changing_cleaned_data_in_clean(self):
class UserForm(Form):
username = CharField(max_length=10)
password = CharField(widget=PasswordInput)
def clean(self):
data = self.cleaned_data
# Return a different dict. We have not changed self.cleaned_data.
return {
"username": data["username"].lower(),
"password": "this_is_not_a_secret",
}
f = UserForm({"username": "SirRobin", "password": "blue"})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data["username"], "sirrobin")
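    # Note (added): whatever Form.clean() returns replaces form.cleaned_data,
    # which is why the lowercased username is visible above even though
    # self.cleaned_data was never mutated in place.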
def test_multipart_encoded_form(self):
class FormWithoutFile(Form):
username = CharField()
class FormWithFile(Form):
username = CharField()
file = FileField()
class FormWithImage(Form):
image = ImageField()
self.assertFalse(FormWithoutFile().is_multipart())
self.assertTrue(FormWithFile().is_multipart())
self.assertTrue(FormWithImage().is_multipart())
def test_html_safe(self):
class SimpleForm(Form):
username = CharField()
form = SimpleForm()
self.assertTrue(hasattr(SimpleForm, "__html__"))
self.assertEqual(str(form), form.__html__())
self.assertTrue(hasattr(form["username"], "__html__"))
self.assertEqual(str(form["username"]), form["username"].__html__())
def test_use_required_attribute_true(self):
class MyForm(Form):
use_required_attribute = True
f1 = CharField(max_length=30)
f2 = CharField(max_length=30, required=False)
f3 = CharField(widget=Textarea)
f4 = ChoiceField(choices=[("P", "Python"), ("J", "Java")])
form = MyForm()
self.assertHTMLEqual(
form.as_p(),
'<p><label for="id_f1">F1:</label>'
'<input id="id_f1" maxlength="30" name="f1" type="text" required></p>'
'<p><label for="id_f2">F2:</label>'
'<input id="id_f2" maxlength="30" name="f2" type="text"></p>'
'<p><label for="id_f3">F3:</label>'
'<textarea cols="40" id="id_f3" name="f3" rows="10" required>'
"</textarea></p>"
'<p><label for="id_f4">F4:</label> <select id="id_f4" name="f4">'
'<option value="P">Python</option>'
'<option value="J">Java</option>'
"</select></p>",
)
self.assertHTMLEqual(
form.as_ul(),
'<li><label for="id_f1">F1:</label> '
'<input id="id_f1" maxlength="30" name="f1" type="text" required></li>'
'<li><label for="id_f2">F2:</label>'
'<input id="id_f2" maxlength="30" name="f2" type="text"></li>'
'<li><label for="id_f3">F3:</label>'
'<textarea cols="40" id="id_f3" name="f3" rows="10" required>'
"</textarea></li>"
'<li><label for="id_f4">F4:</label> <select id="id_f4" name="f4">'
'<option value="P">Python</option>'
'<option value="J">Java</option>'
"</select></li>",
)
self.assertHTMLEqual(
form.as_table(),
'<tr><th><label for="id_f1">F1:</label></th>'
'<td><input id="id_f1" maxlength="30" name="f1" type="text" required>'
"</td></tr>"
'<tr><th><label for="id_f2">F2:</label></th>'
'<td><input id="id_f2" maxlength="30" name="f2" type="text"></td></tr>'
'<tr><th><label for="id_f3">F3:</label></th>'
'<td><textarea cols="40" id="id_f3" name="f3" rows="10" required>'
"</textarea></td></tr>"
'<tr><th><label for="id_f4">F4:</label></th><td>'
'<select id="id_f4" name="f4">'
'<option value="P">Python</option>'
'<option value="J">Java</option>'
"</select></td></tr>",
)
self.assertHTMLEqual(
form.render(form.template_name_div),
'<div><label for="id_f1">F1:</label><input id="id_f1" maxlength="30" '
'name="f1" type="text" required></div><div><label for="id_f2">F2:</label>'
'<input id="id_f2" maxlength="30" name="f2" type="text"></div><div><label '
'for="id_f3">F3:</label><textarea cols="40" id="id_f3" name="f3" '
'rows="10" required></textarea></div><div><label for="id_f4">F4:</label>'
'<select id="id_f4" name="f4"><option value="P">Python</option>'
'<option value="J">Java</option></select></div>',
)
def test_use_required_attribute_false(self):
class MyForm(Form):
use_required_attribute = False
f1 = CharField(max_length=30)
f2 = CharField(max_length=30, required=False)
f3 = CharField(widget=Textarea)
f4 = ChoiceField(choices=[("P", "Python"), ("J", "Java")])
form = MyForm()
self.assertHTMLEqual(
form.as_p(),
'<p><label for="id_f1">F1:</label>'
'<input id="id_f1" maxlength="30" name="f1" type="text"></p>'
'<p><label for="id_f2">F2:</label>'
'<input id="id_f2" maxlength="30" name="f2" type="text"></p>'
'<p><label for="id_f3">F3:</label>'
'<textarea cols="40" id="id_f3" name="f3" rows="10"></textarea></p>'
'<p><label for="id_f4">F4:</label> <select id="id_f4" name="f4">'
'<option value="P">Python</option>'
'<option value="J">Java</option>'
"</select></p>",
)
self.assertHTMLEqual(
form.as_ul(),
'<li><label for="id_f1">F1:</label>'
'<input id="id_f1" maxlength="30" name="f1" type="text"></li>'
'<li><label for="id_f2">F2:</label>'
'<input id="id_f2" maxlength="30" name="f2" type="text"></li>'
'<li><label for="id_f3">F3:</label>'
'<textarea cols="40" id="id_f3" name="f3" rows="10"></textarea></li>'
'<li><label for="id_f4">F4:</label> <select id="id_f4" name="f4">'
'<option value="P">Python</option>'
'<option value="J">Java</option>'
"</select></li>",
)
self.assertHTMLEqual(
form.as_table(),
'<tr><th><label for="id_f1">F1:</label></th>'
'<td><input id="id_f1" maxlength="30" name="f1" type="text"></td></tr>'
'<tr><th><label for="id_f2">F2:</label></th>'
'<td><input id="id_f2" maxlength="30" name="f2" type="text"></td></tr>'
'<tr><th><label for="id_f3">F3:</label></th><td>'
'<textarea cols="40" id="id_f3" name="f3" rows="10">'
"</textarea></td></tr>"
'<tr><th><label for="id_f4">F4:</label></th><td>'
'<select id="id_f4" name="f4">'
'<option value="P">Python</option>'
'<option value="J">Java</option>'
"</select></td></tr>",
)
self.assertHTMLEqual(
form.render(form.template_name_div),
'<div><label for="id_f1">F1:</label> <input id="id_f1" maxlength="30" '
'name="f1" type="text"></div><div><label for="id_f2">F2:</label>'
'<input id="id_f2" maxlength="30" name="f2" type="text"></div><div>'
'<label for="id_f3">F3:</label> <textarea cols="40" id="id_f3" name="f3" '
'rows="10"></textarea></div><div><label for="id_f4">F4:</label>'
'<select id="id_f4" name="f4"><option value="P">Python</option>'
'<option value="J">Java</option></select></div>',
)
def test_only_hidden_fields(self):
# A form with *only* hidden fields that has errors is going to be very unusual.
class HiddenForm(Form):
data = IntegerField(widget=HiddenInput)
f = HiddenForm({})
self.assertHTMLEqual(
f.as_p(),
'<ul class="errorlist nonfield">'
"<li>(Hidden field data) This field is required.</li></ul>\n<p> "
'<input type="hidden" name="data" id="id_data"></p>',
)
self.assertHTMLEqual(
f.as_table(),
'<tr><td colspan="2"><ul class="errorlist nonfield">'
"<li>(Hidden field data) This field is required.</li></ul>"
'<input type="hidden" name="data" id="id_data"></td></tr>',
)
def test_field_named_data(self):
class DataForm(Form):
data = CharField(max_length=10)
f = DataForm({"data": "xyzzy"})
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data, {"data": "xyzzy"})
def test_empty_data_files_multi_value_dict(self):
p = Person()
self.assertIsInstance(p.data, MultiValueDict)
self.assertIsInstance(p.files, MultiValueDict)
def test_field_deep_copy_error_messages(self):
class CustomCharField(CharField):
def __init__(self, **kwargs):
kwargs["error_messages"] = {"invalid": "Form custom error message."}
super().__init__(**kwargs)
field = CustomCharField()
field_copy = copy.deepcopy(field)
self.assertIsInstance(field_copy, CustomCharField)
self.assertIsNot(field_copy.error_messages, field.error_messages)
def test_label_does_not_include_new_line(self):
form = Person()
field = form["first_name"]
self.assertEqual(
field.label_tag(), '<label for="id_first_name">First name:</label>'
)
self.assertEqual(
field.legend_tag(),
'<legend for="id_first_name">First name:</legend>',
)
@override_settings(USE_THOUSAND_SEPARATOR=True)
def test_label_attrs_not_localized(self):
form = Person()
field = form["first_name"]
self.assertHTMLEqual(
field.label_tag(attrs={"number": 9999}),
'<label number="9999" for="id_first_name">First name:</label>',
)
self.assertHTMLEqual(
field.legend_tag(attrs={"number": 9999}),
'<legend number="9999" for="id_first_name">First name:</legend>',
)
def test_remove_cached_field(self):
class TestForm(Form):
name = CharField(max_length=10)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Populate fields cache.
[field for field in self]
                # Remove the cached field.
del self.fields["name"]
f = TestForm({"name": "abcde"})
with self.assertRaises(KeyError):
f["name"]
@jinja2_tests
class Jinja2FormsTestCase(FormsTestCase):
pass
class CustomRenderer(DjangoTemplates):
form_template_name = "forms_tests/form_snippet.html"
class RendererTests(SimpleTestCase):
def test_default(self):
form = Form()
self.assertEqual(form.renderer, get_default_renderer())
def test_kwarg_instance(self):
custom = CustomRenderer()
form = Form(renderer=custom)
self.assertEqual(form.renderer, custom)
def test_kwarg_class(self):
custom = CustomRenderer()
form = Form(renderer=custom)
self.assertEqual(form.renderer, custom)
def test_attribute_instance(self):
class CustomForm(Form):
default_renderer = DjangoTemplates()
form = CustomForm()
self.assertEqual(form.renderer, CustomForm.default_renderer)
def test_attribute_class(self):
class CustomForm(Form):
default_renderer = CustomRenderer
form = CustomForm()
self.assertIsInstance(form.renderer, CustomForm.default_renderer)
def test_attribute_override(self):
class CustomForm(Form):
default_renderer = DjangoTemplates()
custom = CustomRenderer()
form = CustomForm(renderer=custom)
self.assertEqual(form.renderer, custom)
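# Note (added): the resolution order exercised above is: the renderer kwarg
# wins, then the form class's default_renderer (instantiated when given as a
# class), then the project-wide get_default_renderer() fallback.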
class TemplateTests(SimpleTestCase):
def test_iterate_radios(self):
f = FrameworkForm(auto_id="id_%s")
t = Template(
"{% for radio in form.language %}"
'<div class="myradio">{{ radio }}</div>'
"{% endfor %}"
)
self.assertHTMLEqual(
t.render(Context({"form": f})),
'<div class="myradio"><label for="id_language_0">'
'<input id="id_language_0" name="language" type="radio" value="P" '
"required> Python</label></div>"
'<div class="myradio"><label for="id_language_1">'
'<input id="id_language_1" name="language" type="radio" value="J" '
"required> Java</label></div>",
)
def test_iterate_checkboxes(self):
f = SongForm({"composers": ["J", "P"]}, auto_id=False)
t = Template(
"{% for checkbox in form.composers %}"
'<div class="mycheckbox">{{ checkbox }}</div>'
"{% endfor %}"
)
self.assertHTMLEqual(
t.render(Context({"form": f})),
'<div class="mycheckbox"><label>'
'<input checked name="composers" type="checkbox" value="J"> '
"John Lennon</label></div>"
'<div class="mycheckbox"><label>'
'<input checked name="composers" type="checkbox" value="P"> '
"Paul McCartney</label></div>",
)
def test_templates_with_forms(self):
class UserRegistration(Form):
username = CharField(
max_length=10,
help_text=("Good luck picking a username that doesn't already exist."),
)
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput)
def clean(self):
if (
self.cleaned_data.get("password1")
and self.cleaned_data.get("password2")
and self.cleaned_data["password1"] != self.cleaned_data["password2"]
):
raise ValidationError("Please make sure your passwords match.")
return self.cleaned_data
# There is full flexibility in displaying form fields in a template.
# Just pass a Form instance to the template, and use "dot" access to
# refer to individual fields. However, this flexibility comes with the
# responsibility of displaying all the errors, including any that might
# not be associated with a particular field.
t = Template(
"<form>"
"{{ form.username.errors.as_ul }}"
"<p><label>Your username: {{ form.username }}</label></p>"
"{{ form.password1.errors.as_ul }}"
"<p><label>Password: {{ form.password1 }}</label></p>"
"{{ form.password2.errors.as_ul }}"
"<p><label>Password (again): {{ form.password2 }}</label></p>"
'<input type="submit" required>'
"</form>"
)
f = UserRegistration(auto_id=False)
self.assertHTMLEqual(
t.render(Context({"form": f})),
"<form>"
"<p><label>Your username: "
'<input type="text" name="username" maxlength="10" required></label></p>'
"<p><label>Password: "
'<input type="password" name="password1" required></label></p>'
"<p><label>Password (again): "
'<input type="password" name="password2" required></label></p>'
'<input type="submit" required>'
"</form>",
)
f = UserRegistration({"username": "django"}, auto_id=False)
self.assertHTMLEqual(
t.render(Context({"form": f})),
"<form>"
"<p><label>Your username: "
'<input type="text" name="username" value="django" maxlength="10" required>'
"</label></p>"
'<ul class="errorlist"><li>This field is required.</li></ul><p>'
"<label>Password: "
'<input type="password" name="password1" required></label></p>'
'<ul class="errorlist"><li>This field is required.</li></ul>'
"<p><label>Password (again): "
'<input type="password" name="password2" required></label></p>'
'<input type="submit" required>'
"</form>",
)
# Use form.[field].label to output a field's label. 'label' for a field
        # can be specified by using the 'label' argument to a Field class. If
# 'label' is not specified, Django will use the field name with
# underscores converted to spaces, and the initial letter capitalized.
t = Template(
"<form>"
"<p><label>{{ form.username.label }}: {{ form.username }}</label></p>"
"<p><label>{{ form.password1.label }}: {{ form.password1 }}</label></p>"
"<p><label>{{ form.password2.label }}: {{ form.password2 }}</label></p>"
'<input type="submit" required>'
"</form>"
)
f = UserRegistration(auto_id=False)
self.assertHTMLEqual(
t.render(Context({"form": f})),
"<form>"
"<p><label>Username: "
'<input type="text" name="username" maxlength="10" required></label></p>'
"<p><label>Password1: "
'<input type="password" name="password1" required></label></p>'
"<p><label>Password2: "
'<input type="password" name="password2" required></label></p>'
'<input type="submit" required>'
"</form>",
)
# Use form.[field].label_tag to output a field's label with a <label>
# tag wrapped around it, but *only* if the given field has an "id"
# attribute. Recall from above that passing the "auto_id" argument to a
# Form gives each field an "id" attribute.
t = Template(
"<form>"
"<p>{{ form.username.label_tag }} {{ form.username }}</p>"
"<p>{{ form.password1.label_tag }} {{ form.password1 }}</p>"
"<p>{{ form.password2.label_tag }} {{ form.password2 }}</p>"
'<input type="submit" required>'
"</form>"
)
self.assertHTMLEqual(
t.render(Context({"form": f})),
"<form>"
"<p>Username: "
'<input type="text" name="username" maxlength="10" required></p>'
'<p>Password1: <input type="password" name="password1" required></p>'
'<p>Password2: <input type="password" name="password2" required></p>'
'<input type="submit" required>'
"</form>",
)
f = UserRegistration(auto_id="id_%s")
self.assertHTMLEqual(
t.render(Context({"form": f})),
"<form>"
'<p><label for="id_username">Username:</label>'
'<input id="id_username" type="text" name="username" maxlength="10" '
"required></p>"
'<p><label for="id_password1">Password1:</label>'
'<input type="password" name="password1" id="id_password1" required></p>'
'<p><label for="id_password2">Password2:</label>'
'<input type="password" name="password2" id="id_password2" required></p>'
'<input type="submit" required>'
"</form>",
)
# Use form.[field].legend_tag to output a field's label with a <legend>
# tag wrapped around it, but *only* if the given field has an "id"
# attribute. Recall from above that passing the "auto_id" argument to a
# Form gives each field an "id" attribute.
t = Template(
"<form>"
"<p>{{ form.username.legend_tag }} {{ form.username }}</p>"
"<p>{{ form.password1.legend_tag }} {{ form.password1 }}</p>"
"<p>{{ form.password2.legend_tag }} {{ form.password2 }}</p>"
'<input type="submit" required>'
"</form>"
)
f = UserRegistration(auto_id=False)
self.assertHTMLEqual(
t.render(Context({"form": f})),
"<form>"
"<p>Username: "
'<input type="text" name="username" maxlength="10" required></p>'
'<p>Password1: <input type="password" name="password1" required></p>'
'<p>Password2: <input type="password" name="password2" required></p>'
'<input type="submit" required>'
"</form>",
)
f = UserRegistration(auto_id="id_%s")
self.assertHTMLEqual(
t.render(Context({"form": f})),
"<form>"
'<p><legend for="id_username">Username:</legend>'
'<input id="id_username" type="text" name="username" maxlength="10" '
"required></p>"
'<p><legend for="id_password1">Password1:</legend>'
'<input type="password" name="password1" id="id_password1" required></p>'
'<p><legend for="id_password2">Password2:</legend>'
'<input type="password" name="password2" id="id_password2" required></p>'
'<input type="submit" required>'
"</form>",
)
# Use form.[field].help_text to output a field's help text. If the
# given field does not have help text, nothing will be output.
t = Template(
"<form>"
"<p>{{ form.username.label_tag }} {{ form.username }}<br>"
"{{ form.username.help_text }}</p>"
"<p>{{ form.password1.label_tag }} {{ form.password1 }}</p>"
"<p>{{ form.password2.label_tag }} {{ form.password2 }}</p>"
'<input type="submit" required>'
"</form>"
)
f = UserRegistration(auto_id=False)
self.assertHTMLEqual(
t.render(Context({"form": f})),
"<form>"
"<p>Username: "
'<input type="text" name="username" maxlength="10" required><br>'
"Good luck picking a username that doesn't already exist.</p>"
'<p>Password1: <input type="password" name="password1" required></p>'
'<p>Password2: <input type="password" name="password2" required></p>'
'<input type="submit" required>'
"</form>",
)
self.assertEqual(
Template("{{ form.password1.help_text }}").render(Context({"form": f})),
"",
)
        # To display the errors that aren't associated with a particular field
        # -- e.g. the errors caused by Form.clean() -- use
# {{ form.non_field_errors }} in the template. If used on its own, it
# is displayed as a <ul> (or an empty string, if the list of errors is
# empty).
t = Template(
"<form>"
"{{ form.username.errors.as_ul }}"
"<p><label>Your username: {{ form.username }}</label></p>"
"{{ form.password1.errors.as_ul }}"
"<p><label>Password: {{ form.password1 }}</label></p>"
"{{ form.password2.errors.as_ul }}"
"<p><label>Password (again): {{ form.password2 }}</label></p>"
'<input type="submit" required>'
"</form>"
)
f = UserRegistration(
{"username": "django", "password1": "foo", "password2": "bar"},
auto_id=False,
)
self.assertHTMLEqual(
t.render(Context({"form": f})),
"<form>"
"<p><label>Your username: "
'<input type="text" name="username" value="django" maxlength="10" required>'
"</label></p>"
"<p><label>Password: "
'<input type="password" name="password1" required></label></p>'
"<p><label>Password (again): "
'<input type="password" name="password2" required></label></p>'
'<input type="submit" required>'
"</form>",
)
t = Template(
"<form>"
"{{ form.non_field_errors }}"
"{{ form.username.errors.as_ul }}"
"<p><label>Your username: {{ form.username }}</label></p>"
"{{ form.password1.errors.as_ul }}"
"<p><label>Password: {{ form.password1 }}</label></p>"
"{{ form.password2.errors.as_ul }}"
"<p><label>Password (again): {{ form.password2 }}</label></p>"
'<input type="submit" required>'
"</form>"
)
self.assertHTMLEqual(
t.render(Context({"form": f})),
"<form>"
'<ul class="errorlist nonfield">'
"<li>Please make sure your passwords match.</li></ul>"
"<p><label>Your username: "
'<input type="text" name="username" value="django" maxlength="10" required>'
"</label></p>"
"<p><label>Password: "
'<input type="password" name="password1" required></label></p>'
"<p><label>Password (again): "
'<input type="password" name="password2" required></label></p>'
'<input type="submit" required>'
"</form>",
)
def test_basic_processing_in_view(self):
class UserRegistration(Form):
username = CharField(max_length=10)
password1 = CharField(widget=PasswordInput)
password2 = CharField(widget=PasswordInput)
def clean(self):
if (
self.cleaned_data.get("password1")
and self.cleaned_data.get("password2")
and self.cleaned_data["password1"] != self.cleaned_data["password2"]
):
raise ValidationError("Please make sure your passwords match.")
return self.cleaned_data
def my_function(method, post_data):
if method == "POST":
form = UserRegistration(post_data, auto_id=False)
else:
form = UserRegistration(auto_id=False)
if form.is_valid():
return "VALID: %r" % sorted(form.cleaned_data.items())
t = Template(
'<form method="post">'
"{{ form }}"
'<input type="submit" required>'
"</form>"
)
return t.render(Context({"form": form}))
# GET with an empty form and no errors.
self.assertHTMLEqual(
my_function("GET", {}),
'<form method="post">'
"<div>Username:"
'<input type="text" name="username" maxlength="10" required></div>'
"<div>Password1:"
'<input type="password" name="password1" required></div>'
"<div>Password2:"
'<input type="password" name="password2" required></div>'
'<input type="submit" required>'
"</form>",
)
# POST with erroneous data, a redisplayed form, with errors.
self.assertHTMLEqual(
my_function(
"POST",
{
"username": "this-is-a-long-username",
"password1": "foo",
"password2": "bar",
},
),
'<form method="post">'
'<ul class="errorlist nonfield">'
"<li>Please make sure your passwords match.</li></ul>"
'<div>Username:<ul class="errorlist">'
"<li>Ensure this value has at most 10 characters (it has 23).</li></ul>"
'<input type="text" name="username" '
'value="this-is-a-long-username" maxlength="10" required></div>'
"<div>Password1:"
'<input type="password" name="password1" required></div>'
"<div>Password2:"
'<input type="password" name="password2" required></div>'
'<input type="submit" required>'
"</form>",
)
# POST with valid data (the success message).
self.assertEqual(
my_function(
"POST",
{
"username": "adrian",
"password1": "secret",
"password2": "secret",
},
),
"VALID: [('password1', 'secret'), ('password2', 'secret'), "
"('username', 'adrian')]",
)
class OverrideTests(SimpleTestCase):
@override_settings(FORM_RENDERER="forms_tests.tests.test_forms.CustomRenderer")
def test_custom_renderer_template_name(self):
class Person(Form):
first_name = CharField()
get_default_renderer.cache_clear()
t = Template("{{ form }}")
html = t.render(Context({"form": Person()}))
expected = """
<div class="fieldWrapper"><label for="id_first_name">First name:</label>
<input type="text" name="first_name" required id="id_first_name"></div>
"""
self.assertHTMLEqual(html, expected)
get_default_renderer.cache_clear()
def test_per_form_template_name(self):
class Person(Form):
first_name = CharField()
template_name = "forms_tests/form_snippet.html"
t = Template("{{ form }}")
html = t.render(Context({"form": Person()}))
expected = """
<div class="fieldWrapper"><label for="id_first_name">First name:</label>
<input type="text" name="first_name" required id="id_first_name"></div>
"""
self.assertHTMLEqual(html, expected)
def test_errorlist_override(self):
class CustomErrorList(ErrorList):
template_name = "forms_tests/error.html"
class CommentForm(Form):
name = CharField(max_length=50, required=False)
email = EmailField()
comment = CharField()
data = {"email": "invalid"}
f = CommentForm(data, auto_id=False, error_class=CustomErrorList)
self.assertHTMLEqual(
f.as_p(),
'<p>Name: <input type="text" name="name" maxlength="50"></p>'
'<div class="errorlist">'
'<div class="error">Enter a valid email address.</div></div>'
'<p>Email: <input type="email" name="email" value="invalid" required></p>'
'<div class="errorlist">'
'<div class="error">This field is required.</div></div>'
'<p>Comment: <input type="text" name="comment" required></p>',
)
def test_cyclic_context_boundfield_render(self):
class FirstNameForm(Form):
first_name = CharField()
template_name_label = "forms_tests/cyclic_context_boundfield_render.html"
f = FirstNameForm()
try:
f.render()
except RecursionError:
self.fail("Cyclic reference in BoundField.render().")
def test_legend_tag(self):
class CustomFrameworkForm(FrameworkForm):
template_name = "forms_tests/legend_test.html"
required_css_class = "required"
f = CustomFrameworkForm()
self.assertHTMLEqual(
str(f),
'<label for="id_name" class="required">Name:</label>'
'<legend class="required">Language:</legend>',
)
class DeprecationTests(SimpleTestCase):
def test_warning(self):
from django.forms.utils import DEFAULT_TEMPLATE_DEPRECATION_MSG
with isolate_lru_cache(get_default_renderer), self.settings(
FORM_RENDERER="django.forms.renderers.DjangoTemplates"
), self.assertRaisesMessage(
RemovedInDjango50Warning, DEFAULT_TEMPLATE_DEPRECATION_MSG
):
form = Person()
str(form)
|
bfa3d665d2ffe1e8cf0f51ffa4efc5224a8cadcafa662350e40e9c71c9ac6b9b | import mimetypes
import unittest
from os import path
from urllib.parse import quote
from django.conf.urls.static import static
from django.core.exceptions import ImproperlyConfigured
from django.http import FileResponse, HttpResponseNotModified
from django.test import SimpleTestCase, override_settings
from django.utils.http import http_date
from django.views.static import was_modified_since
from .. import urls
from ..urls import media_dir
@override_settings(DEBUG=True, ROOT_URLCONF="view_tests.urls")
class StaticTests(SimpleTestCase):
"""Tests django views in django/views/static.py"""
prefix = "site_media"
def test_serve(self):
"The static view can serve static media"
media_files = ["file.txt", "file.txt.gz", "%2F.txt"]
for filename in media_files:
response = self.client.get("/%s/%s" % (self.prefix, quote(filename)))
response_content = b"".join(response)
file_path = path.join(media_dir, filename)
with open(file_path, "rb") as fp:
self.assertEqual(fp.read(), response_content)
self.assertEqual(
len(response_content), int(response.headers["Content-Length"])
)
self.assertEqual(
mimetypes.guess_type(file_path)[1],
response.get("Content-Encoding", None),
)
def test_chunked(self):
"The static view should stream files in chunks to avoid large memory usage"
response = self.client.get("/%s/%s" % (self.prefix, "long-line.txt"))
first_chunk = next(response.streaming_content)
self.assertEqual(len(first_chunk), FileResponse.block_size)
second_chunk = next(response.streaming_content)
response.close()
# strip() to prevent OS line endings from causing differences
self.assertEqual(len(second_chunk.strip()), 1449)
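    # Note (added): FileResponse streams the file in FileResponse.block_size
    # byte chunks (4096 by default), so at most one chunk is held in memory
    # at a time.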
def test_unknown_mime_type(self):
response = self.client.get("/%s/file.unknown" % self.prefix)
self.assertEqual("application/octet-stream", response.headers["Content-Type"])
response.close()
def test_copes_with_empty_path_component(self):
file_name = "file.txt"
response = self.client.get("/%s//%s" % (self.prefix, file_name))
response_content = b"".join(response)
with open(path.join(media_dir, file_name), "rb") as fp:
self.assertEqual(fp.read(), response_content)
def test_is_modified_since(self):
file_name = "file.txt"
response = self.client.get(
"/%s/%s" % (self.prefix, file_name),
HTTP_IF_MODIFIED_SINCE="Thu, 1 Jan 1970 00:00:00 GMT",
)
response_content = b"".join(response)
with open(path.join(media_dir, file_name), "rb") as fp:
self.assertEqual(fp.read(), response_content)
def test_not_modified_since(self):
file_name = "file.txt"
response = self.client.get(
"/%s/%s" % (self.prefix, file_name),
HTTP_IF_MODIFIED_SINCE="Mon, 18 Jan 2038 05:14:07 GMT"
# This is 24h before max Unix time. Remember to fix Django and
# update this test well before 2038 :)
)
self.assertIsInstance(response, HttpResponseNotModified)
def test_invalid_if_modified_since(self):
"""Handle bogus If-Modified-Since values gracefully
Assume that a file is modified since an invalid timestamp as per RFC
9110 Section 13.1.3.
"""
file_name = "file.txt"
invalid_date = "Mon, 28 May 999999999999 28:25:26 GMT"
response = self.client.get(
"/%s/%s" % (self.prefix, file_name), HTTP_IF_MODIFIED_SINCE=invalid_date
)
response_content = b"".join(response)
with open(path.join(media_dir, file_name), "rb") as fp:
self.assertEqual(fp.read(), response_content)
self.assertEqual(len(response_content), int(response.headers["Content-Length"]))
def test_invalid_if_modified_since2(self):
"""Handle even more bogus If-Modified-Since values gracefully
Assume that a file is modified since an invalid timestamp as per RFC
9110 Section 13.1.3.
"""
file_name = "file.txt"
invalid_date = ": 1291108438, Wed, 20 Oct 2010 14:05:00 GMT"
response = self.client.get(
"/%s/%s" % (self.prefix, file_name), HTTP_IF_MODIFIED_SINCE=invalid_date
)
response_content = b"".join(response)
with open(path.join(media_dir, file_name), "rb") as fp:
self.assertEqual(fp.read(), response_content)
self.assertEqual(len(response_content), int(response.headers["Content-Length"]))
def test_404(self):
response = self.client.get("/%s/nonexistent_resource" % self.prefix)
self.assertEqual(404, response.status_code)
def test_index(self):
response = self.client.get("/%s/" % self.prefix)
self.assertContains(response, "Index of ./")
# Directories have a trailing slash.
self.assertIn("subdir/", response.context["file_list"])
def test_index_subdir(self):
response = self.client.get("/%s/subdir/" % self.prefix)
self.assertContains(response, "Index of subdir/")
        # Files with a leading dot (e.g. .hidden) aren't displayed.
self.assertEqual(response.context["file_list"], ["visible"])
@override_settings(
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"OPTIONS": {
"loaders": [
(
"django.template.loaders.locmem.Loader",
{
"static/directory_index.html": "Test index",
},
),
],
},
}
]
)
def test_index_custom_template(self):
response = self.client.get("/%s/" % self.prefix)
self.assertEqual(response.content, b"Test index")
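# Editor's sketch (not part of the original tests): the index pages asserted
# above come from django.views.static.serve with show_indexes=True, which
# renders "static/directory_index.html" (shadowed by the locmem loader in
# test_index_custom_template). A minimal URL pattern enabling this:
def _example_indexed_serve_pattern():
    from django.urls import re_path
    from django.views.static import serve

    return re_path(
        r"^site_media/(?P<path>.*)$",
        serve,
        {"document_root": media_dir, "show_indexes": True},
    )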
class StaticHelperTest(StaticTests):
"""
Test case to make sure the static URL pattern helper works as expected
"""
def setUp(self):
super().setUp()
self._old_views_urlpatterns = urls.urlpatterns[:]
urls.urlpatterns += static("media/", document_root=media_dir)
def tearDown(self):
super().tearDown()
urls.urlpatterns = self._old_views_urlpatterns
def test_prefix(self):
self.assertEqual(static("test")[0].pattern.regex.pattern, "^test(?P<path>.*)$")
@override_settings(DEBUG=False)
def test_debug_off(self):
"""No URLs are served if DEBUG=False."""
self.assertEqual(static("test"), [])
def test_empty_prefix(self):
with self.assertRaisesMessage(
ImproperlyConfigured, "Empty static prefix not permitted"
):
static("")
def test_special_prefix(self):
"""No URLs are served if prefix contains a netloc part."""
self.assertEqual(static("http://example.org"), [])
self.assertEqual(static("//example.org"), [])
class StaticUtilsTests(unittest.TestCase):
def test_was_modified_since_fp(self):
"""
A floating point mtime does not disturb was_modified_since (#18675).
"""
mtime = 1343416141.107817
header = http_date(mtime)
self.assertFalse(was_modified_since(header, mtime))
def test_was_modified_since_empty_string(self):
self.assertTrue(was_modified_since(header="", mtime=1))
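# Editor's sketch of the was_modified_since() contract exercised above: it
# returns True ("serve the file") unless If-Modified-Since is a valid date
# at or after the file's mtime.
def _example_was_modified_since():
    mtime = 1343416141.107817
    # Header equal to the mtime: not modified since, so a 304 is appropriate.
    assert not was_modified_since(http_date(mtime), mtime)
    # Header one hour older than the mtime: the file was modified since.
    assert was_modified_since(http_date(mtime - 3600), mtime)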
|
013dd8eb60bd1c9b4263e244d99410674c2b648de376b9554b8b7db8b910e4ee | from unittest.mock import MagicMock, patch
from django.db import DEFAULT_DB_ALIAS, connection, connections, transaction
from django.db.backends.base.base import BaseDatabaseWrapper
from django.test import (
SimpleTestCase,
TestCase,
TransactionTestCase,
skipUnlessDBFeature,
)
from django.test.utils import CaptureQueriesContext, override_settings
from ..models import Person, Square
class DatabaseWrapperTests(SimpleTestCase):
def test_repr(self):
conn = connections[DEFAULT_DB_ALIAS]
self.assertEqual(
repr(conn),
f"<DatabaseWrapper vendor={connection.vendor!r} alias='default'>",
)
def test_initialization_class_attributes(self):
"""
The "initialization" class attributes like client_class and
creation_class should be set on the class and reflected in the
corresponding instance attributes of the instantiated backend.
"""
conn = connections[DEFAULT_DB_ALIAS]
conn_class = type(conn)
attr_names = [
("client_class", "client"),
("creation_class", "creation"),
("features_class", "features"),
("introspection_class", "introspection"),
("ops_class", "ops"),
("validation_class", "validation"),
]
for class_attr_name, instance_attr_name in attr_names:
class_attr_value = getattr(conn_class, class_attr_name)
self.assertIsNotNone(class_attr_value)
instance_attr_value = getattr(conn, instance_attr_name)
self.assertIsInstance(instance_attr_value, class_attr_value)
def test_initialization_display_name(self):
self.assertEqual(BaseDatabaseWrapper.display_name, "unknown")
self.assertNotEqual(connection.display_name, "unknown")
def test_get_database_version(self):
with patch.object(BaseDatabaseWrapper, "__init__", return_value=None):
msg = (
"subclasses of BaseDatabaseWrapper may require a "
"get_database_version() method."
)
with self.assertRaisesMessage(NotImplementedError, msg):
BaseDatabaseWrapper().get_database_version()
def test_check_database_version_supported_with_none_as_database_version(self):
with patch.object(connection.features, "minimum_database_version", None):
connection.check_database_version_supported()
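# Editor's sketch (hypothetical backend, not shipped with Django),
# illustrating the contract checked above: backends subclass
# BaseDatabaseWrapper, point the *_class attributes at their own
# implementations, set display_name, and implement get_database_version().
class _ExampleWrapper(BaseDatabaseWrapper):
    display_name = "Example DB"

    def get_database_version(self):
        # Normally parsed from the server; hardcoded for illustration.
        return (1, 0)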
class DatabaseWrapperLoggingTests(TransactionTestCase):
available_apps = []
@override_settings(DEBUG=True)
def test_commit_debug_log(self):
conn = connections[DEFAULT_DB_ALIAS]
with CaptureQueriesContext(conn):
with self.assertLogs("django.db.backends", "DEBUG") as cm:
with transaction.atomic():
Person.objects.create(first_name="first", last_name="last")
self.assertGreaterEqual(len(conn.queries_log), 3)
self.assertEqual(conn.queries_log[-3]["sql"], "BEGIN")
self.assertRegex(
cm.output[0],
r"DEBUG:django.db.backends:\(\d+.\d{3}\) "
rf"BEGIN; args=None; alias={DEFAULT_DB_ALIAS}",
)
self.assertEqual(conn.queries_log[-1]["sql"], "COMMIT")
self.assertRegex(
cm.output[-1],
r"DEBUG:django.db.backends:\(\d+.\d{3}\) "
rf"COMMIT; args=None; alias={DEFAULT_DB_ALIAS}",
)
@override_settings(DEBUG=True)
def test_rollback_debug_log(self):
conn = connections[DEFAULT_DB_ALIAS]
with CaptureQueriesContext(conn):
with self.assertLogs("django.db.backends", "DEBUG") as cm:
with self.assertRaises(Exception), transaction.atomic():
Person.objects.create(first_name="first", last_name="last")
raise Exception("Force rollback")
self.assertEqual(conn.queries_log[-1]["sql"], "ROLLBACK")
self.assertRegex(
cm.output[-1],
r"DEBUG:django.db.backends:\(\d+.\d{3}\) "
rf"ROLLBACK; args=None; alias={DEFAULT_DB_ALIAS}",
)
def test_no_logs_without_debug(self):
with self.assertNoLogs("django.db.backends", "DEBUG"):
with self.assertRaises(Exception), transaction.atomic():
Person.objects.create(first_name="first", last_name="last")
raise Exception("Force rollback")
conn = connections[DEFAULT_DB_ALIAS]
self.assertEqual(len(conn.queries_log), 0)
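# Editor's sketch: the queries_log inspected above is also what
# CaptureQueriesContext exposes, and the context works regardless of DEBUG.
def _example_capture_queries():
    with CaptureQueriesContext(connection) as ctx:
        with connection.cursor() as cursor:
            cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
    return [query["sql"] for query in ctx.captured_queries]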
class ExecuteWrapperTests(TestCase):
@staticmethod
def call_execute(connection, params=None):
ret_val = "1" if params is None else "%s"
sql = "SELECT " + ret_val + connection.features.bare_select_suffix
with connection.cursor() as cursor:
cursor.execute(sql, params)
def call_executemany(self, connection, params=None):
# executemany() must use an update query. Make sure it does nothing
# by putting a false condition in the WHERE clause.
sql = "DELETE FROM {} WHERE 0=1 AND 0=%s".format(Square._meta.db_table)
if params is None:
params = [(i,) for i in range(3)]
with connection.cursor() as cursor:
cursor.executemany(sql, params)
@staticmethod
def mock_wrapper():
return MagicMock(side_effect=lambda execute, *args: execute(*args))
def test_wrapper_invoked(self):
wrapper = self.mock_wrapper()
with connection.execute_wrapper(wrapper):
self.call_execute(connection)
self.assertTrue(wrapper.called)
(_, sql, params, many, context), _ = wrapper.call_args
self.assertIn("SELECT", sql)
self.assertIsNone(params)
self.assertIs(many, False)
self.assertEqual(context["connection"], connection)
def test_wrapper_invoked_many(self):
wrapper = self.mock_wrapper()
with connection.execute_wrapper(wrapper):
self.call_executemany(connection)
self.assertTrue(wrapper.called)
(_, sql, param_list, many, context), _ = wrapper.call_args
self.assertIn("DELETE", sql)
self.assertIsInstance(param_list, (list, tuple))
self.assertIs(many, True)
self.assertEqual(context["connection"], connection)
def test_database_queried(self):
wrapper = self.mock_wrapper()
with connection.execute_wrapper(wrapper):
with connection.cursor() as cursor:
sql = "SELECT 17" + connection.features.bare_select_suffix
cursor.execute(sql)
seventeen = cursor.fetchall()
self.assertEqual(list(seventeen), [(17,)])
self.call_executemany(connection)
def test_nested_wrapper_invoked(self):
outer_wrapper = self.mock_wrapper()
inner_wrapper = self.mock_wrapper()
with connection.execute_wrapper(outer_wrapper), connection.execute_wrapper(
inner_wrapper
):
self.call_execute(connection)
self.assertEqual(inner_wrapper.call_count, 1)
self.call_executemany(connection)
self.assertEqual(inner_wrapper.call_count, 2)
def test_outer_wrapper_blocks(self):
def blocker(*args):
pass
wrapper = self.mock_wrapper()
c = connection # This alias shortens the next line.
with c.execute_wrapper(wrapper), c.execute_wrapper(blocker), c.execute_wrapper(
wrapper
):
with c.cursor() as cursor:
cursor.execute("The database never sees this")
self.assertEqual(wrapper.call_count, 1)
cursor.executemany("The database never sees this %s", [("either",)])
self.assertEqual(wrapper.call_count, 2)
def test_wrapper_gets_sql(self):
wrapper = self.mock_wrapper()
sql = "SELECT 'aloha'" + connection.features.bare_select_suffix
with connection.execute_wrapper(wrapper), connection.cursor() as cursor:
cursor.execute(sql)
(_, reported_sql, _, _, _), _ = wrapper.call_args
self.assertEqual(reported_sql, sql)
def test_wrapper_connection_specific(self):
wrapper = self.mock_wrapper()
with connections["other"].execute_wrapper(wrapper):
self.assertEqual(connections["other"].execute_wrappers, [wrapper])
self.call_execute(connection)
self.assertFalse(wrapper.called)
self.assertEqual(connection.execute_wrappers, [])
self.assertEqual(connections["other"].execute_wrappers, [])
class ConnectionHealthChecksTests(SimpleTestCase):
databases = {"default"}
def setUp(self):
# All test cases here need newly configured and created connections.
# Use the default db connection for convenience.
connection.close()
self.addCleanup(connection.close)
def patch_settings_dict(self, conn_health_checks):
self.settings_dict_patcher = patch.dict(
connection.settings_dict,
{
**connection.settings_dict,
"CONN_MAX_AGE": None,
"CONN_HEALTH_CHECKS": conn_health_checks,
},
)
self.settings_dict_patcher.start()
self.addCleanup(self.settings_dict_patcher.stop)
def run_query(self):
with connection.cursor() as cursor:
cursor.execute("SELECT 42" + connection.features.bare_select_suffix)
@skipUnlessDBFeature("test_db_allows_multiple_connections")
def test_health_checks_enabled(self):
self.patch_settings_dict(conn_health_checks=True)
self.assertIsNone(connection.connection)
# Newly created connections are considered healthy without performing
# the health check.
with patch.object(connection, "is_usable", side_effect=AssertionError):
self.run_query()
old_connection = connection.connection
# Simulate request_finished.
connection.close_if_unusable_or_obsolete()
self.assertIs(old_connection, connection.connection)
# Simulate connection health check failing.
with patch.object(
connection, "is_usable", return_value=False
) as mocked_is_usable:
self.run_query()
new_connection = connection.connection
# A new connection is established.
self.assertIsNot(new_connection, old_connection)
# Only one health check per "request" is performed, so the next
# query will carry on even if the health check fails. Next query
# succeeds because the real connection is healthy and only the
# health check failure is mocked.
self.run_query()
self.assertIs(new_connection, connection.connection)
self.assertEqual(mocked_is_usable.call_count, 1)
# Simulate request_finished.
connection.close_if_unusable_or_obsolete()
# The underlying connection is being reused further with health checks
# succeeding.
self.run_query()
self.run_query()
self.assertIs(new_connection, connection.connection)
@skipUnlessDBFeature("test_db_allows_multiple_connections")
def test_health_checks_enabled_errors_occurred(self):
self.patch_settings_dict(conn_health_checks=True)
self.assertIsNone(connection.connection)
# Newly created connections are considered healthy without performing
# the health check.
with patch.object(connection, "is_usable", side_effect=AssertionError):
self.run_query()
old_connection = connection.connection
# Simulate errors_occurred.
connection.errors_occurred = True
# Simulate request_started (the connection is healthy).
connection.close_if_unusable_or_obsolete()
# Persistent connections are enabled.
self.assertIs(old_connection, connection.connection)
# No additional health checks after the one in
# close_if_unusable_or_obsolete() are executed during this "request"
# when running queries.
with patch.object(connection, "is_usable", side_effect=AssertionError):
self.run_query()
@skipUnlessDBFeature("test_db_allows_multiple_connections")
def test_health_checks_disabled(self):
self.patch_settings_dict(conn_health_checks=False)
self.assertIsNone(connection.connection)
# Newly created connections are considered healthy without performing
# the health check.
with patch.object(connection, "is_usable", side_effect=AssertionError):
self.run_query()
old_connection = connection.connection
# Simulate request_finished.
connection.close_if_unusable_or_obsolete()
        # Persistent connections are enabled (the connection is not closed).
self.assertIs(old_connection, connection.connection)
# Health checks are not performed.
with patch.object(connection, "is_usable", side_effect=AssertionError):
self.run_query()
# Health check wasn't performed and the connection is unchanged.
self.assertIs(old_connection, connection.connection)
self.run_query()
        # The connection also remains unchanged after the next query during
        # the current "request".
self.assertIs(old_connection, connection.connection)
@skipUnlessDBFeature("test_db_allows_multiple_connections")
def test_set_autocommit_health_checks_enabled(self):
self.patch_settings_dict(conn_health_checks=True)
self.assertIsNone(connection.connection)
# Newly created connections are considered healthy without performing
# the health check.
with patch.object(connection, "is_usable", side_effect=AssertionError):
# Simulate outermost atomic block: changing autocommit for
# a connection.
connection.set_autocommit(False)
self.run_query()
connection.commit()
connection.set_autocommit(True)
old_connection = connection.connection
# Simulate request_finished.
connection.close_if_unusable_or_obsolete()
# Persistent connections are enabled.
self.assertIs(old_connection, connection.connection)
# Simulate connection health check failing.
with patch.object(
connection, "is_usable", return_value=False
) as mocked_is_usable:
# Simulate outermost atomic block: changing autocommit for
# a connection.
connection.set_autocommit(False)
new_connection = connection.connection
self.assertIsNot(new_connection, old_connection)
# Only one health check per "request" is performed, so a query will
# carry on even if the health check fails. This query succeeds
# because the real connection is healthy and only the health check
# failure is mocked.
self.run_query()
connection.commit()
connection.set_autocommit(True)
# The connection is unchanged.
self.assertIs(new_connection, connection.connection)
self.assertEqual(mocked_is_usable.call_count, 1)
# Simulate request_finished.
connection.close_if_unusable_or_obsolete()
# The underlying connection is being reused further with health checks
# succeeding.
connection.set_autocommit(False)
self.run_query()
connection.commit()
connection.set_autocommit(True)
self.assertIs(new_connection, connection.connection)
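# Editor's sketch: outside of tests, the behavior above is driven by two
# per-alias settings in DATABASES (values here are illustrative, not
# defaults).
def _example_health_check_settings():
    return {
        "ENGINE": "django.db.backends.postgresql",
        "NAME": "app",
        "CONN_MAX_AGE": None,  # keep connections open indefinitely
        "CONN_HEALTH_CHECKS": True,  # ping at most once per request cycle
    }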
class MultiDatabaseTests(TestCase):
databases = {"default", "other"}
def test_multi_database_init_connection_state_called_once(self):
for db in self.databases:
with self.subTest(database=db):
with patch.object(connections[db], "commit", return_value=None):
with patch.object(
connections[db],
"check_database_version_supported",
) as mocked_check_database_version_supported:
connections[db].init_connection_state()
after_first_calls = len(
mocked_check_database_version_supported.mock_calls
)
connections[db].init_connection_state()
self.assertEqual(
len(mocked_check_database_version_supported.mock_calls),
after_first_calls,
)
|
6858d47946dab459e0a61b64c2eaf9d33ba66efb566debd939413bed78608e9f | import asyncio
import difflib
import inspect
import json
import logging
import posixpath
import sys
import threading
import unittest
import warnings
from collections import Counter
from contextlib import contextmanager
from copy import copy, deepcopy
from difflib import get_close_matches
from functools import wraps
from unittest.suite import _DebugResult
from unittest.util import safe_repr
from urllib.parse import (
parse_qsl,
unquote,
urlencode,
urljoin,
urlparse,
urlsplit,
urlunparse,
)
from urllib.request import url2pathname
from asgiref.sync import async_to_sync
from django.apps import apps
from django.conf import settings
from django.core import mail
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.files import locks
from django.core.handlers.wsgi import WSGIHandler, get_path_info
from django.core.management import call_command
from django.core.management.color import no_style
from django.core.management.sql import emit_post_migrate_signal
from django.core.servers.basehttp import ThreadedWSGIServer, WSGIRequestHandler
from django.core.signals import setting_changed
from django.db import DEFAULT_DB_ALIAS, connection, connections, transaction
from django.forms.fields import CharField
from django.http import QueryDict
from django.http.request import split_domain_port, validate_host
from django.http.response import HttpResponseBase
from django.test.client import AsyncClient, Client
from django.test.html import HTMLParseError, parse_html
from django.test.signals import template_rendered
from django.test.utils import (
CaptureQueriesContext,
ContextList,
compare_xml,
modify_settings,
override_settings,
)
from django.utils.deprecation import RemovedInDjango50Warning, RemovedInDjango51Warning
from django.utils.functional import classproperty
from django.utils.version import PY310
from django.views.static import serve
logger = logging.getLogger("django.test")
__all__ = (
"TestCase",
"TransactionTestCase",
"SimpleTestCase",
"skipIfDBFeature",
"skipUnlessDBFeature",
)
def to_list(value):
"""Put value into a list if it's not already one."""
if not isinstance(value, list):
value = [value]
return value
def assert_and_parse_html(self, html, user_msg, msg):
try:
dom = parse_html(html)
except HTMLParseError as e:
standardMsg = "%s\n%s" % (msg, e)
self.fail(self._formatMessage(user_msg, standardMsg))
return dom
class _AssertNumQueriesContext(CaptureQueriesContext):
def __init__(self, test_case, num, connection):
self.test_case = test_case
self.num = num
super().__init__(connection)
def __exit__(self, exc_type, exc_value, traceback):
super().__exit__(exc_type, exc_value, traceback)
if exc_type is not None:
return
executed = len(self)
self.test_case.assertEqual(
executed,
self.num,
"%d queries executed, %d expected\nCaptured queries were:\n%s"
% (
executed,
self.num,
"\n".join(
"%d. %s" % (i, query["sql"])
for i, query in enumerate(self.captured_queries, start=1)
),
),
)
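# Editor's illustration of the two calling conventions this context backs
# through assertNumQueries(), defined later in this module ("Book" is an
# assumed model):
#
#     with self.assertNumQueries(2):
#         Book.objects.create(title="a")
#         list(Book.objects.all())
#
#     self.assertNumQueries(1, list, Book.objects.all())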
class _AssertTemplateUsedContext:
def __init__(self, test_case, template_name, msg_prefix="", count=None):
self.test_case = test_case
self.template_name = template_name
self.msg_prefix = msg_prefix
self.count = count
self.rendered_templates = []
self.rendered_template_names = []
self.context = ContextList()
def on_template_render(self, sender, signal, template, context, **kwargs):
self.rendered_templates.append(template)
self.rendered_template_names.append(template.name)
self.context.append(copy(context))
def test(self):
self.test_case._assert_template_used(
self.template_name,
self.rendered_template_names,
self.msg_prefix,
self.count,
)
def __enter__(self):
template_rendered.connect(self.on_template_render)
return self
def __exit__(self, exc_type, exc_value, traceback):
template_rendered.disconnect(self.on_template_render)
if exc_type is not None:
return
self.test()
class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext):
def test(self):
self.test_case.assertFalse(
self.template_name in self.rendered_template_names,
f"{self.msg_prefix}Template '{self.template_name}' was used "
f"unexpectedly in rendering the response",
)
class DatabaseOperationForbidden(AssertionError):
pass
class _DatabaseFailure:
def __init__(self, wrapped, message):
self.wrapped = wrapped
self.message = message
def __call__(self):
raise DatabaseOperationForbidden(self.message)
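# Editor's illustration: once _add_databases_failures() below swaps these
# wrappers in, a stray query inside a SimpleTestCase fails fast ("Person" is
# an assumed model):
#
#     class NoDBTests(SimpleTestCase):
#         def test_oops(self):
#             Person.objects.count()  # raises DatabaseOperationForbidden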
# RemovedInDjango50Warning
class _AssertFormErrorDeprecationHelper:
@staticmethod
def assertFormError(self, response, form, field, errors, msg_prefix=""):
"""
        Search through all the rendered contexts of the `response` for a form
        named `form`, then dispatch to the new assertFormError() using that
        instance.
If multiple contexts contain the form, they're all checked in order and any
failure will abort (this matches the old behavior).
"""
warning_msg = (
f"Passing response to assertFormError() is deprecated. Use the form object "
f"directly: assertFormError(response.context[{form!r}], {field!r}, ...)"
)
warnings.warn(warning_msg, RemovedInDjango50Warning, stacklevel=2)
full_msg_prefix = f"{msg_prefix}: " if msg_prefix else ""
contexts = to_list(response.context) if response.context is not None else []
if not contexts:
self.fail(
f"{full_msg_prefix}Response did not use any contexts to render the "
f"response"
)
# Search all contexts for the error.
found_form = False
for i, context in enumerate(contexts):
if form not in context:
continue
found_form = True
self.assertFormError(context[form], field, errors, msg_prefix=msg_prefix)
if not found_form:
self.fail(
f"{full_msg_prefix}The form '{form}' was not used to render the "
f"response"
)
@staticmethod
def assertFormSetError(
self, response, formset, form_index, field, errors, msg_prefix=""
):
"""
Search for a formset named "formset" in the "response" and dispatch to
the new assertFormSetError() using that instance. If the name is found
        in multiple contexts, they're all checked in order and any failure will
abort the test.
"""
warning_msg = (
f"Passing response to assertFormSetError() is deprecated. Use the formset "
f"object directly: assertFormSetError(response.context[{formset!r}], "
f"{form_index!r}, ...)"
)
warnings.warn(warning_msg, RemovedInDjango50Warning, stacklevel=2)
full_msg_prefix = f"{msg_prefix}: " if msg_prefix else ""
contexts = to_list(response.context) if response.context is not None else []
if not contexts:
self.fail(
f"{full_msg_prefix}Response did not use any contexts to render the "
f"response"
)
found_formset = False
for i, context in enumerate(contexts):
if formset not in context or not hasattr(context[formset], "forms"):
continue
found_formset = True
self.assertFormSetError(
context[formset], form_index, field, errors, msg_prefix
)
if not found_formset:
self.fail(
f"{full_msg_prefix}The formset '{formset}' was not used to render the "
f"response"
)
@classmethod
def patch_signature(cls, new_method):
"""
        Replace the decorated method with a new one that inspects the passed
        args/kwargs and dispatches to the old implementation (with a
        deprecation warning) when it detects the old signature.
"""
@wraps(new_method)
def patched_method(self, *args, **kwargs):
old_method = getattr(cls, new_method.__name__)
old_signature = inspect.signature(old_method)
try:
old_bound_args = old_signature.bind(self, *args, **kwargs)
except TypeError:
# If old signature doesn't match then either:
# 1) new signature will match
# 2) or a TypeError will be raised showing the user information
# about the new signature.
return new_method(self, *args, **kwargs)
new_signature = inspect.signature(new_method)
try:
new_bound_args = new_signature.bind(self, *args, **kwargs)
except TypeError:
# Old signature matches but not the new one (because of
# previous try/except).
return old_method(self, *args, **kwargs)
# If both signatures match, decide on which method to call by
# inspecting the first arg (arg[0] = self).
assert old_bound_args.args[1] == new_bound_args.args[1]
if hasattr(
old_bound_args.args[1], "context"
): # Looks like a response object => old method.
return old_method(self, *args, **kwargs)
elif isinstance(old_bound_args.args[1], HttpResponseBase):
raise ValueError(
f"{old_method.__name__}() is only usable on responses fetched "
f"using the Django test Client."
)
else:
return new_method(self, *args, **kwargs)
return patched_method
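# Editor's note on the dispatch above: both call shapes keep working during
# the deprecation period, e.g.
#
#     self.assertFormError(response, "form", "field", errors)  # old, warns
#     self.assertFormError(response.context["form"], "field", errors)  # new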
class SimpleTestCase(unittest.TestCase):
# The class we'll use for the test client self.client.
# Can be overridden in derived classes.
client_class = Client
async_client_class = AsyncClient
_overridden_settings = None
_modified_settings = None
databases = set()
_disallowed_database_msg = (
"Database %(operation)s to %(alias)r are not allowed in SimpleTestCase "
"subclasses. Either subclass TestCase or TransactionTestCase to ensure "
"proper test isolation or add %(alias)r to %(test)s.databases to silence "
"this failure."
)
_disallowed_connection_methods = [
("connect", "connections"),
("temporary_connection", "connections"),
("cursor", "queries"),
("chunked_cursor", "queries"),
]
@classmethod
def setUpClass(cls):
super().setUpClass()
if cls._overridden_settings:
cls._cls_overridden_context = override_settings(**cls._overridden_settings)
cls._cls_overridden_context.enable()
cls.addClassCleanup(cls._cls_overridden_context.disable)
if cls._modified_settings:
cls._cls_modified_context = modify_settings(cls._modified_settings)
cls._cls_modified_context.enable()
cls.addClassCleanup(cls._cls_modified_context.disable)
cls._add_databases_failures()
cls.addClassCleanup(cls._remove_databases_failures)
@classmethod
def _validate_databases(cls):
if cls.databases == "__all__":
return frozenset(connections)
for alias in cls.databases:
if alias not in connections:
message = (
"%s.%s.databases refers to %r which is not defined in "
"settings.DATABASES."
% (
cls.__module__,
cls.__qualname__,
alias,
)
)
close_matches = get_close_matches(alias, list(connections))
if close_matches:
message += " Did you mean %r?" % close_matches[0]
raise ImproperlyConfigured(message)
return frozenset(cls.databases)
@classmethod
def _add_databases_failures(cls):
cls.databases = cls._validate_databases()
for alias in connections:
if alias in cls.databases:
continue
connection = connections[alias]
for name, operation in cls._disallowed_connection_methods:
message = cls._disallowed_database_msg % {
"test": "%s.%s" % (cls.__module__, cls.__qualname__),
"alias": alias,
"operation": operation,
}
method = getattr(connection, name)
setattr(connection, name, _DatabaseFailure(method, message))
@classmethod
def _remove_databases_failures(cls):
for alias in connections:
if alias in cls.databases:
continue
connection = connections[alias]
for name, _ in cls._disallowed_connection_methods:
method = getattr(connection, name)
setattr(connection, name, method.wrapped)
def __call__(self, result=None):
"""
Wrapper around default __call__ method to perform common Django test
set up. This means that user-defined Test Cases aren't required to
include a call to super().setUp().
"""
self._setup_and_call(result)
def debug(self):
"""Perform the same as __call__(), without catching the exception."""
debug_result = _DebugResult()
self._setup_and_call(debug_result, debug=True)
def _setup_and_call(self, result, debug=False):
"""
Perform the following in order: pre-setup, run test, post-teardown,
skipping pre/post hooks if test is set to be skipped.
If debug=True, reraise any errors in setup and use super().debug()
instead of __call__() to run the test.
"""
testMethod = getattr(self, self._testMethodName)
skipped = getattr(self.__class__, "__unittest_skip__", False) or getattr(
testMethod, "__unittest_skip__", False
)
# Convert async test methods.
if asyncio.iscoroutinefunction(testMethod):
setattr(self, self._testMethodName, async_to_sync(testMethod))
if not skipped:
try:
self._pre_setup()
except Exception:
if debug:
raise
result.addError(self, sys.exc_info())
return
if debug:
super().debug()
else:
super().__call__(result)
if not skipped:
try:
self._post_teardown()
except Exception:
if debug:
raise
result.addError(self, sys.exc_info())
return
def _pre_setup(self):
"""
Perform pre-test setup:
* Create a test client.
* Clear the mail test outbox.
"""
self.client = self.client_class()
self.async_client = self.async_client_class()
mail.outbox = []
def _post_teardown(self):
"""Perform post-test things."""
pass
def settings(self, **kwargs):
"""
A context manager that temporarily sets a setting and reverts to the
original value when exiting the context.
"""
return override_settings(**kwargs)
def modify_settings(self, **kwargs):
"""
        A context manager that temporarily applies changes to a list setting
        and reverts back to the original value when exiting the context.
"""
return modify_settings(**kwargs)
def assertRedirects(
self,
response,
expected_url,
status_code=302,
target_status_code=200,
msg_prefix="",
fetch_redirect_response=True,
):
"""
Assert that a response redirected to a specific URL and that the
redirect URL can be loaded.
Won't work for external links since it uses the test client to do a
request (use fetch_redirect_response=False to check such links without
fetching them).
"""
if msg_prefix:
msg_prefix += ": "
if hasattr(response, "redirect_chain"):
# The request was a followed redirect
self.assertTrue(
response.redirect_chain,
msg_prefix
+ (
"Response didn't redirect as expected: Response code was %d "
"(expected %d)"
)
% (response.status_code, status_code),
)
self.assertEqual(
response.redirect_chain[0][1],
status_code,
msg_prefix
+ (
"Initial response didn't redirect as expected: Response code was "
"%d (expected %d)"
)
% (response.redirect_chain[0][1], status_code),
)
url, status_code = response.redirect_chain[-1]
self.assertEqual(
response.status_code,
target_status_code,
msg_prefix
+ (
"Response didn't redirect as expected: Final Response code was %d "
"(expected %d)"
)
% (response.status_code, target_status_code),
)
else:
# Not a followed redirect
self.assertEqual(
response.status_code,
status_code,
msg_prefix
+ (
"Response didn't redirect as expected: Response code was %d "
"(expected %d)"
)
% (response.status_code, status_code),
)
url = response.url
scheme, netloc, path, query, fragment = urlsplit(url)
# Prepend the request path to handle relative path redirects.
if not path.startswith("/"):
url = urljoin(response.request["PATH_INFO"], url)
path = urljoin(response.request["PATH_INFO"], path)
if fetch_redirect_response:
# netloc might be empty, or in cases where Django tests the
# HTTP scheme, the convention is for netloc to be 'testserver'.
# Trust both as "internal" URLs here.
domain, port = split_domain_port(netloc)
if domain and not validate_host(domain, settings.ALLOWED_HOSTS):
raise ValueError(
"The test client is unable to fetch remote URLs (got %s). "
"If the host is served by Django, add '%s' to ALLOWED_HOSTS. "
"Otherwise, use "
"assertRedirects(..., fetch_redirect_response=False)."
% (url, domain)
)
# Get the redirection page, using the same client that was used
# to obtain the original response.
extra = response.client.extra or {}
redirect_response = response.client.get(
path,
QueryDict(query),
secure=(scheme == "https"),
**extra,
)
self.assertEqual(
redirect_response.status_code,
target_status_code,
msg_prefix
+ (
"Couldn't retrieve redirection page '%s': response code was %d "
"(expected %d)"
)
% (path, redirect_response.status_code, target_status_code),
)
self.assertURLEqual(
url,
expected_url,
msg_prefix
+ "Response redirected to '%s', expected '%s'" % (url, expected_url),
)
def assertURLEqual(self, url1, url2, msg_prefix=""):
"""
Assert that two URLs are the same, ignoring the order of query string
parameters except for parameters with the same name.
For example, /path/?x=1&y=2 is equal to /path/?y=2&x=1, but
/path/?a=1&a=2 isn't equal to /path/?a=2&a=1.
"""
def normalize(url):
"""Sort the URL's query string parameters."""
url = str(url) # Coerce reverse_lazy() URLs.
scheme, netloc, path, params, query, fragment = urlparse(url)
query_parts = sorted(parse_qsl(query))
return urlunparse(
(scheme, netloc, path, params, urlencode(query_parts), fragment)
)
self.assertEqual(
normalize(url1),
normalize(url2),
msg_prefix + "Expected '%s' to equal '%s'." % (url1, url2),
)
def _assert_contains(self, response, text, status_code, msg_prefix, html):
# If the response supports deferred rendering and hasn't been rendered
# yet, then ensure that it does get rendered before proceeding further.
if (
hasattr(response, "render")
and callable(response.render)
and not response.is_rendered
):
response.render()
if msg_prefix:
msg_prefix += ": "
self.assertEqual(
response.status_code,
status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code),
)
if response.streaming:
content = b"".join(response.streaming_content)
else:
content = response.content
if not isinstance(text, bytes) or html:
text = str(text)
content = content.decode(response.charset)
text_repr = "'%s'" % text
else:
text_repr = repr(text)
if html:
content = assert_and_parse_html(
self, content, None, "Response's content is not valid HTML:"
)
text = assert_and_parse_html(
self, text, None, "Second argument is not valid HTML:"
)
real_count = content.count(text)
return (text_repr, real_count, msg_prefix)
def assertContains(
self, response, text, count=None, status_code=200, msg_prefix="", html=False
):
"""
Assert that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected) and that
``text`` occurs ``count`` times in the content of the response.
If ``count`` is None, the count doesn't matter - the assertion is true
if the text occurs at least once in the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html
)
if count is not None:
self.assertEqual(
real_count,
count,
msg_prefix
+ "Found %d instances of %s in response (expected %d)"
% (real_count, text_repr, count),
)
else:
self.assertTrue(
real_count != 0, msg_prefix + "Couldn't find %s in response" % text_repr
)
def assertNotContains(
self, response, text, status_code=200, msg_prefix="", html=False
):
"""
Assert that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected) and that
``text`` doesn't occur in the content of the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html
)
self.assertEqual(
real_count, 0, msg_prefix + "Response should not contain %s" % text_repr
)
def _check_test_client_response(self, response, attribute, method_name):
"""
Raise a ValueError if the given response doesn't have the required
attribute.
"""
if not hasattr(response, attribute):
raise ValueError(
f"{method_name}() is only usable on responses fetched using "
"the Django test Client."
)
def _assert_form_error(self, form, field, errors, msg_prefix, form_repr):
if not form.is_bound:
self.fail(
f"{msg_prefix}The {form_repr} is not bound, it will never have any "
f"errors."
)
if field is not None and field not in form.fields:
self.fail(
f"{msg_prefix}The {form_repr} does not contain the field {field!r}."
)
if field is None:
field_errors = form.non_field_errors()
failure_message = f"The non-field errors of {form_repr} don't match."
else:
field_errors = form.errors.get(field, [])
failure_message = (
f"The errors of field {field!r} on {form_repr} don't match."
)
self.assertEqual(field_errors, errors, msg_prefix + failure_message)
# RemovedInDjango50Warning: When the deprecation ends, remove the
# decorator.
@_AssertFormErrorDeprecationHelper.patch_signature
def assertFormError(self, form, field, errors, msg_prefix=""):
"""
Assert that a field named "field" on the given form object has specific
errors.
        errors can be either a single error message or a list of error
        messages. Using errors=[] tests that the field has no errors.
You can pass field=None to check the form's non-field errors.
"""
if errors is None:
warnings.warn(
"Passing errors=None to assertFormError() is deprecated, use "
"errors=[] instead.",
RemovedInDjango50Warning,
stacklevel=2,
)
errors = []
if msg_prefix:
msg_prefix += ": "
errors = to_list(errors)
self._assert_form_error(form, field, errors, msg_prefix, f"form {form!r}")
# RemovedInDjango51Warning.
def assertFormsetError(self, *args, **kw):
warnings.warn(
"assertFormsetError() is deprecated in favor of assertFormSetError().",
category=RemovedInDjango51Warning,
stacklevel=2,
)
return self.assertFormSetError(*args, **kw)
# RemovedInDjango50Warning: When the deprecation ends, remove the
# decorator.
@_AssertFormErrorDeprecationHelper.patch_signature
def assertFormSetError(self, formset, form_index, field, errors, msg_prefix=""):
"""
Similar to assertFormError() but for formsets.
Use form_index=None to check the formset's non-form errors (in that
case, you must also use field=None).
Otherwise use an integer to check the formset's n-th form for errors.
Other parameters are the same as assertFormError().
"""
if errors is None:
warnings.warn(
"Passing errors=None to assertFormSetError() is deprecated, "
"use errors=[] instead.",
RemovedInDjango50Warning,
stacklevel=2,
)
errors = []
if form_index is None and field is not None:
raise ValueError("You must use field=None with form_index=None.")
if msg_prefix:
msg_prefix += ": "
errors = to_list(errors)
if not formset.is_bound:
self.fail(
f"{msg_prefix}The formset {formset!r} is not bound, it will never have "
f"any errors."
)
if form_index is not None and form_index >= formset.total_form_count():
form_count = formset.total_form_count()
form_or_forms = "forms" if form_count > 1 else "form"
self.fail(
f"{msg_prefix}The formset {formset!r} only has {form_count} "
f"{form_or_forms}."
)
if form_index is not None:
form_repr = f"form {form_index} of formset {formset!r}"
self._assert_form_error(
formset.forms[form_index], field, errors, msg_prefix, form_repr
)
else:
failure_message = f"The non-form errors of formset {formset!r} don't match."
self.assertEqual(
formset.non_form_errors(), errors, msg_prefix + failure_message
)
def _get_template_used(self, response, template_name, msg_prefix, method_name):
if response is None and template_name is None:
raise TypeError("response and/or template_name argument must be provided")
if msg_prefix:
msg_prefix += ": "
if template_name is not None and response is not None:
self._check_test_client_response(response, "templates", method_name)
if not hasattr(response, "templates") or (response is None and template_name):
if response:
template_name = response
response = None
            # Use this template with the context manager.
return template_name, None, msg_prefix
template_names = [t.name for t in response.templates if t.name is not None]
return None, template_names, msg_prefix
def _assert_template_used(self, template_name, template_names, msg_prefix, count):
if not template_names:
self.fail(msg_prefix + "No templates used to render the response")
self.assertTrue(
template_name in template_names,
msg_prefix + "Template '%s' was not a template used to render"
" the response. Actual template(s) used: %s"
% (template_name, ", ".join(template_names)),
)
if count is not None:
self.assertEqual(
template_names.count(template_name),
count,
msg_prefix + "Template '%s' was expected to be rendered %d "
"time(s) but was actually rendered %d time(s)."
% (template_name, count, template_names.count(template_name)),
)
def assertTemplateUsed(
self, response=None, template_name=None, msg_prefix="", count=None
):
"""
Assert that the template with the provided name was used in rendering
the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._get_template_used(
response,
template_name,
msg_prefix,
"assertTemplateUsed",
)
if context_mgr_template:
# Use assertTemplateUsed as context manager.
return _AssertTemplateUsedContext(
self, context_mgr_template, msg_prefix, count
)
self._assert_template_used(template_name, template_names, msg_prefix, count)
def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=""):
"""
Assert that the template with the provided name was NOT used in
rendering the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._get_template_used(
response,
template_name,
msg_prefix,
"assertTemplateNotUsed",
)
if context_mgr_template:
# Use assertTemplateNotUsed as context manager.
return _AssertTemplateNotUsedContext(self, context_mgr_template, msg_prefix)
self.assertFalse(
template_name in template_names,
msg_prefix
+ "Template '%s' was used unexpectedly in rendering the response"
% template_name,
)
@contextmanager
def _assert_raises_or_warns_cm(
self, func, cm_attr, expected_exception, expected_message
):
with func(expected_exception) as cm:
yield cm
self.assertIn(expected_message, str(getattr(cm, cm_attr)))
def _assertFooMessage(
self, func, cm_attr, expected_exception, expected_message, *args, **kwargs
):
callable_obj = None
if args:
callable_obj, *args = args
cm = self._assert_raises_or_warns_cm(
func, cm_attr, expected_exception, expected_message
)
# Assertion used in context manager fashion.
if callable_obj is None:
return cm
# Assertion was passed a callable.
with cm:
callable_obj(*args, **kwargs)
def assertRaisesMessage(
self, expected_exception, expected_message, *args, **kwargs
):
"""
Assert that expected_message is found in the message of a raised
exception.
Args:
expected_exception: Exception class expected to be raised.
expected_message: expected error message string value.
args: Function to be called and extra positional args.
kwargs: Extra kwargs.
"""
return self._assertFooMessage(
self.assertRaises,
"exception",
expected_exception,
expected_message,
*args,
**kwargs,
)
def assertWarnsMessage(self, expected_warning, expected_message, *args, **kwargs):
"""
Same as assertRaisesMessage but for assertWarns() instead of
assertRaises().
"""
return self._assertFooMessage(
self.assertWarns,
"warning",
expected_warning,
expected_message,
*args,
**kwargs,
)
# A similar method is available in Python 3.10+.
if not PY310:
@contextmanager
def assertNoLogs(self, logger, level=None):
"""
Assert no messages are logged on the logger, with at least the
given level.
"""
if isinstance(level, int):
level = logging.getLevelName(level)
elif level is None:
level = "INFO"
try:
with self.assertLogs(logger, level) as cm:
yield
except AssertionError as e:
msg = e.args[0]
expected_msg = (
f"no logs of level {level} or higher triggered on {logger}"
)
if msg != expected_msg:
raise e
else:
self.fail(f"Unexpected logs found: {cm.output!r}")
def assertFieldOutput(
self,
fieldclass,
valid,
invalid,
field_args=None,
field_kwargs=None,
empty_value="",
):
"""
Assert that a form field behaves correctly with various inputs.
Args:
fieldclass: the class of the field to be tested.
valid: a dictionary mapping valid inputs to their expected
cleaned values.
invalid: a dictionary mapping invalid inputs to one or more
raised error messages.
field_args: the args passed to instantiate the field
field_kwargs: the kwargs passed to instantiate the field
empty_value: the expected clean output for inputs in empty_values
"""
if field_args is None:
field_args = []
if field_kwargs is None:
field_kwargs = {}
required = fieldclass(*field_args, **field_kwargs)
optional = fieldclass(*field_args, **{**field_kwargs, "required": False})
# test valid inputs
for input, output in valid.items():
self.assertEqual(required.clean(input), output)
self.assertEqual(optional.clean(input), output)
# test invalid inputs
for input, errors in invalid.items():
with self.assertRaises(ValidationError) as context_manager:
required.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
with self.assertRaises(ValidationError) as context_manager:
optional.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
# test required inputs
error_required = [required.error_messages["required"]]
for e in required.empty_values:
with self.assertRaises(ValidationError) as context_manager:
required.clean(e)
self.assertEqual(context_manager.exception.messages, error_required)
self.assertEqual(optional.clean(e), empty_value)
# test that max_length and min_length are always accepted
if issubclass(fieldclass, CharField):
field_kwargs.update({"min_length": 2, "max_length": 20})
self.assertIsInstance(fieldclass(*field_args, **field_kwargs), fieldclass)
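    # Editor's illustration of the calling convention (EmailField import
    # assumed; the messages follow Django's defaults):
    #
    #     self.assertFieldOutput(
    #         EmailField,
    #         {"a@a.com": "a@a.com"},
    #         {"aaa": ["Enter a valid email address."]},
    #     )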
def assertHTMLEqual(self, html1, html2, msg=None):
"""
Assert that two HTML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The arguments must be valid HTML.
"""
dom1 = assert_and_parse_html(
self, html1, msg, "First argument is not valid HTML:"
)
dom2 = assert_and_parse_html(
self, html2, msg, "Second argument is not valid HTML:"
)
if dom1 != dom2:
standardMsg = "%s != %s" % (safe_repr(dom1, True), safe_repr(dom2, True))
diff = "\n" + "\n".join(
difflib.ndiff(
str(dom1).splitlines(),
str(dom2).splitlines(),
)
)
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertHTMLNotEqual(self, html1, html2, msg=None):
"""Assert that two HTML snippets are not semantically equivalent."""
dom1 = assert_and_parse_html(
self, html1, msg, "First argument is not valid HTML:"
)
dom2 = assert_and_parse_html(
self, html2, msg, "Second argument is not valid HTML:"
)
if dom1 == dom2:
standardMsg = "%s == %s" % (safe_repr(dom1, True), safe_repr(dom2, True))
self.fail(self._formatMessage(msg, standardMsg))
def assertInHTML(self, needle, haystack, count=None, msg_prefix=""):
needle = assert_and_parse_html(
self, needle, None, "First argument is not valid HTML:"
)
haystack = assert_and_parse_html(
self, haystack, None, "Second argument is not valid HTML:"
)
real_count = haystack.count(needle)
if count is not None:
self.assertEqual(
real_count,
count,
msg_prefix
+ "Found %d instances of '%s' in response (expected %d)"
% (real_count, needle, count),
)
else:
self.assertTrue(
real_count != 0, msg_prefix + "Couldn't find '%s' in response" % needle
)
def assertJSONEqual(self, raw, expected_data, msg=None):
"""
Assert that the JSON fragments raw and expected_data are equal.
        Usual JSON non-significant whitespace rules apply as the heavy
        lifting is delegated to the json library.
"""
try:
data = json.loads(raw)
except json.JSONDecodeError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, str):
try:
expected_data = json.loads(expected_data)
except ValueError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertEqual(data, expected_data, msg=msg)
def assertJSONNotEqual(self, raw, expected_data, msg=None):
"""
Assert that the JSON fragments raw and expected_data are not equal.
        Usual JSON non-significant whitespace rules apply as the heavy
        lifting is delegated to the json library.
"""
try:
data = json.loads(raw)
except json.JSONDecodeError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, str):
try:
expected_data = json.loads(expected_data)
except json.JSONDecodeError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertNotEqual(data, expected_data, msg=msg)
def assertXMLEqual(self, xml1, xml2, msg=None):
"""
Assert that two XML snippets are semantically the same.
Whitespace in most cases is ignored and attribute ordering is not
significant. The arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = "First or second argument is not valid XML\n%s" % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if not result:
standardMsg = "%s != %s" % (
safe_repr(xml1, True),
safe_repr(xml2, True),
)
diff = "\n" + "\n".join(
difflib.ndiff(xml1.splitlines(), xml2.splitlines())
)
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertXMLNotEqual(self, xml1, xml2, msg=None):
"""
Assert that two XML snippets are not semantically equivalent.
Whitespace in most cases is ignored and attribute ordering is not
significant. The arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = "First or second argument is not valid XML\n%s" % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if result:
standardMsg = "%s == %s" % (
safe_repr(xml1, True),
safe_repr(xml2, True),
)
self.fail(self._formatMessage(msg, standardMsg))
class TransactionTestCase(SimpleTestCase):
# Subclasses can ask for resetting of auto increment sequence before each
# test case
reset_sequences = False
# Subclasses can enable only a subset of apps for faster tests
available_apps = None
# Subclasses can define fixtures which will be automatically installed.
fixtures = None
databases = {DEFAULT_DB_ALIAS}
_disallowed_database_msg = (
"Database %(operation)s to %(alias)r are not allowed in this test. "
"Add %(alias)r to %(test)s.databases to ensure proper test isolation "
"and silence this failure."
)
# If transactions aren't available, Django will serialize the database
    # contents into a fixture during setup, then flush and reload them
# during teardown (as flush does not restore data from migrations).
# This can be slow; this flag allows enabling on a per-case basis.
serialized_rollback = False
def _pre_setup(self):
"""
Perform pre-test setup:
* If the class has an 'available_apps' attribute, restrict the app
registry to these applications, then fire the post_migrate signal --
it must run with the correct set of applications for the test case.
* If the class has a 'fixtures' attribute, install those fixtures.
"""
super()._pre_setup()
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
setting_changed.send(
sender=settings._wrapped.__class__,
setting="INSTALLED_APPS",
value=self.available_apps,
enter=True,
)
for db_name in self._databases_names(include_mirrors=False):
emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name)
try:
self._fixture_setup()
except Exception:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(
sender=settings._wrapped.__class__,
setting="INSTALLED_APPS",
value=settings.INSTALLED_APPS,
enter=False,
)
raise
# Clear the queries_log so that it's less likely to overflow (a single
# test probably won't execute 9K queries). If queries_log overflows,
# then assertNumQueries() doesn't work.
for db_name in self._databases_names(include_mirrors=False):
connections[db_name].queries_log.clear()
@classmethod
def _databases_names(cls, include_mirrors=True):
# Only consider allowed database aliases, including mirrors or not.
return [
alias
for alias in connections
if alias in cls.databases
and (
include_mirrors
or not connections[alias].settings_dict["TEST"]["MIRROR"]
)
]
def _reset_sequences(self, db_name):
conn = connections[db_name]
if conn.features.supports_sequence_reset:
sql_list = conn.ops.sequence_reset_by_name_sql(
no_style(), conn.introspection.sequence_list()
)
if sql_list:
with transaction.atomic(using=db_name):
with conn.cursor() as cursor:
for sql in sql_list:
cursor.execute(sql)
def _fixture_setup(self):
for db_name in self._databases_names(include_mirrors=False):
# Reset sequences
if self.reset_sequences:
self._reset_sequences(db_name)
# Provide replica initial data from migrated apps, if needed.
if self.serialized_rollback and hasattr(
connections[db_name], "_test_serialized_contents"
):
if self.available_apps is not None:
apps.unset_available_apps()
connections[db_name].creation.deserialize_db_from_string(
connections[db_name]._test_serialized_contents
)
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
if self.fixtures:
# We have to use this slightly awkward syntax due to the fact
# that we're using *args and **kwargs together.
call_command(
"loaddata", *self.fixtures, **{"verbosity": 0, "database": db_name}
)
def _should_reload_connections(self):
return True
def _post_teardown(self):
"""
Perform post-test things:
* Flush the contents of the database to leave a clean slate. If the
class has an 'available_apps' attribute, don't fire post_migrate.
* Force-close the connection so the next test gets a clean cursor.
"""
try:
self._fixture_teardown()
super()._post_teardown()
if self._should_reload_connections():
# Some DB cursors include SQL statements as part of cursor
# creation. If you have a test that does a rollback, the effect
# of these statements is lost, which can affect the operation of
# tests (e.g., losing a timezone setting causing objects to be
# created with the wrong time). To make sure this doesn't
# happen, get a clean connection at the start of every test.
for conn in connections.all(initialized_only=True):
conn.close()
finally:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(
sender=settings._wrapped.__class__,
setting="INSTALLED_APPS",
value=settings.INSTALLED_APPS,
enter=False,
)
def _fixture_teardown(self):
# Allow TRUNCATE ... CASCADE and don't emit the post_migrate signal
# when flushing only a subset of the apps
for db_name in self._databases_names(include_mirrors=False):
# Flush the database
inhibit_post_migrate = (
self.available_apps is not None
or ( # Inhibit the post_migrate signal when using serialized
# rollback to avoid trying to recreate the serialized data.
self.serialized_rollback
and hasattr(connections[db_name], "_test_serialized_contents")
)
)
call_command(
"flush",
verbosity=0,
interactive=False,
database=db_name,
reset_sequences=False,
allow_cascade=self.available_apps is not None,
inhibit_post_migrate=inhibit_post_migrate,
)
# RemovedInDjango51Warning.
def assertQuerysetEqual(self, *args, **kw):
warnings.warn(
"assertQuerysetEqual() is deprecated in favor of assertQuerySetEqual().",
category=RemovedInDjango51Warning,
stacklevel=2,
)
return self.assertQuerySetEqual(*args, **kw)
def assertQuerySetEqual(self, qs, values, transform=None, ordered=True, msg=None):
values = list(values)
items = qs
if transform is not None:
items = map(transform, items)
if not ordered:
return self.assertDictEqual(Counter(items), Counter(values), msg=msg)
        # For example qs.iterator() could be passed as qs, but it does not
        # have an 'ordered' attribute.
if len(values) > 1 and hasattr(qs, "ordered") and not qs.ordered:
raise ValueError(
"Trying to compare non-ordered queryset against more than one "
"ordered value."
)
return self.assertEqual(list(items), values, msg=msg)
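    # Editor's illustration of assertQuerySetEqual() ("Author" is an assumed
    # model):
    #
    #     self.assertQuerySetEqual(
    #         Author.objects.order_by("name"),
    #         ["Jane", "John"],
    #         transform=lambda a: a.name,
    #     )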
def assertNumQueries(self, num, func=None, *args, using=DEFAULT_DB_ALIAS, **kwargs):
conn = connections[using]
context = _AssertNumQueriesContext(self, num, conn)
if func is None:
return context
with context:
func(*args, **kwargs)
def connections_support_transactions(aliases=None):
"""
Return whether or not all (or specified) connections support
transactions.
"""
conns = (
connections.all()
if aliases is None
else (connections[alias] for alias in aliases)
)
return all(conn.features.supports_transactions for conn in conns)
class TestData:
"""
Descriptor to provide TestCase instance isolation for attributes assigned
during the setUpTestData() phase.
Allow safe alteration of objects assigned in setUpTestData() by test
methods by exposing deep copies instead of the original objects.
Objects are deep copied using a memo kept on the test case instance in
order to maintain their original relationships.
"""
memo_attr = "_testdata_memo"
def __init__(self, name, data):
self.name = name
self.data = data
def get_memo(self, testcase):
try:
memo = getattr(testcase, self.memo_attr)
except AttributeError:
memo = {}
setattr(testcase, self.memo_attr, memo)
return memo
def __get__(self, instance, owner):
if instance is None:
return self.data
memo = self.get_memo(instance)
data = deepcopy(self.data, memo)
setattr(instance, self.name, data)
return data
def __repr__(self):
return "<TestData: name=%r, data=%r>" % (self.name, self.data)
class TestCase(TransactionTestCase):
"""
Similar to TransactionTestCase, but use `transaction.atomic()` to achieve
test isolation.
In most situations, TestCase should be preferred to TransactionTestCase as
it allows faster execution. However, there are some situations where using
TransactionTestCase might be necessary (e.g. testing some transactional
behavior).
On database backends with no transaction support, TestCase behaves as
TransactionTestCase.
"""
@classmethod
def _enter_atomics(cls):
"""Open atomic blocks for multiple databases."""
atomics = {}
for db_name in cls._databases_names():
atomic = transaction.atomic(using=db_name)
atomic._from_testcase = True
atomic.__enter__()
atomics[db_name] = atomic
return atomics
@classmethod
def _rollback_atomics(cls, atomics):
"""Rollback atomic blocks opened by the previous method."""
for db_name in reversed(cls._databases_names()):
transaction.set_rollback(True, using=db_name)
atomics[db_name].__exit__(None, None, None)
@classmethod
def _databases_support_transactions(cls):
return connections_support_transactions(cls.databases)
@classmethod
def setUpClass(cls):
super().setUpClass()
if not cls._databases_support_transactions():
return
cls.cls_atomics = cls._enter_atomics()
if cls.fixtures:
for db_name in cls._databases_names(include_mirrors=False):
try:
call_command(
"loaddata",
*cls.fixtures,
**{"verbosity": 0, "database": db_name},
)
except Exception:
cls._rollback_atomics(cls.cls_atomics)
raise
pre_attrs = cls.__dict__.copy()
try:
cls.setUpTestData()
except Exception:
cls._rollback_atomics(cls.cls_atomics)
raise
for name, value in cls.__dict__.items():
if value is not pre_attrs.get(name):
setattr(cls, name, TestData(name, value))
@classmethod
def tearDownClass(cls):
if cls._databases_support_transactions():
cls._rollback_atomics(cls.cls_atomics)
for conn in connections.all(initialized_only=True):
conn.close()
super().tearDownClass()
@classmethod
def setUpTestData(cls):
"""Load initial data for the TestCase."""
pass
def _should_reload_connections(self):
if self._databases_support_transactions():
return False
return super()._should_reload_connections()
def _fixture_setup(self):
if not self._databases_support_transactions():
# If the backend does not support transactions, we should reload
# class data before each test
self.setUpTestData()
return super()._fixture_setup()
if self.reset_sequences:
raise TypeError("reset_sequences cannot be used on TestCase instances")
self.atomics = self._enter_atomics()
def _fixture_teardown(self):
if not self._databases_support_transactions():
return super()._fixture_teardown()
try:
for db_name in reversed(self._databases_names()):
if self._should_check_constraints(connections[db_name]):
connections[db_name].check_constraints()
finally:
self._rollback_atomics(self.atomics)
def _should_check_constraints(self, connection):
return (
connection.features.can_defer_constraint_checks
and not connection.needs_rollback
and connection.is_usable()
)
@classmethod
@contextmanager
def captureOnCommitCallbacks(cls, *, using=DEFAULT_DB_ALIAS, execute=False):
"""Context manager to capture transaction.on_commit() callbacks."""
callbacks = []
start_count = len(connections[using].run_on_commit)
try:
yield callbacks
finally:
while True:
callback_count = len(connections[using].run_on_commit)
for _, callback, robust in connections[using].run_on_commit[
start_count:
]:
callbacks.append(callback)
if execute:
if robust:
try:
callback()
except Exception as e:
logger.error(
f"Error calling {callback.__qualname__} in "
f"on_commit() (%s).",
e,
exc_info=True,
)
else:
callback()
if callback_count == len(connections[using].run_on_commit):
break
start_count = callback_count
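    # Usage sketch (illustrative): capture, and optionally execute, the
    # on_commit() callbacks registered inside the block. The URL and payload
    # below are hypothetical:
    #
    #     with self.captureOnCommitCallbacks(execute=True) as callbacks:
    #         response = self.client.post("/contact/", {"message": "hi"})
    #     self.assertEqual(len(callbacks), 1)
    #
    # With execute=True each captured callback runs when the block exits;
    # errors from robust callbacks are logged instead of raised.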
class CheckCondition:
"""Descriptor class for deferred condition checking."""
def __init__(self, *conditions):
self.conditions = conditions
def add_condition(self, condition, reason):
return self.__class__(*self.conditions, (condition, reason))
def __get__(self, instance, cls=None):
# Trigger access for all bases.
if any(getattr(base, "__unittest_skip__", False) for base in cls.__bases__):
return True
for condition, reason in self.conditions:
if condition():
# Override this descriptor's value and set the skip reason.
cls.__unittest_skip__ = True
cls.__unittest_skip_why__ = reason
return True
return False
def _deferredSkip(condition, reason, name):
def decorator(test_func):
nonlocal condition
if not (
isinstance(test_func, type) and issubclass(test_func, unittest.TestCase)
):
@wraps(test_func)
def skip_wrapper(*args, **kwargs):
if (
args
and isinstance(args[0], unittest.TestCase)
and connection.alias not in getattr(args[0], "databases", {})
):
raise ValueError(
"%s cannot be used on %s as %s doesn't allow queries "
"against the %r database."
% (
name,
args[0],
args[0].__class__.__qualname__,
connection.alias,
)
)
if condition():
raise unittest.SkipTest(reason)
return test_func(*args, **kwargs)
test_item = skip_wrapper
else:
# Assume a class is decorated
test_item = test_func
databases = getattr(test_item, "databases", None)
if not databases or connection.alias not in databases:
# Defer raising to allow importing test class's module.
def condition():
raise ValueError(
"%s cannot be used on %s as it doesn't allow queries "
"against the '%s' database."
% (
name,
test_item,
connection.alias,
)
)
# Retrieve the possibly existing value from the class's dict to
# avoid triggering the descriptor.
skip = test_func.__dict__.get("__unittest_skip__")
if isinstance(skip, CheckCondition):
test_item.__unittest_skip__ = skip.add_condition(condition, reason)
elif skip is not True:
test_item.__unittest_skip__ = CheckCondition((condition, reason))
return test_item
return decorator
def skipIfDBFeature(*features):
"""Skip a test if a database has at least one of the named features."""
return _deferredSkip(
lambda: any(
getattr(connection.features, feature, False) for feature in features
),
"Database has feature(s) %s" % ", ".join(features),
"skipIfDBFeature",
)
def skipUnlessDBFeature(*features):
"""Skip a test unless a database has all the named features."""
return _deferredSkip(
lambda: not all(
getattr(connection.features, feature, False) for feature in features
),
"Database doesn't support feature(s): %s" % ", ".join(features),
"skipUnlessDBFeature",
)
def skipUnlessAnyDBFeature(*features):
"""Skip a test unless a database has any of the named features."""
return _deferredSkip(
lambda: not any(
getattr(connection.features, feature, False) for feature in features
),
"Database doesn't support any of the feature(s): %s" % ", ".join(features),
"skipUnlessAnyDBFeature",
)
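# Usage sketch (illustrative): the feature-skip decorators defer the check so
# the database connection is only consulted when the test actually runs:
#
#     @skipUnlessDBFeature("supports_transactions")
#     def test_rollback(self):
#         ...
#
#     @skipIfDBFeature("interprets_empty_strings_as_nulls")
#     def test_empty_string(self):
#         ...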
class QuietWSGIRequestHandler(WSGIRequestHandler):
"""
A WSGIRequestHandler that doesn't log to standard output any of the
requests received, so as to not clutter the test result output.
"""
def log_message(*args):
pass
class FSFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to a directory, as defined by one of
the *_ROOT settings, and serves those files, publishing them under *_URL.
"""
def __init__(self, application):
self.application = application
self.base_url = urlparse(self.get_base_url())
super().__init__()
def _should_handle(self, path):
"""
Check if the path should be handled. Ignore the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def file_path(self, url):
"""Return the relative path to the file on disk for the given URL."""
relative_url = url[len(self.base_url[2]) :]
return url2pathname(relative_url)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404:
pass
return super().get_response(request)
def serve(self, request):
os_rel_path = self.file_path(request.path)
os_rel_path = posixpath.normpath(unquote(os_rel_path))
# Emulate behavior of django.contrib.staticfiles.views.serve() when it
# invokes staticfiles' finders functionality.
# TODO: Modify if/when that internal API is refactored
final_rel_path = os_rel_path.replace("\\", "/").lstrip("/")
return serve(request, final_rel_path, document_root=self.get_base_dir())
def __call__(self, environ, start_response):
if not self._should_handle(get_path_info(environ)):
return self.application(environ, start_response)
return super().__call__(environ, start_response)
class _StaticFilesHandler(FSFilesHandler):
"""
Handler for serving static files. A private class that is meant to be used
solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.STATIC_ROOT
def get_base_url(self):
return settings.STATIC_URL
class _MediaFilesHandler(FSFilesHandler):
"""
Handler for serving the media files. A private class that is meant to be
used solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.MEDIA_ROOT
def get_base_url(self):
return settings.MEDIA_URL
class LiveServerThread(threading.Thread):
"""Thread for running a live HTTP server while the tests are running."""
server_class = ThreadedWSGIServer
def __init__(self, host, static_handler, connections_override=None, port=0):
self.host = host
self.port = port
self.is_ready = threading.Event()
self.error = None
self.static_handler = static_handler
self.connections_override = connections_override
super().__init__()
def run(self):
"""
Set up the live server and databases, and then loop over handling
HTTP requests.
"""
if self.connections_override:
# Override this thread's database connections with the ones
# provided by the main thread.
for alias, conn in self.connections_override.items():
connections[alias] = conn
try:
# Create the handler for serving static and media files
handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
self.httpd = self._create_server(
connections_override=self.connections_override,
)
# If binding to port zero, assign the port allocated by the OS.
if self.port == 0:
self.port = self.httpd.server_address[1]
self.httpd.set_app(handler)
self.is_ready.set()
self.httpd.serve_forever()
except Exception as e:
self.error = e
self.is_ready.set()
finally:
connections.close_all()
def _create_server(self, connections_override=None):
return self.server_class(
(self.host, self.port),
QuietWSGIRequestHandler,
allow_reuse_address=False,
connections_override=connections_override,
)
def terminate(self):
if hasattr(self, "httpd"):
# Stop the WSGI server
self.httpd.shutdown()
self.httpd.server_close()
self.join()
class LiveServerTestCase(TransactionTestCase):
"""
    Do basically the same as TransactionTestCase but also launch a live HTTP
    server in a separate thread so that the tests may use another testing
    framework, such as Selenium, instead of the built-in dummy client.
    It inherits from TransactionTestCase instead of TestCase because the
    threads don't share the same transactions (unless an in-memory SQLite
    database is used) and each thread needs to commit all its transactions
    so that the other thread can see the changes.
"""
host = "localhost"
port = 0
server_thread_class = LiveServerThread
static_handler = _StaticFilesHandler
@classproperty
def live_server_url(cls):
return "http://%s:%s" % (cls.host, cls.server_thread.port)
@classproperty
def allowed_host(cls):
return cls.host
@classmethod
def _make_connections_override(cls):
connections_override = {}
for conn in connections.all():
# If using in-memory sqlite databases, pass the connections to
# the server thread.
if conn.vendor == "sqlite" and conn.is_in_memory_db():
connections_override[conn.alias] = conn
return connections_override
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._live_server_modified_settings = modify_settings(
ALLOWED_HOSTS={"append": cls.allowed_host},
)
cls._live_server_modified_settings.enable()
cls.addClassCleanup(cls._live_server_modified_settings.disable)
cls._start_server_thread()
@classmethod
def _start_server_thread(cls):
connections_override = cls._make_connections_override()
for conn in connections_override.values():
# Explicitly enable thread-shareability for this connection.
conn.inc_thread_sharing()
cls.server_thread = cls._create_server_thread(connections_override)
cls.server_thread.daemon = True
cls.server_thread.start()
cls.addClassCleanup(cls._terminate_thread)
# Wait for the live server to be ready
cls.server_thread.is_ready.wait()
if cls.server_thread.error:
raise cls.server_thread.error
@classmethod
def _create_server_thread(cls, connections_override):
return cls.server_thread_class(
cls.host,
cls.static_handler,
connections_override=connections_override,
port=cls.port,
)
@classmethod
def _terminate_thread(cls):
# Terminate the live server's thread.
cls.server_thread.terminate()
# Restore shared connections' non-shareability.
for conn in cls.server_thread.connections_override.values():
conn.dec_thread_sharing()
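    # Usage sketch (illustrative): a subclass can exercise the live server
    # with any HTTP client; here only the standard library is assumed:
    #
    #     class HomepageTests(LiveServerTestCase):
    #         def test_homepage(self):
    #             from urllib.request import urlopen
    #             with urlopen(self.live_server_url + "/") as response:
    #                 self.assertEqual(response.status, 200)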
class SerializeMixin:
"""
Enforce serialization of TestCases that share a common resource.
Define a common 'lockfile' for each set of TestCases to serialize. This
file must exist on the filesystem.
Place it early in the MRO in order to isolate setUpClass()/tearDownClass().
"""
lockfile = None
def __init_subclass__(cls, /, **kwargs):
super().__init_subclass__(**kwargs)
if cls.lockfile is None:
raise ValueError(
"{}.lockfile isn't set. Set it to a unique value "
"in the base class.".format(cls.__name__)
)
@classmethod
def setUpClass(cls):
cls._lockfile = open(cls.lockfile)
cls.addClassCleanup(cls._lockfile.close)
locks.lock(cls._lockfile, locks.LOCK_EX)
super().setUpClass()
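    # Usage sketch (illustrative): test classes sharing a resource point
    # lockfile at an existing file, commonly the test module itself:
    #
    #     class BaseFileTests(SerializeMixin, TestCase):
    #         lockfile = __file__
    #
    #     class FirstFileTests(BaseFileTests):
    #         ...  # runs serialized with other BaseFileTests subclasses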
|
08f207949312a7159c9efdff25974786b97dd572588446c0955603e6c35393e3 | import functools
import re
import sys
import types
import warnings
from pathlib import Path
from django.conf import settings
from django.http import Http404, HttpResponse, HttpResponseNotFound
from django.template import Context, Engine, TemplateDoesNotExist
from django.template.defaultfilters import pprint
from django.urls import resolve
from django.utils import timezone
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str
from django.utils.module_loading import import_string
from django.utils.regex_helper import _lazy_re_compile
from django.utils.version import get_docs_version
# Minimal Django templates engine to render the error templates
# regardless of the project's TEMPLATES setting. Templates are
# read directly from the filesystem so that the error handler
# works even if the template loader is broken.
DEBUG_ENGINE = Engine(
debug=True,
libraries={"i18n": "django.templatetags.i18n"},
)
def builtin_template_path(name):
"""
Return a path to a builtin template.
    Avoid calling this function at the module level or in a class definition
because __file__ may not exist, e.g. in frozen environments.
"""
return Path(__file__).parent / "templates" / name
class ExceptionCycleWarning(UserWarning):
pass
class CallableSettingWrapper:
"""
    Object to wrap a callable appearing in settings.
    * Not to be called in the debug page (#21345).
    * Not to break the debug page if the callable forbids setting attributes
      (#23070).
"""
def __init__(self, callable_setting):
self._wrapped = callable_setting
def __repr__(self):
return repr(self._wrapped)
def technical_500_response(request, exc_type, exc_value, tb, status_code=500):
"""
Create a technical server error response. The last three arguments are
the values returned from sys.exc_info() and friends.
"""
reporter = get_exception_reporter_class(request)(request, exc_type, exc_value, tb)
if request.accepts("text/html"):
html = reporter.get_traceback_html()
return HttpResponse(html, status=status_code)
else:
text = reporter.get_traceback_text()
return HttpResponse(
text, status=status_code, content_type="text/plain; charset=utf-8"
)
@functools.lru_cache
def get_default_exception_reporter_filter():
# Instantiate the default filter for the first time and cache it.
return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)()
def get_exception_reporter_filter(request):
default_filter = get_default_exception_reporter_filter()
return getattr(request, "exception_reporter_filter", default_filter)
def get_exception_reporter_class(request):
default_exception_reporter_class = import_string(
settings.DEFAULT_EXCEPTION_REPORTER
)
return getattr(
request, "exception_reporter_class", default_exception_reporter_class
)
def get_caller(request):
resolver_match = request.resolver_match
if resolver_match is None:
try:
resolver_match = resolve(request.path)
except Http404:
pass
return "" if resolver_match is None else resolver_match._func_path
class SafeExceptionReporterFilter:
"""
Use annotations made by the sensitive_post_parameters and
sensitive_variables decorators to filter out sensitive information.
"""
cleansed_substitute = "********************"
hidden_settings = _lazy_re_compile(
"API|TOKEN|KEY|SECRET|PASS|SIGNATURE|HTTP_COOKIE", flags=re.I
)
def cleanse_setting(self, key, value):
"""
Cleanse an individual setting key/value of sensitive content. If the
value is a dictionary, recursively cleanse the keys in that dictionary.
"""
if key == settings.SESSION_COOKIE_NAME:
is_sensitive = True
else:
try:
is_sensitive = self.hidden_settings.search(key)
except TypeError:
is_sensitive = False
if is_sensitive:
cleansed = self.cleansed_substitute
elif isinstance(value, dict):
cleansed = {k: self.cleanse_setting(k, v) for k, v in value.items()}
elif isinstance(value, list):
cleansed = [self.cleanse_setting("", v) for v in value]
elif isinstance(value, tuple):
cleansed = tuple([self.cleanse_setting("", v) for v in value])
else:
cleansed = value
if callable(cleansed):
cleansed = CallableSettingWrapper(cleansed)
return cleansed
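    # Behavior sketch (illustrative): keys matching hidden_settings are
    # starred out, recursing into containers:
    #
    #     f = SafeExceptionReporterFilter()
    #     f.cleanse_setting("SECRET_KEY", "s3cr3t")
    #     # -> "********************"
    #     f.cleanse_setting("CACHES", {"default": {"PASSWORD": "pw"}})
    #     # -> {"default": {"PASSWORD": "********************"}}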
def get_safe_settings(self):
"""
Return a dictionary of the settings module with values of sensitive
settings replaced with stars (*********).
"""
settings_dict = {}
for k in dir(settings):
if k.isupper():
settings_dict[k] = self.cleanse_setting(k, getattr(settings, k))
return settings_dict
def get_safe_request_meta(self, request):
"""
Return a dictionary of request.META with sensitive values redacted.
"""
if not hasattr(request, "META"):
return {}
return {k: self.cleanse_setting(k, v) for k, v in request.META.items()}
def get_safe_cookies(self, request):
"""
Return a dictionary of request.COOKIES with sensitive values redacted.
"""
if not hasattr(request, "COOKIES"):
return {}
return {k: self.cleanse_setting(k, v) for k, v in request.COOKIES.items()}
def is_active(self, request):
"""
This filter is to add safety in production environments (i.e. DEBUG
is False). If DEBUG is True then your site is not safe anyway.
This hook is provided as a convenience to easily activate or
deactivate the filter on a per request basis.
"""
return settings.DEBUG is False
def get_cleansed_multivaluedict(self, request, multivaluedict):
"""
Replace the keys in a MultiValueDict marked as sensitive with stars.
This mitigates leaking sensitive POST parameters if something like
request.POST['nonexistent_key'] throws an exception (#21098).
"""
sensitive_post_parameters = getattr(request, "sensitive_post_parameters", [])
if self.is_active(request) and sensitive_post_parameters:
multivaluedict = multivaluedict.copy()
for param in sensitive_post_parameters:
if param in multivaluedict:
multivaluedict[param] = self.cleansed_substitute
return multivaluedict
def get_post_parameters(self, request):
"""
Replace the values of POST parameters marked as sensitive with
stars (*********).
"""
if request is None:
return {}
else:
sensitive_post_parameters = getattr(
request, "sensitive_post_parameters", []
)
if self.is_active(request) and sensitive_post_parameters:
cleansed = request.POST.copy()
if sensitive_post_parameters == "__ALL__":
# Cleanse all parameters.
for k in cleansed:
cleansed[k] = self.cleansed_substitute
return cleansed
else:
# Cleanse only the specified parameters.
for param in sensitive_post_parameters:
if param in cleansed:
cleansed[param] = self.cleansed_substitute
return cleansed
else:
return request.POST
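    # Usage sketch (illustrative): views opt in with the decorator from
    # django.views.decorators.debug, which sets the
    # request.sensitive_post_parameters attribute consumed above:
    #
    #     from django.views.decorators.debug import sensitive_post_parameters
    #
    #     @sensitive_post_parameters("password", "credit_card")
    #     def payment_view(request):
    #         ...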
def cleanse_special_types(self, request, value):
try:
            # If value is lazy or a complex object of another kind, this check
            # might raise an exception. The isinstance() check also forces any
            # lazy MultiValueDict to be evaluated so it can be cleansed below.
is_multivalue_dict = isinstance(value, MultiValueDict)
except Exception as e:
return "{!r} while evaluating {!r}".format(e, value)
if is_multivalue_dict:
# Cleanse MultiValueDicts (request.POST is the one we usually care about)
value = self.get_cleansed_multivaluedict(request, value)
return value
def get_traceback_frame_variables(self, request, tb_frame):
"""
Replace the values of variables marked as sensitive with
stars (*********).
"""
# Loop through the frame's callers to see if the sensitive_variables
# decorator was used.
current_frame = tb_frame.f_back
sensitive_variables = None
while current_frame is not None:
if (
current_frame.f_code.co_name == "sensitive_variables_wrapper"
and "sensitive_variables_wrapper" in current_frame.f_locals
):
# The sensitive_variables decorator was used, so we take note
# of the sensitive variables' names.
wrapper = current_frame.f_locals["sensitive_variables_wrapper"]
sensitive_variables = getattr(wrapper, "sensitive_variables", None)
break
current_frame = current_frame.f_back
cleansed = {}
if self.is_active(request) and sensitive_variables:
if sensitive_variables == "__ALL__":
# Cleanse all variables
for name in tb_frame.f_locals:
cleansed[name] = self.cleansed_substitute
else:
# Cleanse specified variables
for name, value in tb_frame.f_locals.items():
if name in sensitive_variables:
value = self.cleansed_substitute
else:
value = self.cleanse_special_types(request, value)
cleansed[name] = value
else:
# Potentially cleanse the request and any MultiValueDicts if they
# are one of the frame variables.
for name, value in tb_frame.f_locals.items():
cleansed[name] = self.cleanse_special_types(request, value)
if (
tb_frame.f_code.co_name == "sensitive_variables_wrapper"
and "sensitive_variables_wrapper" in tb_frame.f_locals
):
# For good measure, obfuscate the decorated function's arguments in
# the sensitive_variables decorator's frame, in case the variables
# associated with those arguments were meant to be obfuscated from
# the decorated function's frame.
cleansed["func_args"] = self.cleansed_substitute
cleansed["func_kwargs"] = self.cleansed_substitute
return cleansed.items()
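    # Usage sketch (illustrative): the frames this method looks for are
    # created by the decorator in django.views.decorators.debug:
    #
    #     from django.views.decorators.debug import sensitive_variables
    #
    #     @sensitive_variables("password", "api_key")
    #     def process(password, api_key):
    #         ...  # locals named here are starred out in error reports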
class ExceptionReporter:
"""Organize and coordinate reporting on exceptions."""
@property
def html_template_path(self):
return builtin_template_path("technical_500.html")
@property
def text_template_path(self):
return builtin_template_path("technical_500.txt")
def __init__(self, request, exc_type, exc_value, tb, is_email=False):
self.request = request
self.filter = get_exception_reporter_filter(self.request)
self.exc_type = exc_type
self.exc_value = exc_value
self.tb = tb
self.is_email = is_email
self.template_info = getattr(self.exc_value, "template_debug", None)
self.template_does_not_exist = False
self.postmortem = None
def _get_raw_insecure_uri(self):
"""
        Return an absolute URI from variables available in this request. Skip
        the allowed hosts protection, so the result may be an insecure URI.
"""
return "{scheme}://{host}{path}".format(
scheme=self.request.scheme,
host=self.request._get_raw_host(),
path=self.request.get_full_path(),
)
def get_traceback_data(self):
"""Return a dictionary containing traceback information."""
if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
self.template_does_not_exist = True
self.postmortem = self.exc_value.chain or [self.exc_value]
frames = self.get_traceback_frames()
for i, frame in enumerate(frames):
if "vars" in frame:
frame_vars = []
for k, v in frame["vars"]:
v = pprint(v)
# Trim large blobs of data
if len(v) > 4096:
v = "%s… <trimmed %d bytes string>" % (v[0:4096], len(v))
frame_vars.append((k, v))
frame["vars"] = frame_vars
frames[i] = frame
unicode_hint = ""
if self.exc_type and issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, "start", None)
end = getattr(self.exc_value, "end", None)
if start is not None and end is not None:
unicode_str = self.exc_value.args[1]
unicode_hint = force_str(
unicode_str[max(start - 5, 0) : min(end + 5, len(unicode_str))],
"ascii",
errors="replace",
)
from django import get_version
if self.request is None:
user_str = None
else:
try:
user_str = str(self.request.user)
except Exception:
# request.user may raise OperationalError if the database is
# unavailable, for example.
user_str = "[unable to retrieve the current user]"
c = {
"is_email": self.is_email,
"unicode_hint": unicode_hint,
"frames": frames,
"request": self.request,
"request_meta": self.filter.get_safe_request_meta(self.request),
"request_COOKIES_items": self.filter.get_safe_cookies(self.request).items(),
"user_str": user_str,
"filtered_POST_items": list(
self.filter.get_post_parameters(self.request).items()
),
"settings": self.filter.get_safe_settings(),
"sys_executable": sys.executable,
"sys_version_info": "%d.%d.%d" % sys.version_info[0:3],
"server_time": timezone.now(),
"django_version_info": get_version(),
"sys_path": sys.path,
"template_info": self.template_info,
"template_does_not_exist": self.template_does_not_exist,
"postmortem": self.postmortem,
}
if self.request is not None:
c["request_GET_items"] = self.request.GET.items()
c["request_FILES_items"] = self.request.FILES.items()
c["request_insecure_uri"] = self._get_raw_insecure_uri()
c["raising_view_name"] = get_caller(self.request)
# Check whether exception info is available
if self.exc_type:
c["exception_type"] = self.exc_type.__name__
if self.exc_value:
c["exception_value"] = str(self.exc_value)
if exc_notes := getattr(self.exc_value, "__notes__", None):
c["exception_notes"] = "\n" + "\n".join(exc_notes)
if frames:
c["lastframe"] = frames[-1]
return c
def get_traceback_html(self):
"""Return HTML version of debug 500 HTTP error page."""
with self.html_template_path.open(encoding="utf-8") as fh:
t = DEBUG_ENGINE.from_string(fh.read())
c = Context(self.get_traceback_data(), use_l10n=False)
return t.render(c)
def get_traceback_text(self):
"""Return plain text version of debug 500 HTTP error page."""
with self.text_template_path.open(encoding="utf-8") as fh:
t = DEBUG_ENGINE.from_string(fh.read())
c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False)
return t.render(c)
def _get_source(self, filename, loader, module_name):
source = None
if hasattr(loader, "get_source"):
try:
source = loader.get_source(module_name)
except ImportError:
pass
if source is not None:
source = source.splitlines()
if source is None:
try:
with open(filename, "rb") as fp:
source = fp.read().splitlines()
except OSError:
pass
return source
def _get_lines_from_file(
self, filename, lineno, context_lines, loader=None, module_name=None
):
"""
Return context_lines before and after lineno from file.
Return (pre_context_lineno, pre_context, context_line, post_context).
"""
source = self._get_source(filename, loader, module_name)
if source is None:
return None, [], None, []
# If we just read the source from a file, or if the loader did not
# apply tokenize.detect_encoding to decode the source into a
# string, then we should do that ourselves.
if isinstance(source[0], bytes):
encoding = "ascii"
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (https://www.python.org/dev/peps/pep-0263/)
match = re.search(rb"coding[:=]\s*([-\w.]+)", line)
if match:
encoding = match[1].decode("ascii")
break
source = [str(sline, encoding, "replace") for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
try:
pre_context = source[lower_bound:lineno]
context_line = source[lineno]
post_context = source[lineno + 1 : upper_bound]
except IndexError:
return None, [], None, []
return lower_bound, pre_context, context_line, post_context
def _get_explicit_or_implicit_cause(self, exc_value):
explicit = getattr(exc_value, "__cause__", None)
suppress_context = getattr(exc_value, "__suppress_context__", None)
implicit = getattr(exc_value, "__context__", None)
return explicit or (None if suppress_context else implicit)
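    # Behavior sketch (illustrative): `raise B() from A()` sets __cause__ (an
    # explicit cause); raising inside an `except` block sets __context__ (an
    # implicit one); `raise B() from None` sets __suppress_context__, hiding
    # the implicit cause:
    #
    #     try:
    #         raise KeyError("a")
    #     except KeyError:
    #         raise ValueError("b")  # __context__ is the KeyError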
def get_traceback_frames(self):
# Get the exception and all its causes
exceptions = []
exc_value = self.exc_value
while exc_value:
exceptions.append(exc_value)
exc_value = self._get_explicit_or_implicit_cause(exc_value)
if exc_value in exceptions:
warnings.warn(
"Cycle in the exception chain detected: exception '%s' "
"encountered again." % exc_value,
ExceptionCycleWarning,
)
# Avoid infinite loop if there's a cyclic reference (#29393).
break
frames = []
# No exceptions were supplied to ExceptionReporter
if not exceptions:
return frames
# In case there's just one exception, take the traceback from self.tb
exc_value = exceptions.pop()
tb = self.tb if not exceptions else exc_value.__traceback__
while True:
frames.extend(self.get_exception_traceback_frames(exc_value, tb))
try:
exc_value = exceptions.pop()
except IndexError:
break
tb = exc_value.__traceback__
return frames
def get_exception_traceback_frames(self, exc_value, tb):
exc_cause = self._get_explicit_or_implicit_cause(exc_value)
exc_cause_explicit = getattr(exc_value, "__cause__", True)
if tb is None:
yield {
"exc_cause": exc_cause,
"exc_cause_explicit": exc_cause_explicit,
"tb": None,
"type": "user",
}
while tb is not None:
# Support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get("__traceback_hide__"):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get("__loader__")
module_name = tb.tb_frame.f_globals.get("__name__") or ""
(
pre_context_lineno,
pre_context,
context_line,
post_context,
) = self._get_lines_from_file(
filename,
lineno,
7,
loader,
module_name,
)
if pre_context_lineno is None:
pre_context_lineno = lineno
pre_context = []
context_line = "<source code not available>"
post_context = []
yield {
"exc_cause": exc_cause,
"exc_cause_explicit": exc_cause_explicit,
"tb": tb,
"type": "django" if module_name.startswith("django.") else "user",
"filename": filename,
"function": function,
"lineno": lineno + 1,
"vars": self.filter.get_traceback_frame_variables(
self.request, tb.tb_frame
),
"id": id(tb),
"pre_context": pre_context,
"context_line": context_line,
"post_context": post_context,
"pre_context_lineno": pre_context_lineno + 1,
}
tb = tb.tb_next
def technical_404_response(request, exception):
"""Create a technical 404 error response. `exception` is the Http404."""
try:
error_url = exception.args[0]["path"]
except (IndexError, TypeError, KeyError):
error_url = request.path_info[1:] # Trim leading slash
try:
tried = exception.args[0]["tried"]
except (IndexError, TypeError, KeyError):
resolved = True
tried = request.resolver_match.tried if request.resolver_match else None
else:
resolved = False
if not tried or ( # empty URLconf
request.path == "/"
and len(tried) == 1
and len(tried[0]) == 1 # default URLconf
and getattr(tried[0][0], "app_name", "")
== getattr(tried[0][0], "namespace", "")
== "admin"
):
return default_urlconf(request)
urlconf = getattr(request, "urlconf", settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
with builtin_template_path("technical_404.html").open(encoding="utf-8") as fh:
t = DEBUG_ENGINE.from_string(fh.read())
reporter_filter = get_default_exception_reporter_filter()
c = Context(
{
"urlconf": urlconf,
"root_urlconf": settings.ROOT_URLCONF,
"request_path": error_url,
"urlpatterns": tried,
"resolved": resolved,
"reason": str(exception),
"request": request,
"settings": reporter_filter.get_safe_settings(),
"raising_view_name": get_caller(request),
}
)
return HttpResponseNotFound(t.render(c))
def default_urlconf(request):
"""Create an empty URLconf 404 error response."""
with builtin_template_path("default_urlconf.html").open(encoding="utf-8") as fh:
t = DEBUG_ENGINE.from_string(fh.read())
c = Context(
{
"version": get_docs_version(),
}
)
return HttpResponse(t.render(c))
|
e7650e2fd20c2482b9a862624fbdd55733af8608c39f2fc2e6adc09d596c88e9 | """
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import operator
import warnings
from itertools import chain, islice
from asgiref.sync import sync_to_async
import django
from django.conf import settings
from django.core import exceptions
from django.db import (
DJANGO_VERSION_PICKLE_KEY,
IntegrityError,
NotSupportedError,
connections,
router,
transaction,
)
from django.db.models import AutoField, DateField, DateTimeField, Field, sql
from django.db.models.constants import LOOKUP_SEP, OnConflict
from django.db.models.deletion import Collector
from django.db.models.expressions import Case, F, Value, When
from django.db.models.functions import Cast, Trunc
from django.db.models.query_utils import FilteredRelation, Q
from django.db.models.sql.constants import CURSOR, GET_ITERATOR_CHUNK_SIZE
from django.db.models.utils import (
AltersData,
create_namedtuple_class,
resolve_callables,
)
from django.utils import timezone
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.functional import cached_property, partition
# The maximum number of results to fetch in a get() query.
MAX_GET_RESULTS = 21
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
class BaseIterable:
def __init__(
self, queryset, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE
):
self.queryset = queryset
self.chunked_fetch = chunked_fetch
self.chunk_size = chunk_size
async def _async_generator(self):
# Generators don't actually start running until the first time you call
# next() on them, so make the generator object in the async thread and
# then repeatedly dispatch to it in a sync thread.
sync_generator = self.__iter__()
def next_slice(gen):
return list(islice(gen, self.chunk_size))
while True:
chunk = await sync_to_async(next_slice)(sync_generator)
for item in chunk:
yield item
if len(chunk) < self.chunk_size:
break
# __aiter__() is a *synchronous* method that has to then return an
# *asynchronous* iterator/generator. Thus, nest an async generator inside
# it.
# This is a generic iterable converter for now, and is going to suffer a
# performance penalty on large sets of items due to the cost of crossing
# over the sync barrier for each chunk. Custom __aiter__() methods should
# be added to each Iterable subclass, but that needs some work in the
# Compiler first.
def __aiter__(self):
return self._async_generator()
class ModelIterable(BaseIterable):
"""Iterable that yields a model instance for each row."""
def __iter__(self):
queryset = self.queryset
db = queryset.db
compiler = queryset.query.get_compiler(using=db)
# Execute the query. This will also fill compiler.select, klass_info,
# and annotations.
results = compiler.execute_sql(
chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
)
select, klass_info, annotation_col_map = (
compiler.select,
compiler.klass_info,
compiler.annotation_col_map,
)
model_cls = klass_info["model"]
select_fields = klass_info["select_fields"]
model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
init_list = [
f[0].target.attname for f in select[model_fields_start:model_fields_end]
]
related_populators = get_related_populators(klass_info, select, db)
known_related_objects = [
(
field,
related_objs,
operator.attrgetter(
*[
field.attname
if from_field == "self"
else queryset.model._meta.get_field(from_field).attname
for from_field in field.from_fields
]
),
)
for field, related_objs in queryset._known_related_objects.items()
]
for row in compiler.results_iter(results):
obj = model_cls.from_db(
db, init_list, row[model_fields_start:model_fields_end]
)
for rel_populator in related_populators:
rel_populator.populate(row, obj)
if annotation_col_map:
for attr_name, col_pos in annotation_col_map.items():
setattr(obj, attr_name, row[col_pos])
# Add the known related objects to the model.
for field, rel_objs, rel_getter in known_related_objects:
# Avoid overwriting objects loaded by, e.g., select_related().
if field.is_cached(obj):
continue
rel_obj_id = rel_getter(obj)
try:
rel_obj = rel_objs[rel_obj_id]
except KeyError:
pass # May happen in qs1 | qs2 scenarios.
else:
setattr(obj, field.name, rel_obj)
yield obj
class RawModelIterable(BaseIterable):
"""
Iterable that yields a model instance for each row from a raw queryset.
"""
def __iter__(self):
# Cache some things for performance reasons outside the loop.
db = self.queryset.db
query = self.queryset.query
connection = connections[db]
compiler = connection.ops.compiler("SQLCompiler")(query, connection, db)
query_iterator = iter(query)
try:
(
model_init_names,
model_init_pos,
annotation_fields,
) = self.queryset.resolve_model_init_order()
model_cls = self.queryset.model
if model_cls._meta.pk.attname not in model_init_names:
raise exceptions.FieldDoesNotExist(
"Raw query must include the primary key"
)
fields = [self.queryset.model_fields.get(c) for c in self.queryset.columns]
converters = compiler.get_converters(
[f.get_col(f.model._meta.db_table) if f else None for f in fields]
)
if converters:
query_iterator = compiler.apply_converters(query_iterator, converters)
for values in query_iterator:
# Associate fields to values
model_init_values = [values[pos] for pos in model_init_pos]
instance = model_cls.from_db(db, model_init_names, model_init_values)
if annotation_fields:
for column, pos in annotation_fields:
setattr(instance, column, values[pos])
yield instance
finally:
# Done iterating the Query. If it has its own cursor, close it.
if hasattr(query, "cursor") and query.cursor:
query.cursor.close()
class ValuesIterable(BaseIterable):
"""
Iterable returned by QuerySet.values() that yields a dict for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
# extra(select=...) cols are always at the start of the row.
names = [
*query.extra_select,
*query.values_select,
*query.annotation_select,
]
indexes = range(len(names))
for row in compiler.results_iter(
chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
):
yield {names[i]: row[i] for i in indexes}
class ValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=False) that yields a tuple
for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
if queryset._fields:
# extra(select=...) cols are always at the start of the row.
names = [
*query.extra_select,
*query.values_select,
*query.annotation_select,
]
fields = [
*queryset._fields,
*(f for f in query.annotation_select if f not in queryset._fields),
]
if fields != names:
# Reorder according to fields.
index_map = {name: idx for idx, name in enumerate(names)}
rowfactory = operator.itemgetter(*[index_map[f] for f in fields])
return map(
rowfactory,
compiler.results_iter(
chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
),
)
return compiler.results_iter(
tuple_expected=True,
chunked_fetch=self.chunked_fetch,
chunk_size=self.chunk_size,
)
class NamedValuesListIterable(ValuesListIterable):
"""
Iterable returned by QuerySet.values_list(named=True) that yields a
namedtuple for each row.
"""
def __iter__(self):
queryset = self.queryset
if queryset._fields:
names = queryset._fields
else:
query = queryset.query
names = [
*query.extra_select,
*query.values_select,
*query.annotation_select,
]
tuple_class = create_namedtuple_class(*names)
new = tuple.__new__
for row in super().__iter__():
yield new(tuple_class, row)
class FlatValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=True) that yields single
values.
"""
def __iter__(self):
queryset = self.queryset
compiler = queryset.query.get_compiler(queryset.db)
for row in compiler.results_iter(
chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size
):
yield row[0]
class QuerySet(AltersData):
"""Represent a lazy database lookup for a set of objects."""
def __init__(self, model=None, query=None, using=None, hints=None):
self.model = model
self._db = using
self._hints = hints or {}
self._query = query or sql.Query(self.model)
self._result_cache = None
self._sticky_filter = False
self._for_write = False
self._prefetch_related_lookups = ()
self._prefetch_done = False
self._known_related_objects = {} # {rel_field: {pk: rel_obj}}
self._iterable_class = ModelIterable
self._fields = None
self._defer_next_filter = False
self._deferred_filter = None
@property
def query(self):
if self._deferred_filter:
negate, args, kwargs = self._deferred_filter
self._filter_or_exclude_inplace(negate, args, kwargs)
self._deferred_filter = None
return self._query
@query.setter
def query(self, value):
if value.values_select:
self._iterable_class = ValuesIterable
self._query = value
def as_manager(cls):
        # Address the circular dependency between `QuerySet` and `Manager`.
from django.db.models.manager import Manager
manager = Manager.from_queryset(cls)()
manager._built_with_as_manager = True
return manager
as_manager.queryset_only = True
as_manager = classmethod(as_manager)
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""Don't populate the QuerySet's cache."""
obj = self.__class__()
for k, v in self.__dict__.items():
if k == "_result_cache":
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
# Force the cache to be fully populated.
self._fetch_all()
return {**self.__dict__, DJANGO_VERSION_PICKLE_KEY: django.__version__}
def __setstate__(self, state):
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
if pickled_version != django.__version__:
warnings.warn(
"Pickled queryset instance's Django version %s does not "
"match the current version %s."
% (pickled_version, django.__version__),
RuntimeWarning,
stacklevel=2,
)
else:
warnings.warn(
"Pickled queryset instance's Django version is not specified.",
RuntimeWarning,
stacklevel=2,
)
self.__dict__.update(state)
def __repr__(self):
data = list(self[: REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return "<%s %r>" % (self.__class__.__name__, data)
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __iter__(self):
"""
The queryset iterator protocol uses three nested iterators in the
default case:
1. sql.compiler.execute_sql()
           - Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE)
using cursor.fetchmany(). This part is responsible for
doing some column masking, and returning the rows in chunks.
2. sql.compiler.results_iter()
           - Returns one row at a time. At this point the rows are still just
tuples. In some cases the return values are converted to
Python values at this location.
3. self.iterator()
- Responsible for turning the rows into model objects.
"""
self._fetch_all()
return iter(self._result_cache)
def __aiter__(self):
# Remember, __aiter__ itself is synchronous, it's the thing it returns
# that is async!
async def generator():
await sync_to_async(self._fetch_all)()
for item in self._result_cache:
yield item
return generator()
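    # Usage sketch (illustrative): QuerySets can be consumed from async code
    # via this __aiter__(), assuming a hypothetical "Entry" model:
    #
    #     async def titles():
    #         return [entry.title async for entry in Entry.objects.all()]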
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __getitem__(self, k):
"""Retrieve an item or slice from the set of results."""
if not isinstance(k, (int, slice)):
raise TypeError(
"QuerySet indices must be integers or slices, not %s."
% type(k).__name__
)
if (isinstance(k, int) and k < 0) or (
isinstance(k, slice)
and (
(k.start is not None and k.start < 0)
or (k.stop is not None and k.stop < 0)
)
):
raise ValueError("Negative indexing is not supported.")
if self._result_cache is not None:
return self._result_cache[k]
if isinstance(k, slice):
qs = self._chain()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return list(qs)[:: k.step] if k.step else qs
qs = self._chain()
qs.query.set_limits(k, k + 1)
qs._fetch_all()
return qs._result_cache[0]
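    # Behavior sketch (illustrative): slicing maps onto SQL LIMIT/OFFSET and
    # stays lazy unless a step is given ("Entry" is a hypothetical model):
    #
    #     Entry.objects.all()[:5]     # LIMIT 5, still a lazy QuerySet
    #     Entry.objects.all()[5:10]   # OFFSET 5 LIMIT 5
    #     Entry.objects.all()[:10:2]  # step forces evaluation, returns a list
    #     Entry.objects.all()[-1]     # ValueError: negative indexing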
def __class_getitem__(cls, *args, **kwargs):
return cls
def __and__(self, other):
self._check_operator_queryset(other, "&")
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other
if isinstance(self, EmptyQuerySet):
return self
combined = self._chain()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._check_operator_queryset(other, "|")
self._merge_sanity_check(other)
if isinstance(self, EmptyQuerySet):
return other
if isinstance(other, EmptyQuerySet):
return self
query = (
self
if self.query.can_filter()
else self.model._base_manager.filter(pk__in=self.values("pk"))
)
combined = query._chain()
combined._merge_known_related_objects(other)
if not other.query.can_filter():
other = other.model._base_manager.filter(pk__in=other.values("pk"))
combined.query.combine(other.query, sql.OR)
return combined
def __xor__(self, other):
self._check_operator_queryset(other, "^")
self._merge_sanity_check(other)
if isinstance(self, EmptyQuerySet):
return other
if isinstance(other, EmptyQuerySet):
return self
query = (
self
if self.query.can_filter()
else self.model._base_manager.filter(pk__in=self.values("pk"))
)
combined = query._chain()
combined._merge_known_related_objects(other)
if not other.query.can_filter():
other = other.model._base_manager.filter(pk__in=other.values("pk"))
combined.query.combine(other.query, sql.XOR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def _iterator(self, use_chunked_fetch, chunk_size):
iterable = self._iterable_class(
self,
chunked_fetch=use_chunked_fetch,
chunk_size=chunk_size or 2000,
)
if not self._prefetch_related_lookups or chunk_size is None:
yield from iterable
return
iterator = iter(iterable)
while results := list(islice(iterator, chunk_size)):
prefetch_related_objects(results, *self._prefetch_related_lookups)
yield from results
def iterator(self, chunk_size=None):
"""
An iterator over the results from applying this QuerySet to the
database. chunk_size must be provided for QuerySets that prefetch
related objects. Otherwise, a default chunk_size of 2000 is supplied.
"""
if chunk_size is None:
if self._prefetch_related_lookups:
# When the deprecation ends, replace with:
# raise ValueError(
# 'chunk_size must be provided when using '
# 'QuerySet.iterator() after prefetch_related().'
# )
warnings.warn(
"Using QuerySet.iterator() after prefetch_related() "
"without specifying chunk_size is deprecated.",
category=RemovedInDjango50Warning,
stacklevel=2,
)
elif chunk_size <= 0:
raise ValueError("Chunk size must be strictly positive.")
use_chunked_fetch = not connections[self.db].settings_dict.get(
"DISABLE_SERVER_SIDE_CURSORS"
)
return self._iterator(use_chunked_fetch, chunk_size)
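    # Usage sketch (illustrative): stream rows without filling the result
    # cache. After prefetch_related(), omitting chunk_size is deprecated (and
    # slated to become an error), so pass it explicitly ("Entry" and "tags"
    # are hypothetical):
    #
    #     for entry in Entry.objects.iterator(chunk_size=500):
    #         ...
    #
    #     qs = Entry.objects.prefetch_related("tags")
    #     for entry in qs.iterator(chunk_size=500):
    #         ...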
async def aiterator(self, chunk_size=2000):
"""
An asynchronous iterator over the results from applying this QuerySet
to the database.
"""
if self._prefetch_related_lookups:
raise NotSupportedError(
"Using QuerySet.aiterator() after prefetch_related() is not supported."
)
if chunk_size <= 0:
raise ValueError("Chunk size must be strictly positive.")
use_chunked_fetch = not connections[self.db].settings_dict.get(
"DISABLE_SERVER_SIDE_CURSORS"
)
async for item in self._iterable_class(
self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size
):
yield item
def aggregate(self, *args, **kwargs):
"""
Return a dictionary containing the calculations (aggregation)
over the current queryset.
If args is present the expression is passed as a kwarg using
the Aggregate object's default alias.
"""
if self.query.distinct_fields:
raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
self._validate_values_are_expressions(
(*args, *kwargs.values()), method_name="aggregate"
)
for arg in args:
# The default_alias property raises TypeError if default_alias
# can't be set automatically or AttributeError if it isn't an
# attribute.
try:
arg.default_alias
except (AttributeError, TypeError):
raise TypeError("Complex aggregates require an alias")
kwargs[arg.default_alias] = arg
return self.query.chain().get_aggregation(self.db, kwargs)
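    # Usage sketch (illustrative): positional aggregates use their default
    # alias, keyword arguments name the result ("Entry" is hypothetical):
    #
    #     from django.db.models import Avg, Count
    #     Entry.objects.aggregate(Count("id"))           # {"id__count": ...}
    #     Entry.objects.aggregate(avg_rating=Avg("rating"))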
async def aaggregate(self, *args, **kwargs):
return await sync_to_async(self.aggregate)(*args, **kwargs)
def count(self):
"""
Perform a SELECT COUNT() and return the number of records as an
integer.
If the QuerySet is already fully cached, return the length of the
cached results set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None:
return len(self._result_cache)
return self.query.get_count(using=self.db)
async def acount(self):
return await sync_to_async(self.count)()
def get(self, *args, **kwargs):
"""
Perform the query and return a single object matching the given
keyword arguments.
"""
if self.query.combinator and (args or kwargs):
raise NotSupportedError(
"Calling QuerySet.get(...) with filters after %s() is not "
"supported." % self.query.combinator
)
clone = self._chain() if self.query.combinator else self.filter(*args, **kwargs)
if self.query.can_filter() and not self.query.distinct_fields:
clone = clone.order_by()
limit = None
if (
not clone.query.select_for_update
or connections[clone.db].features.supports_select_for_update_with_limit
):
limit = MAX_GET_RESULTS
clone.query.set_limits(high=limit)
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.model.DoesNotExist(
"%s matching query does not exist." % self.model._meta.object_name
)
raise self.model.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s!"
% (
self.model._meta.object_name,
num if not limit or num < limit else "more than %s" % (limit - 1),
)
)
async def aget(self, *args, **kwargs):
return await sync_to_async(self.get)(*args, **kwargs)
def create(self, **kwargs):
"""
Create a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
async def acreate(self, **kwargs):
return await sync_to_async(self.create)(**kwargs)
def _prepare_for_bulk_create(self, objs):
for obj in objs:
if obj.pk is None:
# Populate new PK values.
obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
obj._prepare_related_fields_for_save(operation_name="bulk_create")
def _check_bulk_create_options(
self, ignore_conflicts, update_conflicts, update_fields, unique_fields
):
if ignore_conflicts and update_conflicts:
raise ValueError(
"ignore_conflicts and update_conflicts are mutually exclusive."
)
db_features = connections[self.db].features
if ignore_conflicts:
if not db_features.supports_ignore_conflicts:
raise NotSupportedError(
"This database backend does not support ignoring conflicts."
)
return OnConflict.IGNORE
elif update_conflicts:
if not db_features.supports_update_conflicts:
raise NotSupportedError(
"This database backend does not support updating conflicts."
)
if not update_fields:
raise ValueError(
"Fields that will be updated when a row insertion fails "
"on conflicts must be provided."
)
if unique_fields and not db_features.supports_update_conflicts_with_target:
raise NotSupportedError(
"This database backend does not support updating "
"conflicts with specifying unique fields that can trigger "
"the upsert."
)
if not unique_fields and db_features.supports_update_conflicts_with_target:
raise ValueError(
"Unique fields that can trigger the upsert must be provided."
)
# Updating primary keys and non-concrete fields is forbidden.
if any(not f.concrete or f.many_to_many for f in update_fields):
raise ValueError(
"bulk_create() can only be used with concrete fields in "
"update_fields."
)
if any(f.primary_key for f in update_fields):
raise ValueError(
"bulk_create() cannot be used with primary keys in "
"update_fields."
)
if unique_fields:
if any(not f.concrete or f.many_to_many for f in unique_fields):
raise ValueError(
"bulk_create() can only be used with concrete fields "
"in unique_fields."
)
return OnConflict.UPDATE
return None
def bulk_create(
self,
objs,
batch_size=None,
ignore_conflicts=False,
update_conflicts=False,
update_fields=None,
unique_fields=None,
):
"""
Insert each of the instances into the database. Do *not* call
save() on each of the instances, do not send any pre/post_save
signals, and do not set the primary key attribute if it is an
autoincrement field (except if features.can_return_rows_from_bulk_insert=True).
Multi-table models are not supported.
"""
# When you bulk insert you don't get the primary keys back (if it's an
# autoincrement, except if can_return_rows_from_bulk_insert=True), so
# you can't insert into the child tables which references this. There
# are two workarounds:
# 1) This could be implemented if you didn't have an autoincrement pk
# 2) You could do it by doing O(n) normal inserts into the parent
# tables to get the primary keys back and then doing a single bulk
# insert into the childmost table.
# We currently set the primary keys on the objects when using
# PostgreSQL via the RETURNING ID clause. It should be possible for
# Oracle as well, but the semantics for extracting the primary keys is
# trickier so it's not done yet.
if batch_size is not None and batch_size <= 0:
raise ValueError("Batch size must be a positive integer.")
        # Check that the parents share the same concrete model with our
# model to detect the inheritance pattern ConcreteGrandParent ->
# MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy
# would not identify that case as involving multiple tables.
for parent in self.model._meta.get_parent_list():
if parent._meta.concrete_model is not self.model._meta.concrete_model:
raise ValueError("Can't bulk create a multi-table inherited model")
if not objs:
return objs
opts = self.model._meta
if unique_fields:
# Primary key is allowed in unique_fields.
unique_fields = [
self.model._meta.get_field(opts.pk.name if name == "pk" else name)
for name in unique_fields
]
if update_fields:
update_fields = [self.model._meta.get_field(name) for name in update_fields]
on_conflict = self._check_bulk_create_options(
ignore_conflicts,
update_conflicts,
update_fields,
unique_fields,
)
self._for_write = True
fields = opts.concrete_fields
objs = list(objs)
self._prepare_for_bulk_create(objs)
with transaction.atomic(using=self.db, savepoint=False):
objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
if objs_with_pk:
returned_columns = self._batched_insert(
objs_with_pk,
fields,
batch_size,
on_conflict=on_conflict,
update_fields=update_fields,
unique_fields=unique_fields,
)
for obj_with_pk, results in zip(objs_with_pk, returned_columns):
for result, field in zip(results, opts.db_returning_fields):
if field != opts.pk:
setattr(obj_with_pk, field.attname, result)
for obj_with_pk in objs_with_pk:
obj_with_pk._state.adding = False
obj_with_pk._state.db = self.db
if objs_without_pk:
fields = [f for f in fields if not isinstance(f, AutoField)]
returned_columns = self._batched_insert(
objs_without_pk,
fields,
batch_size,
on_conflict=on_conflict,
update_fields=update_fields,
unique_fields=unique_fields,
)
connection = connections[self.db]
if (
connection.features.can_return_rows_from_bulk_insert
and on_conflict is None
):
assert len(returned_columns) == len(objs_without_pk)
for obj_without_pk, results in zip(objs_without_pk, returned_columns):
for result, field in zip(results, opts.db_returning_fields):
setattr(obj_without_pk, field.attname, result)
obj_without_pk._state.adding = False
obj_without_pk._state.db = self.db
return objs
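    # Usage sketch (illustrative): insert many rows in a handful of queries;
    # with update_conflicts the INSERT becomes an upsert on backends that
    # support conflict targets ("Entry" and its fields are hypothetical):
    #
    #     Entry.objects.bulk_create(
    #         [Entry(slug="a", hits=1), Entry(slug="b", hits=2)],
    #         update_conflicts=True,
    #         unique_fields=["slug"],
    #         update_fields=["hits"],
    #     )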
async def abulk_create(
self,
objs,
batch_size=None,
ignore_conflicts=False,
update_conflicts=False,
update_fields=None,
unique_fields=None,
):
return await sync_to_async(self.bulk_create)(
objs=objs,
batch_size=batch_size,
ignore_conflicts=ignore_conflicts,
update_conflicts=update_conflicts,
update_fields=update_fields,
unique_fields=unique_fields,
)
def bulk_update(self, objs, fields, batch_size=None):
"""
Update the given fields in each of the given objects in the database.
"""
if batch_size is not None and batch_size <= 0:
raise ValueError("Batch size must be a positive integer.")
if not fields:
raise ValueError("Field names must be given to bulk_update().")
objs = tuple(objs)
if any(obj.pk is None for obj in objs):
raise ValueError("All bulk_update() objects must have a primary key set.")
fields = [self.model._meta.get_field(name) for name in fields]
if any(not f.concrete or f.many_to_many for f in fields):
raise ValueError("bulk_update() can only be used with concrete fields.")
if any(f.primary_key for f in fields):
raise ValueError("bulk_update() cannot be used with primary key fields.")
if not objs:
return 0
for obj in objs:
obj._prepare_related_fields_for_save(
operation_name="bulk_update", fields=fields
)
# PK is used twice in the resulting update query, once in the filter
# and once in the WHEN. Each field will also have one CAST.
self._for_write = True
connection = connections[self.db]
max_batch_size = connection.ops.bulk_batch_size(["pk", "pk"] + fields, objs)
batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size
requires_casting = connection.features.requires_casted_case_in_updates
batches = (objs[i : i + batch_size] for i in range(0, len(objs), batch_size))
updates = []
for batch_objs in batches:
update_kwargs = {}
for field in fields:
when_statements = []
for obj in batch_objs:
attr = getattr(obj, field.attname)
if not hasattr(attr, "resolve_expression"):
attr = Value(attr, output_field=field)
when_statements.append(When(pk=obj.pk, then=attr))
case_statement = Case(*when_statements, output_field=field)
if requires_casting:
case_statement = Cast(case_statement, output_field=field)
update_kwargs[field.attname] = case_statement
updates.append(([obj.pk for obj in batch_objs], update_kwargs))
rows_updated = 0
queryset = self.using(self.db)
with transaction.atomic(using=self.db, savepoint=False):
for pks, update_kwargs in updates:
rows_updated += queryset.filter(pk__in=pks).update(**update_kwargs)
return rows_updated
bulk_update.alters_data = True
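    # Usage sketch for bulk_update() (illustrative only; ``Book`` and its
    # ``price`` field are hypothetical). Each batch becomes a single UPDATE
    # built from CASE/WHEN clauses keyed on pk:
    #
    #   books = list(Book.objects.all())
    #   for book in books:
    #       book.price += 1
    #   updated = Book.objects.bulk_update(books, ["price"], batch_size=500)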
async def abulk_update(self, objs, fields, batch_size=None):
return await sync_to_async(self.bulk_update)(
objs=objs,
fields=fields,
batch_size=batch_size,
)
abulk_update.alters_data = True
def get_or_create(self, defaults=None, **kwargs):
"""
Look up an object with the given kwargs, creating one if necessary.
Return a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
# The get() needs to be targeted at the write database in order
# to avoid potential transaction consistency problems.
self._for_write = True
try:
return self.get(**kwargs), False
except self.model.DoesNotExist:
params = self._extract_model_params(defaults, **kwargs)
# Try to create an object using passed params.
try:
with transaction.atomic(using=self.db):
params = dict(resolve_callables(params))
return self.create(**params), True
except IntegrityError:
try:
return self.get(**kwargs), False
except self.model.DoesNotExist:
pass
raise
async def aget_or_create(self, defaults=None, **kwargs):
return await sync_to_async(self.get_or_create)(
defaults=defaults,
**kwargs,
)
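    # Usage sketch for get_or_create() (illustrative only; ``Author`` is a
    # hypothetical model). The lookup kwargs identify the row; ``defaults``
    # are only applied when a new row has to be created:
    #
    #   author, created = Author.objects.get_or_create(
    #       name="Ada", defaults={"country": "UK"}
    #   )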
def update_or_create(self, defaults=None, **kwargs):
"""
Look up an object with the given kwargs, updating one with defaults
if it exists, otherwise create a new one.
Return a tuple (object, created), where created is a boolean
specifying whether an object was created.
"""
defaults = defaults or {}
self._for_write = True
with transaction.atomic(using=self.db):
# Lock the row so that a concurrent update is blocked until
# update_or_create() has performed its save.
obj, created = self.select_for_update().get_or_create(defaults, **kwargs)
if created:
return obj, created
for k, v in resolve_callables(defaults):
setattr(obj, k, v)
update_fields = set(defaults)
concrete_field_names = self.model._meta._non_pk_concrete_field_names
# update_fields does not support non-concrete fields.
if concrete_field_names.issuperset(update_fields):
# Add fields which are set on pre_save(), e.g. auto_now fields.
# This is to maintain backward compatibility as these fields
# are not updated unless explicitly specified in the
# update_fields list.
for field in self.model._meta.local_concrete_fields:
if not (
field.primary_key or field.__class__.pre_save is Field.pre_save
):
update_fields.add(field.name)
if field.name != field.attname:
update_fields.add(field.attname)
obj.save(using=self.db, update_fields=update_fields)
else:
obj.save(using=self.db)
return obj, False
async def aupdate_or_create(self, defaults=None, **kwargs):
return await sync_to_async(self.update_or_create)(
defaults=defaults,
**kwargs,
)
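    # Usage sketch for update_or_create() (illustrative only; ``Author`` is
    # hypothetical). An existing row is locked with SELECT ... FOR UPDATE
    # before the ``defaults`` are applied and saved:
    #
    #   author, created = Author.objects.update_or_create(
    #       name="Ada", defaults={"country": "IE"}
    #   )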
def _extract_model_params(self, defaults, **kwargs):
"""
Prepare `params` for creating a model instance based on the given
kwargs; for use by get_or_create().
"""
defaults = defaults or {}
params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
params.update(defaults)
property_names = self.model._meta._property_names
invalid_params = []
for param in params:
try:
self.model._meta.get_field(param)
except exceptions.FieldDoesNotExist:
# It's okay to use a model's property if it has a setter.
if not (param in property_names and getattr(self.model, param).fset):
invalid_params.append(param)
if invalid_params:
raise exceptions.FieldError(
"Invalid field name(s) for model %s: '%s'."
% (
self.model._meta.object_name,
"', '".join(sorted(invalid_params)),
)
)
return params
def _earliest(self, *fields):
"""
Return the earliest object according to fields (if given) or by the
model's Meta.get_latest_by.
"""
if fields:
order_by = fields
else:
order_by = getattr(self.model._meta, "get_latest_by")
if order_by and not isinstance(order_by, (tuple, list)):
order_by = (order_by,)
if order_by is None:
raise ValueError(
"earliest() and latest() require either fields as positional "
"arguments or 'get_latest_by' in the model's Meta."
)
obj = self._chain()
obj.query.set_limits(high=1)
obj.query.clear_ordering(force=True)
obj.query.add_ordering(*order_by)
return obj.get()
def earliest(self, *fields):
if self.query.is_sliced:
raise TypeError("Cannot change a query once a slice has been taken.")
return self._earliest(*fields)
async def aearliest(self, *fields):
return await sync_to_async(self.earliest)(*fields)
def latest(self, *fields):
"""
Return the latest object according to fields (if given) or by the
model's Meta.get_latest_by.
"""
if self.query.is_sliced:
raise TypeError("Cannot change a query once a slice has been taken.")
return self.reverse()._earliest(*fields)
async def alatest(self, *fields):
return await sync_to_async(self.latest)(*fields)
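    # Usage sketch for earliest()/latest() (illustrative only; ``Article``
    # with a ``pub_date`` field is hypothetical). With no arguments, the
    # model's Meta.get_latest_by is used:
    #
    #   Article.objects.earliest("pub_date")  # oldest pub_date
    #   Article.objects.latest("pub_date")    # newest pub_date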
def first(self):
"""Return the first object of a query or None if no match is found."""
if self.ordered:
queryset = self
else:
self._check_ordering_first_last_queryset_aggregation(method="first")
queryset = self.order_by("pk")
for obj in queryset[:1]:
return obj
async def afirst(self):
return await sync_to_async(self.first)()
def last(self):
"""Return the last object of a query or None if no match is found."""
if self.ordered:
queryset = self.reverse()
else:
self._check_ordering_first_last_queryset_aggregation(method="last")
queryset = self.order_by("-pk")
for obj in queryset[:1]:
return obj
async def alast(self):
return await sync_to_async(self.last)()
def in_bulk(self, id_list=None, *, field_name="pk"):
"""
Return a dictionary mapping each of the given IDs to the object with
that ID. If `id_list` isn't provided, evaluate the entire QuerySet.
"""
if self.query.is_sliced:
raise TypeError("Cannot use 'limit' or 'offset' with in_bulk().")
opts = self.model._meta
unique_fields = [
constraint.fields[0]
for constraint in opts.total_unique_constraints
if len(constraint.fields) == 1
]
if (
field_name != "pk"
and not opts.get_field(field_name).unique
and field_name not in unique_fields
and self.query.distinct_fields != (field_name,)
):
raise ValueError(
"in_bulk()'s field_name must be a unique field but %r isn't."
% field_name
)
if id_list is not None:
if not id_list:
return {}
filter_key = "{}__in".format(field_name)
batch_size = connections[self.db].features.max_query_params
id_list = tuple(id_list)
# If the database has a limit on the number of query parameters
# (e.g. SQLite), retrieve objects in batches if necessary.
if batch_size and batch_size < len(id_list):
qs = ()
for offset in range(0, len(id_list), batch_size):
batch = id_list[offset : offset + batch_size]
qs += tuple(self.filter(**{filter_key: batch}).order_by())
else:
qs = self.filter(**{filter_key: id_list}).order_by()
else:
qs = self._chain()
return {getattr(obj, field_name): obj for obj in qs}
async def ain_bulk(self, id_list=None, *, field_name="pk"):
return await sync_to_async(self.in_bulk)(
id_list=id_list,
field_name=field_name,
)
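    # Usage sketch for in_bulk() (illustrative only; ``Book`` with a unique
    # ``slug`` field is hypothetical). field_name must be unique:
    #
    #   Book.objects.in_bulk([1, 2])                      # {1: <Book>, 2: <Book>}
    #   Book.objects.in_bulk(["dune"], field_name="slug")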
def delete(self):
"""Delete the records in the current QuerySet."""
self._not_support_combined_queries("delete")
if self.query.is_sliced:
raise TypeError("Cannot use 'limit' or 'offset' with delete().")
if self.query.distinct or self.query.distinct_fields:
raise TypeError("Cannot call delete() after .distinct().")
if self._fields is not None:
raise TypeError("Cannot call delete() after .values() or .values_list()")
del_query = self._chain()
# The delete is actually 2 queries - one to find related objects,
# and one to delete. Make sure that the discovery of related
# objects is performed on the same database as the deletion.
del_query._for_write = True
# Disable non-supported fields.
del_query.query.select_for_update = False
del_query.query.select_related = False
del_query.query.clear_ordering(force=True)
collector = Collector(using=del_query.db, origin=self)
collector.collect(del_query)
deleted, _rows_count = collector.delete()
# Clear the result cache, in case this QuerySet gets reused.
self._result_cache = None
return deleted, _rows_count
delete.alters_data = True
delete.queryset_only = True
async def adelete(self):
return await sync_to_async(self.delete)()
adelete.alters_data = True
adelete.queryset_only = True
def _raw_delete(self, using):
"""
        Delete objects found from the given queryset in a single direct SQL
        query. No signals are sent and there is no protection for cascades.
"""
query = self.query.clone()
query.__class__ = sql.DeleteQuery
cursor = query.get_compiler(using).execute_sql(CURSOR)
if cursor:
with cursor:
return cursor.rowcount
return 0
_raw_delete.alters_data = True
def update(self, **kwargs):
"""
Update all elements in the current QuerySet, setting all the given
fields to the appropriate values.
"""
self._not_support_combined_queries("update")
if self.query.is_sliced:
raise TypeError("Cannot update a query once a slice has been taken.")
self._for_write = True
query = self.query.chain(sql.UpdateQuery)
query.add_update_values(kwargs)
# Inline annotations in order_by(), if possible.
new_order_by = []
for col in query.order_by:
if annotation := query.annotations.get(col):
if getattr(annotation, "contains_aggregate", False):
raise exceptions.FieldError(
f"Cannot update when ordering by an aggregate: {annotation}"
)
new_order_by.append(annotation)
else:
new_order_by.append(col)
query.order_by = tuple(new_order_by)
# Clear any annotations so that they won't be present in subqueries.
query.annotations = {}
with transaction.mark_for_rollback_on_error(using=self.db):
rows = query.get_compiler(self.db).execute_sql(CURSOR)
self._result_cache = None
return rows
update.alters_data = True
async def aupdate(self, **kwargs):
return await sync_to_async(self.update)(**kwargs)
aupdate.alters_data = True
def _update(self, values):
"""
A version of update() that accepts field objects instead of field names.
Used primarily for model saving and not intended for use by general
code (it requires too much poking around at model internals to be
useful at that level).
"""
if self.query.is_sliced:
raise TypeError("Cannot update a query once a slice has been taken.")
query = self.query.chain(sql.UpdateQuery)
query.add_update_fields(values)
# Clear any annotations so that they won't be present in subqueries.
query.annotations = {}
self._result_cache = None
return query.get_compiler(self.db).execute_sql(CURSOR)
_update.alters_data = True
_update.queryset_only = False
def exists(self):
"""
Return True if the QuerySet would have any results, False otherwise.
"""
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
async def aexists(self):
return await sync_to_async(self.exists)()
def contains(self, obj):
"""
Return True if the QuerySet contains the provided obj,
False otherwise.
"""
self._not_support_combined_queries("contains")
if self._fields is not None:
raise TypeError(
"Cannot call QuerySet.contains() after .values() or .values_list()."
)
try:
if obj._meta.concrete_model != self.model._meta.concrete_model:
return False
except AttributeError:
raise TypeError("'obj' must be a model instance.")
if obj.pk is None:
raise ValueError("QuerySet.contains() cannot be used on unsaved objects.")
if self._result_cache is not None:
return obj in self._result_cache
return self.filter(pk=obj.pk).exists()
async def acontains(self, obj):
return await sync_to_async(self.contains)(obj=obj)
def _prefetch_related_objects(self):
# This method can only be called once the result cache has been filled.
prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
self._prefetch_done = True
def explain(self, *, format=None, **options):
"""
        Run an EXPLAIN on the SQL query this QuerySet would perform, and
        return the results.
"""
return self.query.explain(using=self.db, format=format, **options)
async def aexplain(self, *, format=None, **options):
return await sync_to_async(self.explain)(format=format, **options)
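    # Usage sketch for explain() (illustrative only; ``Book`` is
    # hypothetical). The accepted formats and options depend on the
    # database backend:
    #
    #   print(Book.objects.filter(title__startswith="D").explain())
    #   print(Book.objects.all().explain(format="json"))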
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def raw(self, raw_query, params=(), translations=None, using=None):
if using is None:
using = self.db
qs = RawQuerySet(
raw_query,
model=self.model,
params=params,
translations=translations,
using=using,
)
qs._prefetch_related_lookups = self._prefetch_related_lookups[:]
return qs
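    # Usage sketch for raw() (illustrative only; ``Book`` and the
    # ``myapp_book`` table are hypothetical). Params are passed separately
    # so the database driver can quote them safely:
    #
    #   Book.objects.raw(
    #       "SELECT * FROM myapp_book WHERE title = %s", ["Dune"]
    #   )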
def _values(self, *fields, **expressions):
clone = self._chain()
if expressions:
clone = clone.annotate(**expressions)
clone._fields = fields
clone.query.set_values(fields)
return clone
def values(self, *fields, **expressions):
fields += tuple(expressions)
clone = self._values(*fields, **expressions)
clone._iterable_class = ValuesIterable
return clone
def values_list(self, *fields, flat=False, named=False):
if flat and named:
raise TypeError("'flat' and 'named' can't be used together.")
if flat and len(fields) > 1:
raise TypeError(
"'flat' is not valid when values_list is called with more than one "
"field."
)
field_names = {f for f in fields if not hasattr(f, "resolve_expression")}
_fields = []
expressions = {}
counter = 1
for field in fields:
if hasattr(field, "resolve_expression"):
field_id_prefix = getattr(
field, "default_alias", field.__class__.__name__.lower()
)
while True:
field_id = field_id_prefix + str(counter)
counter += 1
if field_id not in field_names:
break
expressions[field_id] = field
_fields.append(field_id)
else:
_fields.append(field)
clone = self._values(*_fields, **expressions)
clone._iterable_class = (
NamedValuesListIterable
if named
else FlatValuesListIterable
if flat
else ValuesListIterable
)
return clone
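    # Usage sketch for values()/values_list() (illustrative only; ``Book``
    # is hypothetical; Lower is django.db.models.functions.Lower).
    # Positional expressions get generated aliases such as ``lower1``:
    #
    #   Book.objects.values("title")                   # dicts
    #   Book.objects.values_list("title", flat=True)   # plain values
    #   Book.objects.values_list(Lower("title"), named=True)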
def dates(self, field_name, kind, order="ASC"):
"""
Return a list of date objects representing all available dates for
the given field_name, scoped to 'kind'.
"""
if kind not in ("year", "month", "week", "day"):
raise ValueError("'kind' must be one of 'year', 'month', 'week', or 'day'.")
if order not in ("ASC", "DESC"):
raise ValueError("'order' must be either 'ASC' or 'DESC'.")
return (
self.annotate(
datefield=Trunc(field_name, kind, output_field=DateField()),
plain_field=F(field_name),
)
.values_list("datefield", flat=True)
.distinct()
.filter(plain_field__isnull=False)
.order_by(("-" if order == "DESC" else "") + "datefield")
)
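    # Usage sketch for dates() (illustrative only; ``Article`` with a
    # ``pub_date`` field is hypothetical). Returns distinct, truncated
    # date objects:
    #
    #   Article.objects.dates("pub_date", "month", order="DESC")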
# RemovedInDjango50Warning: when the deprecation ends, remove is_dst
# argument.
def datetimes(
self, field_name, kind, order="ASC", tzinfo=None, is_dst=timezone.NOT_PASSED
):
"""
Return a list of datetime objects representing all available
datetimes for the given field_name, scoped to 'kind'.
"""
if kind not in ("year", "month", "week", "day", "hour", "minute", "second"):
raise ValueError(
"'kind' must be one of 'year', 'month', 'week', 'day', "
"'hour', 'minute', or 'second'."
)
if order not in ("ASC", "DESC"):
raise ValueError("'order' must be either 'ASC' or 'DESC'.")
if settings.USE_TZ:
if tzinfo is None:
tzinfo = timezone.get_current_timezone()
else:
tzinfo = None
return (
self.annotate(
datetimefield=Trunc(
field_name,
kind,
output_field=DateTimeField(),
tzinfo=tzinfo,
is_dst=is_dst,
),
plain_field=F(field_name),
)
.values_list("datetimefield", flat=True)
.distinct()
.filter(plain_field__isnull=False)
.order_by(("-" if order == "DESC" else "") + "datetimefield")
)
def none(self):
"""Return an empty QuerySet."""
clone = self._chain()
clone.query.set_empty()
return clone
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Return a new QuerySet that is a copy of the current one. This allows a
QuerySet to proxy for a model manager in some cases.
"""
return self._chain()
def filter(self, *args, **kwargs):
"""
Return a new QuerySet instance with the args ANDed to the existing
set.
"""
self._not_support_combined_queries("filter")
return self._filter_or_exclude(False, args, kwargs)
def exclude(self, *args, **kwargs):
"""
Return a new QuerySet instance with NOT (args) ANDed to the existing
set.
"""
self._not_support_combined_queries("exclude")
return self._filter_or_exclude(True, args, kwargs)
def _filter_or_exclude(self, negate, args, kwargs):
if (args or kwargs) and self.query.is_sliced:
raise TypeError("Cannot filter a query once a slice has been taken.")
clone = self._chain()
if self._defer_next_filter:
self._defer_next_filter = False
clone._deferred_filter = negate, args, kwargs
else:
clone._filter_or_exclude_inplace(negate, args, kwargs)
return clone
def _filter_or_exclude_inplace(self, negate, args, kwargs):
if negate:
self._query.add_q(~Q(*args, **kwargs))
else:
self._query.add_q(Q(*args, **kwargs))
def complex_filter(self, filter_obj):
"""
Return a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object or a dictionary of keyword lookup
arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q):
clone = self._chain()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(False, args=(), kwargs=filter_obj)
def _combinator_query(self, combinator, *other_qs, all=False):
# Clone the query to inherit the select list and everything
clone = self._chain()
# Clear limits and ordering so they can be reapplied
clone.query.clear_ordering(force=True)
clone.query.clear_limits()
clone.query.combined_queries = (self.query,) + tuple(
qs.query for qs in other_qs
)
clone.query.combinator = combinator
clone.query.combinator_all = all
return clone
def union(self, *other_qs, all=False):
# If the query is an EmptyQuerySet, combine all nonempty querysets.
if isinstance(self, EmptyQuerySet):
qs = [q for q in other_qs if not isinstance(q, EmptyQuerySet)]
if not qs:
return self
if len(qs) == 1:
return qs[0]
return qs[0]._combinator_query("union", *qs[1:], all=all)
return self._combinator_query("union", *other_qs, all=all)
def intersection(self, *other_qs):
# If any query is an EmptyQuerySet, return it.
if isinstance(self, EmptyQuerySet):
return self
for other in other_qs:
if isinstance(other, EmptyQuerySet):
return other
return self._combinator_query("intersection", *other_qs)
def difference(self, *other_qs):
# If the query is an EmptyQuerySet, return it.
if isinstance(self, EmptyQuerySet):
return self
return self._combinator_query("difference", *other_qs)
def select_for_update(self, nowait=False, skip_locked=False, of=(), no_key=False):
"""
Return a new QuerySet instance that will select objects with a
FOR UPDATE lock.
"""
if nowait and skip_locked:
raise ValueError("The nowait option cannot be used with skip_locked.")
obj = self._chain()
obj._for_write = True
obj.query.select_for_update = True
obj.query.select_for_update_nowait = nowait
obj.query.select_for_update_skip_locked = skip_locked
obj.query.select_for_update_of = of
obj.query.select_for_no_key_update = no_key
return obj
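    # Usage sketch for select_for_update() (illustrative only; ``Book`` with
    # a ``stock`` field is hypothetical). Most backends require an enclosing
    # transaction:
    #
    #   with transaction.atomic():
    #       book = Book.objects.select_for_update().get(pk=1)
    #       book.stock -= 1
    #       book.save()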
def select_related(self, *fields):
"""
Return a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
If select_related(None) is called, clear the list.
"""
self._not_support_combined_queries("select_related")
if self._fields is not None:
raise TypeError(
"Cannot call select_related() after .values() or .values_list()"
)
obj = self._chain()
if fields == (None,):
obj.query.select_related = False
elif fields:
obj.query.add_select_related(fields)
else:
obj.query.select_related = True
return obj
def prefetch_related(self, *lookups):
"""
Return a new QuerySet instance that will prefetch the specified
Many-To-One and Many-To-Many related objects when the QuerySet is
evaluated.
When prefetch_related() is called more than once, append to the list of
prefetch lookups. If prefetch_related(None) is called, clear the list.
"""
self._not_support_combined_queries("prefetch_related")
clone = self._chain()
if lookups == (None,):
clone._prefetch_related_lookups = ()
else:
for lookup in lookups:
if isinstance(lookup, Prefetch):
lookup = lookup.prefetch_to
lookup = lookup.split(LOOKUP_SEP, 1)[0]
if lookup in self.query._filtered_relations:
raise ValueError(
"prefetch_related() is not supported with FilteredRelation."
)
clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
return clone
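    # Usage sketch for prefetch_related() (illustrative only; ``Author``
    # with a ``books`` reverse relation is hypothetical). Each relation is
    # fetched in one extra query and joined to the instances in Python:
    #
    #   authors = Author.objects.prefetch_related("books", "books__publisher")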
def annotate(self, *args, **kwargs):
"""
Return a query set in which the returned objects have been annotated
with extra data or aggregations.
"""
self._not_support_combined_queries("annotate")
return self._annotate(args, kwargs, select=True)
def alias(self, *args, **kwargs):
"""
Return a query set with added aliases for extra data or aggregations.
"""
self._not_support_combined_queries("alias")
return self._annotate(args, kwargs, select=False)
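    # Usage sketch for annotate()/alias() (illustrative only; ``Author``
    # with a ``books`` relation is hypothetical; Count is
    # django.db.models.Count). alias() computes the expression without
    # selecting it:
    #
    #   Author.objects.annotate(num_books=Count("books"))
    #   Author.objects.alias(num_books=Count("books")).filter(num_books__gt=3)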
def _annotate(self, args, kwargs, select=True):
self._validate_values_are_expressions(
args + tuple(kwargs.values()), method_name="annotate"
)
annotations = {}
for arg in args:
# The default_alias property may raise a TypeError.
try:
if arg.default_alias in kwargs:
raise ValueError(
"The named annotation '%s' conflicts with the "
"default name for another annotation." % arg.default_alias
)
except TypeError:
raise TypeError("Complex annotations require an alias")
annotations[arg.default_alias] = arg
annotations.update(kwargs)
clone = self._chain()
names = self._fields
if names is None:
names = set(
chain.from_iterable(
(field.name, field.attname)
if hasattr(field, "attname")
else (field.name,)
for field in self.model._meta.get_fields()
)
)
for alias, annotation in annotations.items():
if alias in names:
raise ValueError(
"The annotation '%s' conflicts with a field on "
"the model." % alias
)
if isinstance(annotation, FilteredRelation):
clone.query.add_filtered_relation(annotation, alias)
else:
clone.query.add_annotation(
annotation,
alias,
select=select,
)
for alias, annotation in clone.query.annotations.items():
if alias in annotations and annotation.contains_aggregate:
if clone._fields is None:
clone.query.group_by = True
else:
clone.query.set_group_by()
break
return clone
def order_by(self, *field_names):
"""Return a new QuerySet instance with the ordering changed."""
if self.query.is_sliced:
raise TypeError("Cannot reorder a query once a slice has been taken.")
obj = self._chain()
obj.query.clear_ordering(force=True, clear_default=False)
obj.query.add_ordering(*field_names)
return obj
def distinct(self, *field_names):
"""
Return a new QuerySet instance that will select only distinct results.
"""
self._not_support_combined_queries("distinct")
if self.query.is_sliced:
raise TypeError(
"Cannot create distinct fields once a slice has been taken."
)
obj = self._chain()
obj.query.add_distinct_fields(*field_names)
return obj
def extra(
self,
select=None,
where=None,
params=None,
tables=None,
order_by=None,
select_params=None,
):
"""Add extra SQL fragments to the query."""
self._not_support_combined_queries("extra")
if self.query.is_sliced:
raise TypeError("Cannot change a query once a slice has been taken.")
clone = self._chain()
clone.query.add_extra(select, select_params, where, params, tables, order_by)
return clone
def reverse(self):
"""Reverse the ordering of the QuerySet."""
if self.query.is_sliced:
raise TypeError("Cannot reverse a query once a slice has been taken.")
clone = self._chain()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defer the loading of data for certain fields until they are accessed.
Add the set of deferred fields to any existing set of deferred fields.
The only exception to this is if None is passed in as the only
        parameter, in which case all deferrals are removed.
"""
self._not_support_combined_queries("defer")
if self._fields is not None:
raise TypeError("Cannot call defer() after .values() or .values_list()")
clone = self._chain()
if fields == (None,):
clone.query.clear_deferred_loading()
else:
clone.query.add_deferred_loading(fields)
return clone
def only(self, *fields):
"""
Essentially, the opposite of defer(). Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
self._not_support_combined_queries("only")
if self._fields is not None:
raise TypeError("Cannot call only() after .values() or .values_list()")
if fields == (None,):
# Can only pass None to defer(), not only(), as the rest option.
# That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().")
for field in fields:
field = field.split(LOOKUP_SEP, 1)[0]
if field in self.query._filtered_relations:
raise ValueError("only() is not supported with FilteredRelation.")
clone = self._chain()
clone.query.add_immediate_loading(fields)
return clone
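    # Usage sketch for defer()/only() (illustrative only; ``Entry`` with
    # ``headline`` and ``body`` fields is hypothetical):
    #
    #   Entry.objects.defer("body")        # load ``body`` only on access
    #   Entry.objects.only("headline")     # defer everything but headline/pk
    #   Entry.objects.defer(None)          # clear all deferrals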
def using(self, alias):
"""Select which database this QuerySet should execute against."""
clone = self._chain()
clone._db = alias
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
@property
def ordered(self):
"""
Return True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model (or is empty).
"""
if isinstance(self, EmptyQuerySet):
return True
if self.query.extra_order_by or self.query.order_by:
return True
elif (
self.query.default_ordering
and self.query.get_meta().ordering
and
# A default ordering doesn't affect GROUP BY queries.
not self.query.group_by
):
return True
else:
return False
@property
def db(self):
"""Return the database used if this query is executed now."""
if self._for_write:
return self._db or router.db_for_write(self.model, **self._hints)
return self._db or router.db_for_read(self.model, **self._hints)
###################
# PRIVATE METHODS #
###################
def _insert(
self,
objs,
fields,
returning_fields=None,
raw=False,
using=None,
on_conflict=None,
update_fields=None,
unique_fields=None,
):
"""
Insert a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented.
"""
self._for_write = True
if using is None:
using = self.db
query = sql.InsertQuery(
self.model,
on_conflict=on_conflict,
update_fields=update_fields,
unique_fields=unique_fields,
)
query.insert_values(fields, objs, raw=raw)
return query.get_compiler(using=using).execute_sql(returning_fields)
_insert.alters_data = True
_insert.queryset_only = False
def _batched_insert(
self,
objs,
fields,
batch_size,
on_conflict=None,
update_fields=None,
unique_fields=None,
):
"""
Helper method for bulk_create() to insert objs one batch at a time.
"""
connection = connections[self.db]
ops = connection.ops
max_batch_size = max(ops.bulk_batch_size(fields, objs), 1)
batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size
inserted_rows = []
bulk_return = connection.features.can_return_rows_from_bulk_insert
for item in [objs[i : i + batch_size] for i in range(0, len(objs), batch_size)]:
if bulk_return and on_conflict is None:
inserted_rows.extend(
self._insert(
item,
fields=fields,
using=self.db,
returning_fields=self.model._meta.db_returning_fields,
)
)
else:
self._insert(
item,
fields=fields,
using=self.db,
on_conflict=on_conflict,
update_fields=update_fields,
unique_fields=unique_fields,
)
return inserted_rows
def _chain(self):
"""
Return a copy of the current QuerySet that's ready for another
operation.
"""
obj = self._clone()
if obj._sticky_filter:
obj.query.filter_is_sticky = True
obj._sticky_filter = False
return obj
def _clone(self):
"""
Return a copy of the current QuerySet. A lightweight alternative
to deepcopy().
"""
c = self.__class__(
model=self.model,
query=self.query.chain(),
using=self._db,
hints=self._hints,
)
c._sticky_filter = self._sticky_filter
c._for_write = self._for_write
c._prefetch_related_lookups = self._prefetch_related_lookups[:]
c._known_related_objects = self._known_related_objects
c._iterable_class = self._iterable_class
c._fields = self._fields
return c
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self._iterable_class(self))
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def _next_is_sticky(self):
"""
Indicate that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
"""Check that two QuerySet classes may be merged."""
if self._fields is not None and (
set(self.query.values_select) != set(other.query.values_select)
or set(self.query.extra_select) != set(other.query.extra_select)
or set(self.query.annotation_select) != set(other.query.annotation_select)
):
raise TypeError(
"Merging '%s' classes must involve the same values in each case."
% self.__class__.__name__
)
def _merge_known_related_objects(self, other):
"""
Keep track of all known related objects from either QuerySet instance.
"""
for field, objects in other._known_related_objects.items():
self._known_related_objects.setdefault(field, {}).update(objects)
def resolve_expression(self, *args, **kwargs):
if self._fields and len(self._fields) > 1:
            # values() querysets can only be used as nested queries
            # if they are set up to select only a single field.
raise TypeError("Cannot use multi-field values as a filter value.")
query = self.query.resolve_expression(*args, **kwargs)
query._db = self._db
return query
resolve_expression.queryset_only = True
def _add_hints(self, **hints):
"""
Update hinting information for use by routers. Add new key/values or
overwrite existing key/values.
"""
self._hints.update(hints)
def _has_filters(self):
"""
        Check if this QuerySet has any filtering going on. This isn't
        equivalent to checking whether all objects are present in the
        results; for example, qs[1:]._has_filters() -> False.
"""
return self.query.has_filters()
@staticmethod
def _validate_values_are_expressions(values, method_name):
invalid_args = sorted(
str(arg) for arg in values if not hasattr(arg, "resolve_expression")
)
if invalid_args:
raise TypeError(
"QuerySet.%s() received non-expression(s): %s."
% (
method_name,
", ".join(invalid_args),
)
)
def _not_support_combined_queries(self, operation_name):
if self.query.combinator:
raise NotSupportedError(
"Calling QuerySet.%s() after %s() is not supported."
% (operation_name, self.query.combinator)
)
def _check_operator_queryset(self, other, operator_):
if self.query.combinator or other.query.combinator:
raise TypeError(f"Cannot use {operator_} operator with combined queryset.")
def _check_ordering_first_last_queryset_aggregation(self, method):
if isinstance(self.query.group_by, tuple) and not any(
col.output_field is self.model._meta.pk for col in self.query.group_by
):
raise TypeError(
f"Cannot use QuerySet.{method}() on an unordered queryset performing "
f"aggregation. Add an ordering with order_by()."
)
class InstanceCheckMeta(type):
def __instancecheck__(self, instance):
return isinstance(instance, QuerySet) and instance.query.is_empty()
class EmptyQuerySet(metaclass=InstanceCheckMeta):
"""
    Marker class for checking whether a queryset is empty via .none():
isinstance(qs.none(), EmptyQuerySet) -> True
"""
def __init__(self, *args, **kwargs):
raise TypeError("EmptyQuerySet can't be instantiated")
class RawQuerySet:
"""
Provide an iterator which converts the results of raw SQL queries into
annotated model instances.
"""
def __init__(
self,
raw_query,
model=None,
query=None,
params=(),
translations=None,
using=None,
hints=None,
):
self.raw_query = raw_query
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
self.params = params
self.translations = translations or {}
self._result_cache = None
self._prefetch_related_lookups = ()
self._prefetch_done = False
def resolve_model_init_order(self):
"""Resolve the init field names and value positions."""
converter = connections[self.db].introspection.identifier_converter
model_init_fields = [
f for f in self.model._meta.fields if converter(f.column) in self.columns
]
annotation_fields = [
(column, pos)
for pos, column in enumerate(self.columns)
if column not in self.model_fields
]
model_init_order = [
self.columns.index(converter(f.column)) for f in model_init_fields
]
model_init_names = [f.attname for f in model_init_fields]
return model_init_names, model_init_order, annotation_fields
def prefetch_related(self, *lookups):
"""Same as QuerySet.prefetch_related()"""
clone = self._clone()
if lookups == (None,):
clone._prefetch_related_lookups = ()
else:
clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
return clone
def _prefetch_related_objects(self):
prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
self._prefetch_done = True
def _clone(self):
"""Same as QuerySet._clone()"""
c = self.__class__(
self.raw_query,
model=self.model,
query=self.query,
params=self.params,
translations=self.translations,
using=self._db,
hints=self._hints,
)
c._prefetch_related_lookups = self._prefetch_related_lookups[:]
return c
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self.iterator())
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __iter__(self):
self._fetch_all()
return iter(self._result_cache)
def __aiter__(self):
        # Remember, __aiter__ itself is synchronous; it's the thing it
        # returns that is async!
async def generator():
await sync_to_async(self._fetch_all)()
for item in self._result_cache:
yield item
return generator()
def iterator(self):
yield from RawModelIterable(self)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.query)
def __getitem__(self, k):
return list(self)[k]
@property
def db(self):
"""Return the database used if this query is executed now."""
return self._db or router.db_for_read(self.model, **self._hints)
def using(self, alias):
"""Select the database this RawQuerySet should execute against."""
return RawQuerySet(
self.raw_query,
model=self.model,
query=self.query.chain(using=alias),
params=self.params,
translations=self.translations,
using=alias,
)
@cached_property
def columns(self):
"""
        A list of column names in the order they'll appear in the query
        results, with ``translations`` applied.
"""
columns = self.query.get_columns()
# Adjust any column names which don't match field names
        for query_name, model_name in self.translations.items():
# Ignore translations for nonexistent column names
try:
index = columns.index(query_name)
except ValueError:
pass
else:
columns[index] = model_name
return columns
@cached_property
def model_fields(self):
"""A dict mapping column names to model field names."""
converter = connections[self.db].introspection.identifier_converter
model_fields = {}
for field in self.model._meta.fields:
name, column = field.get_attname_column()
model_fields[converter(column)] = field
return model_fields
class Prefetch:
def __init__(self, lookup, queryset=None, to_attr=None):
# `prefetch_through` is the path we traverse to perform the prefetch.
self.prefetch_through = lookup
# `prefetch_to` is the path to the attribute that stores the result.
self.prefetch_to = lookup
if queryset is not None and (
isinstance(queryset, RawQuerySet)
or (
hasattr(queryset, "_iterable_class")
and not issubclass(queryset._iterable_class, ModelIterable)
)
):
raise ValueError(
"Prefetch querysets cannot use raw(), values(), and values_list()."
)
if to_attr:
self.prefetch_to = LOOKUP_SEP.join(
lookup.split(LOOKUP_SEP)[:-1] + [to_attr]
)
self.queryset = queryset
self.to_attr = to_attr
def __getstate__(self):
obj_dict = self.__dict__.copy()
if self.queryset is not None:
queryset = self.queryset._chain()
# Prevent the QuerySet from being evaluated
queryset._result_cache = []
queryset._prefetch_done = True
obj_dict["queryset"] = queryset
return obj_dict
def add_prefix(self, prefix):
self.prefetch_through = prefix + LOOKUP_SEP + self.prefetch_through
self.prefetch_to = prefix + LOOKUP_SEP + self.prefetch_to
def get_current_prefetch_to(self, level):
return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[: level + 1])
def get_current_to_attr(self, level):
parts = self.prefetch_to.split(LOOKUP_SEP)
to_attr = parts[level]
as_attr = self.to_attr and level == len(parts) - 1
return to_attr, as_attr
def get_current_queryset(self, level):
if self.get_current_prefetch_to(level) == self.prefetch_to:
return self.queryset
return None
def __eq__(self, other):
if not isinstance(other, Prefetch):
return NotImplemented
return self.prefetch_to == other.prefetch_to
def __hash__(self):
return hash((self.__class__, self.prefetch_to))
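# Usage sketch for Prefetch (illustrative only; ``Author``/``Book`` are
# hypothetical). A custom queryset filters the prefetched rows; to_attr
# stores the result list on a plain attribute instead of the manager cache:
#
#   Author.objects.prefetch_related(
#       Prefetch(
#           "books",
#           queryset=Book.objects.filter(published=True),
#           to_attr="published_books",
#       )
#   )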
def normalize_prefetch_lookups(lookups, prefix=None):
"""Normalize lookups into Prefetch objects."""
ret = []
for lookup in lookups:
if not isinstance(lookup, Prefetch):
lookup = Prefetch(lookup)
if prefix:
lookup.add_prefix(prefix)
ret.append(lookup)
return ret
def prefetch_related_objects(model_instances, *related_lookups):
"""
Populate prefetched object caches for a list of model instances based on
the lookups/Prefetch instances given.
"""
if not model_instances:
return # nothing to do
# We need to be able to dynamically add to the list of prefetch_related
    # lookups that we look up (see below). So we need some bookkeeping to
# ensure we don't do duplicate work.
done_queries = {} # dictionary of things like 'foo__bar': [results]
auto_lookups = set() # we add to this as we go through.
followed_descriptors = set() # recursion protection
all_lookups = normalize_prefetch_lookups(reversed(related_lookups))
while all_lookups:
lookup = all_lookups.pop()
if lookup.prefetch_to in done_queries:
if lookup.queryset is not None:
raise ValueError(
"'%s' lookup was already seen with a different queryset. "
"You may need to adjust the ordering of your lookups."
% lookup.prefetch_to
)
continue
# Top level, the list of objects to decorate is the result cache
# from the primary QuerySet. It won't be for deeper levels.
obj_list = model_instances
through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
for level, through_attr in enumerate(through_attrs):
# Prepare main instances
if not obj_list:
break
prefetch_to = lookup.get_current_prefetch_to(level)
if prefetch_to in done_queries:
# Skip any prefetching, and any object preparation
obj_list = done_queries[prefetch_to]
continue
# Prepare objects:
good_objects = True
for obj in obj_list:
# Since prefetching can re-use instances, it is possible to have
# the same instance multiple times in obj_list, so obj might
# already be prepared.
if not hasattr(obj, "_prefetched_objects_cache"):
try:
obj._prefetched_objects_cache = {}
except (AttributeError, TypeError):
# Must be an immutable object from
# values_list(flat=True), for example (TypeError) or
# a QuerySet subclass that isn't returning Model
# instances (AttributeError), either in Django or a 3rd
# party. prefetch_related() doesn't make sense, so quit.
good_objects = False
break
if not good_objects:
break
# Descend down tree
# We assume that objects retrieved are homogeneous (which is the premise
# of prefetch_related), so what applies to first object applies to all.
first_obj = obj_list[0]
to_attr = lookup.get_current_to_attr(level)[0]
prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(
first_obj, through_attr, to_attr
)
if not attr_found:
raise AttributeError(
"Cannot find '%s' on %s object, '%s' is an invalid "
"parameter to prefetch_related()"
% (
through_attr,
first_obj.__class__.__name__,
lookup.prefetch_through,
)
)
if level == len(through_attrs) - 1 and prefetcher is None:
# Last one, this *must* resolve to something that supports
# prefetching, otherwise there is no point adding it and the
# developer asking for it has made a mistake.
raise ValueError(
"'%s' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to "
"prefetch_related()." % lookup.prefetch_through
)
obj_to_fetch = None
if prefetcher is not None:
obj_to_fetch = [obj for obj in obj_list if not is_fetched(obj)]
if obj_to_fetch:
obj_list, additional_lookups = prefetch_one_level(
obj_to_fetch,
prefetcher,
lookup,
level,
)
# We need to ensure we don't keep adding lookups from the
# same relationships to stop infinite recursion. So, if we
# are already on an automatically added lookup, don't add
# the new lookups from relationships we've seen already.
if not (
prefetch_to in done_queries
and lookup in auto_lookups
and descriptor in followed_descriptors
):
done_queries[prefetch_to] = obj_list
new_lookups = normalize_prefetch_lookups(
reversed(additional_lookups), prefetch_to
)
auto_lookups.update(new_lookups)
all_lookups.extend(new_lookups)
followed_descriptors.add(descriptor)
else:
# Either a singly related object that has already been fetched
# (e.g. via select_related), or hopefully some other property
# that doesn't support prefetching but needs to be traversed.
# We replace the current list of parent objects with the list
# of related objects, filtering out empty or missing values so
# that we can continue with nullable or reverse relations.
new_obj_list = []
for obj in obj_list:
if through_attr in getattr(obj, "_prefetched_objects_cache", ()):
# If related objects have been prefetched, use the
# cache rather than the object's through_attr.
new_obj = list(obj._prefetched_objects_cache.get(through_attr))
else:
try:
new_obj = getattr(obj, through_attr)
except exceptions.ObjectDoesNotExist:
continue
if new_obj is None:
continue
# We special-case `list` rather than something more generic
# like `Iterable` because we don't want to accidentally match
# user models that define __iter__.
if isinstance(new_obj, list):
new_obj_list.extend(new_obj)
else:
new_obj_list.append(new_obj)
obj_list = new_obj_list
def get_prefetcher(instance, through_attr, to_attr):
"""
For the attribute 'through_attr' on the given instance, find
an object that has a get_prefetch_queryset().
Return a 4 tuple containing:
(the object with get_prefetch_queryset (or None),
the descriptor object representing this relationship (or None),
a boolean that is False if the attribute was not found at all,
a function that takes an instance and returns a boolean that is True if
the attribute has already been fetched for that instance)
"""
def has_to_attr_attribute(instance):
return hasattr(instance, to_attr)
prefetcher = None
is_fetched = has_to_attr_attribute
# For singly related objects, we have to avoid getting the attribute
# from the object, as this will trigger the query. So we first try
# on the class, in order to get the descriptor object.
rel_obj_descriptor = getattr(instance.__class__, through_attr, None)
if rel_obj_descriptor is None:
attr_found = hasattr(instance, through_attr)
else:
attr_found = True
if rel_obj_descriptor:
# singly related object, descriptor object has the
# get_prefetch_queryset() method.
if hasattr(rel_obj_descriptor, "get_prefetch_queryset"):
prefetcher = rel_obj_descriptor
is_fetched = rel_obj_descriptor.is_cached
else:
# descriptor doesn't support prefetching, so we go ahead and get
# the attribute on the instance rather than the class to
# support many related managers
rel_obj = getattr(instance, through_attr)
if hasattr(rel_obj, "get_prefetch_queryset"):
prefetcher = rel_obj
if through_attr != to_attr:
# Special case cached_property instances because hasattr
# triggers attribute computation and assignment.
if isinstance(
getattr(instance.__class__, to_attr, None), cached_property
):
def has_cached_property(instance):
return to_attr in instance.__dict__
is_fetched = has_cached_property
else:
def in_prefetched_cache(instance):
return through_attr in instance._prefetched_objects_cache
is_fetched = in_prefetched_cache
return prefetcher, rel_obj_descriptor, attr_found, is_fetched
def prefetch_one_level(instances, prefetcher, lookup, level):
"""
Helper function for prefetch_related_objects().
Run prefetches on all instances using the prefetcher object,
assigning results to relevant caches in instance.
Return the prefetched objects along with any additional prefetches that
must be done due to prefetch_related lookups found from default managers.
"""
# prefetcher must have a method get_prefetch_queryset() which takes a list
# of instances, and returns a tuple:
# (queryset of instances of self.model that are related to passed in instances,
# callable that gets value to be matched for returned instances,
# callable that gets value to be matched for passed in instances,
# boolean that is True for singly related objects,
# cache or field name to assign to,
# boolean that is True when the previous argument is a cache name vs a field name).
# The 'values to be matched' must be hashable as they will be used
# in a dictionary.
(
rel_qs,
rel_obj_attr,
instance_attr,
single,
cache_name,
is_descriptor,
) = prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level))
# We have to handle the possibility that the QuerySet we just got back
# contains some prefetch_related lookups. We don't want to trigger the
# prefetch_related functionality by evaluating the query. Rather, we need
# to merge in the prefetch_related lookups.
# Copy the lookups in case it is a Prefetch object which could be reused
# later (happens in nested prefetch_related).
additional_lookups = [
copy.copy(additional_lookup)
for additional_lookup in getattr(rel_qs, "_prefetch_related_lookups", ())
]
if additional_lookups:
# Don't need to clone because the manager should have given us a fresh
# instance, so we access an internal instead of using public interface
# for performance reasons.
rel_qs._prefetch_related_lookups = ()
all_related_objects = list(rel_qs)
rel_obj_cache = {}
for rel_obj in all_related_objects:
rel_attr_val = rel_obj_attr(rel_obj)
rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)
to_attr, as_attr = lookup.get_current_to_attr(level)
# Make sure `to_attr` does not conflict with a field.
if as_attr and instances:
# We assume that objects retrieved are homogeneous (which is the premise
# of prefetch_related), so what applies to first object applies to all.
model = instances[0].__class__
try:
model._meta.get_field(to_attr)
except exceptions.FieldDoesNotExist:
pass
else:
msg = "to_attr={} conflicts with a field on the {} model."
raise ValueError(msg.format(to_attr, model.__name__))
# Whether or not we're prefetching the last part of the lookup.
leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level
for obj in instances:
instance_attr_val = instance_attr(obj)
vals = rel_obj_cache.get(instance_attr_val, [])
if single:
val = vals[0] if vals else None
if as_attr:
# A to_attr has been given for the prefetch.
setattr(obj, to_attr, val)
elif is_descriptor:
# cache_name points to a field name in obj.
# This field is a descriptor for a related object.
setattr(obj, cache_name, val)
else:
# No to_attr has been given for this prefetch operation and the
# cache_name does not point to a descriptor. Store the value of
# the field in the object's field cache.
obj._state.fields_cache[cache_name] = val
else:
if as_attr:
setattr(obj, to_attr, vals)
else:
manager = getattr(obj, to_attr)
if leaf and lookup.queryset is not None:
qs = manager._apply_rel_filters(lookup.queryset)
else:
qs = manager.get_queryset()
qs._result_cache = vals
# We don't want the individual qs doing prefetch_related now,
# since we have merged this into the current work.
qs._prefetch_done = True
obj._prefetched_objects_cache[cache_name] = qs
return all_related_objects, additional_lookups
class RelatedPopulator:
"""
RelatedPopulator is used for select_related() object instantiation.
The idea is that each select_related() model will be populated by a
different RelatedPopulator instance. The RelatedPopulator instances get
klass_info and select (computed in SQLCompiler) plus the used db as
input for initialization. That data is used to compute which columns
to use, how to instantiate the model, and how to populate the links
between the objects.
    The actual creation of the objects is done in the populate() method. This
    method gets a row and from_obj as input and populates the select_related()
    model instance.
"""
def __init__(self, klass_info, select, db):
self.db = db
# Pre-compute needed attributes. The attributes are:
# - model_cls: the possibly deferred model class to instantiate
# - either:
# - cols_start, cols_end: usually the columns in the row are
# in the same order model_cls.__init__ expects them, so we
# can instantiate by model_cls(*row[cols_start:cols_end])
# - reorder_for_init: When select_related descends to a child
# class, then we want to reuse the already selected parent
# data. However, in this case the parent data isn't necessarily
# in the same order that Model.__init__ expects it to be, so
# we have to reorder the parent data. The reorder_for_init
# attribute contains a function used to reorder the field data
# in the order __init__ expects it.
# - pk_idx: the index of the primary key field in the reordered
# model data. Used to check if a related object exists at all.
# - init_list: the field attnames fetched from the database. For
# deferred models this isn't the same as all attnames of the
# model's fields.
# - related_populators: a list of RelatedPopulator instances if
# select_related() descends to related models from this model.
# - local_setter, remote_setter: Methods to set cached values on
# the object being populated and on the remote object. Usually
# these are Field.set_cached_value() methods.
select_fields = klass_info["select_fields"]
from_parent = klass_info["from_parent"]
if not from_parent:
self.cols_start = select_fields[0]
self.cols_end = select_fields[-1] + 1
self.init_list = [
f[0].target.attname for f in select[self.cols_start : self.cols_end]
]
self.reorder_for_init = None
else:
attname_indexes = {
select[idx][0].target.attname: idx for idx in select_fields
}
model_init_attnames = (
f.attname for f in klass_info["model"]._meta.concrete_fields
)
self.init_list = [
attname for attname in model_init_attnames if attname in attname_indexes
]
self.reorder_for_init = operator.itemgetter(
*[attname_indexes[attname] for attname in self.init_list]
)
self.model_cls = klass_info["model"]
self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
self.related_populators = get_related_populators(klass_info, select, self.db)
self.local_setter = klass_info["local_setter"]
self.remote_setter = klass_info["remote_setter"]
def populate(self, row, from_obj):
if self.reorder_for_init:
obj_data = self.reorder_for_init(row)
else:
obj_data = row[self.cols_start : self.cols_end]
if obj_data[self.pk_idx] is None:
obj = None
else:
obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
for rel_iter in self.related_populators:
rel_iter.populate(row, obj)
self.local_setter(from_obj, obj)
if obj is not None:
self.remote_setter(obj, from_obj)
def get_related_populators(klass_info, select, db):
iterators = []
related_klass_infos = klass_info.get("related_klass_infos", [])
for rel_klass_info in related_klass_infos:
rel_cls = RelatedPopulator(rel_klass_info, select, db)
iterators.append(rel_cls)
return iterators
d4f4087563c6208ac9220e1a66af2d3d7c2d381001dbede95caa61d13253c8e1
"""
Create SQL statements for QuerySets.
The code in here encapsulates all of the SQL construction so that QuerySets
themselves do not have to (and could be backed by things other than SQL
databases). The abstraction barrier only works one way: this module has to know
all about the internals of models in order to get the information it needs.
"""
import copy
import difflib
import functools
import sys
from collections import Counter, namedtuple
from collections.abc import Iterator, Mapping
from itertools import chain, count, product
from string import ascii_uppercase
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db import DEFAULT_DB_ALIAS, NotSupportedError, connections
from django.db.models.aggregates import Count
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import (
BaseExpression,
Col,
Exists,
F,
OuterRef,
Ref,
ResolvedOuterRef,
Value,
)
from django.db.models.fields import Field
from django.db.models.fields.related_lookups import MultiColSource
from django.db.models.lookups import Lookup
from django.db.models.query_utils import (
Q,
check_rel_lookup_compatibility,
refs_expression,
)
from django.db.models.sql.constants import INNER, LOUTER, ORDER_DIR, SINGLE
from django.db.models.sql.datastructures import BaseTable, Empty, Join, MultiJoin
from django.db.models.sql.where import AND, OR, ExtraWhere, NothingNode, WhereNode
from django.utils.functional import cached_property
from django.utils.regex_helper import _lazy_re_compile
from django.utils.tree import Node
__all__ = ["Query", "RawQuery"]
# Quotation marks ('"`[]), whitespace characters, semicolons, or inline
# SQL comments are forbidden in column aliases.
FORBIDDEN_ALIAS_PATTERN = _lazy_re_compile(r"['`\"\]\[;\s]|--|/\*|\*/")
# Inspired by
# https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS
EXPLAIN_OPTIONS_PATTERN = _lazy_re_compile(r"[\w\-]+")
def get_field_names_from_opts(opts):
if opts is None:
return set()
return set(
chain.from_iterable(
(f.name, f.attname) if f.concrete else (f.name,) for f in opts.get_fields()
)
)
def get_children_from_q(q):
for child in q.children:
if isinstance(child, Node):
yield from get_children_from_q(child)
else:
yield child
JoinInfo = namedtuple(
"JoinInfo",
("final_field", "targets", "opts", "joins", "path", "transform_function"),
)
class RawQuery:
"""A single raw SQL query."""
def __init__(self, sql, using, params=()):
self.params = params
self.sql = sql
self.using = using
self.cursor = None
# Mirror some properties of a normal query so that
# the compiler can be used to process results.
self.low_mark, self.high_mark = 0, None # Used for offset/limit
self.extra_select = {}
self.annotation_select = {}
def chain(self, using):
return self.clone(using)
def clone(self, using):
return RawQuery(self.sql, using, params=self.params)
def get_columns(self):
if self.cursor is None:
self._execute_query()
converter = connections[self.using].introspection.identifier_converter
return [converter(column_meta[0]) for column_meta in self.cursor.description]
def __iter__(self):
# Always execute a new query for a new iterator.
# This could be optimized with a cache at the expense of RAM.
self._execute_query()
if not connections[self.using].features.can_use_chunked_reads:
# If the database can't use chunked reads we need to make sure we
# evaluate the entire query up front.
result = list(self.cursor)
else:
result = self.cursor
return iter(result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
@property
def params_type(self):
if self.params is None:
return None
return dict if isinstance(self.params, Mapping) else tuple
def __str__(self):
if self.params_type is None:
return self.sql
return self.sql % self.params_type(self.params)
def _execute_query(self):
connection = connections[self.using]
# Adapt parameters to the database, as much as possible considering
# that the target type isn't known. See #17755.
params_type = self.params_type
adapter = connection.ops.adapt_unknown_value
if params_type is tuple:
params = tuple(adapter(val) for val in self.params)
elif params_type is dict:
params = {key: adapter(val) for key, val in self.params.items()}
elif params_type is None:
params = None
else:
raise RuntimeError("Unexpected params type: %s" % params_type)
self.cursor = connection.cursor()
self.cursor.execute(self.sql, params)
ExplainInfo = namedtuple("ExplainInfo", ("format", "options"))
class Query(BaseExpression):
"""A single SQL query."""
alias_prefix = "T"
empty_result_set_value = None
subq_aliases = frozenset([alias_prefix])
compiler = "SQLCompiler"
base_table_class = BaseTable
join_class = Join
default_cols = True
default_ordering = True
standard_ordering = True
filter_is_sticky = False
subquery = False
# SQL-related attributes.
# Select and related select clauses are expressions to use in the SELECT
# clause of the query. The select is used for cases where we want to set up
# the select clause to contain other than default fields (values(),
# subqueries...). Note that annotations go to annotations dictionary.
select = ()
# The group_by attribute can have one of the following forms:
# - None: no group by at all in the query
# - A tuple of expressions: group by (at least) those expressions.
# String refs are also allowed for now.
# - True: group by all select fields of the model
# See compiler.get_group_by() for details.
group_by = None
order_by = ()
low_mark = 0 # Used for offset/limit.
high_mark = None # Used for offset/limit.
distinct = False
distinct_fields = ()
select_for_update = False
select_for_update_nowait = False
select_for_update_skip_locked = False
select_for_update_of = ()
select_for_no_key_update = False
select_related = False
has_select_fields = False
    # Arbitrary limit for select_related to prevent infinite recursion.
max_depth = 5
# Holds the selects defined by a call to values() or values_list()
# excluding annotation_select and extra_select.
values_select = ()
# SQL annotation-related attributes.
annotation_select_mask = None
_annotation_select_cache = None
# Set combination attributes.
combinator = None
combinator_all = False
combined_queries = ()
# These are for extensions. The contents are more or less appended verbatim
# to the appropriate clause.
extra_select_mask = None
_extra_select_cache = None
extra_tables = ()
extra_order_by = ()
    # A tuple of a set of model field names and a boolean: True if those are
    # the fields to defer, or False if they are the only fields to load.
deferred_loading = (frozenset(), True)
explain_info = None
def __init__(self, model, alias_cols=True):
self.model = model
self.alias_refcount = {}
# alias_map is the most important data structure regarding joins.
# It's used for recording which joins exist in the query and what
# types they are. The key is the alias of the joined table (possibly
# the table name) and the value is a Join-like object (see
# sql.datastructures.Join for more information).
self.alias_map = {}
# Whether to provide alias to columns during reference resolving.
self.alias_cols = alias_cols
# Sometimes the query contains references to aliases in outer queries (as
# a result of split_exclude). Correct alias quoting needs to know these
# aliases too.
# Map external tables to whether they are aliased.
self.external_aliases = {}
self.table_map = {} # Maps table names to list of aliases.
self.used_aliases = set()
self.where = WhereNode()
# Maps alias -> Annotation Expression.
self.annotations = {}
# These are for extensions. The contents are more or less appended
# verbatim to the appropriate clause.
self.extra = {} # Maps col_alias -> (col_sql, params).
self._filtered_relations = {}
@property
def output_field(self):
if len(self.select) == 1:
select = self.select[0]
return getattr(select, "target", None) or select.field
elif len(self.annotation_select) == 1:
return next(iter(self.annotation_select.values())).output_field
@cached_property
def base_table(self):
for alias in self.alias_map:
return alias
def __str__(self):
"""
Return the query as a string of SQL with the parameter values
substituted in (use sql_with_params() to see the unsubstituted string).
Parameter values won't necessarily be quoted correctly, since that is
done by the database interface at execution time.
"""
sql, params = self.sql_with_params()
return sql % params
def sql_with_params(self):
"""
Return the query as an SQL string and the parameters that will be
substituted into the query.
"""
return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
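    # Illustrative note: str(query) interpolates parameters for debugging
    # only, while sql_with_params() returns what is actually sent to the
    # database interface. For a hypothetical Author model:
    #
    #   sql, params = Author.objects.filter(pk=1).query.sql_with_params()
    #   # sql    -> 'SELECT ... WHERE "myapp_author"."id" = %s'
    #   # params -> (1,)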
def __deepcopy__(self, memo):
"""Limit the amount of work when a Query is deepcopied."""
result = self.clone()
memo[id(self)] = result
return result
def get_compiler(self, using=None, connection=None, elide_empty=True):
if using is None and connection is None:
raise ValueError("Need either using or connection")
if using:
connection = connections[using]
return connection.ops.compiler(self.compiler)(
self, connection, using, elide_empty
)
def get_meta(self):
"""
Return the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
if self.model:
return self.model._meta
def clone(self):
"""
Return a copy of the current Query. A lightweight alternative to
deepcopy().
"""
obj = Empty()
obj.__class__ = self.__class__
# Copy references to everything.
obj.__dict__ = self.__dict__.copy()
# Clone attributes that can't use shallow copy.
obj.alias_refcount = self.alias_refcount.copy()
obj.alias_map = self.alias_map.copy()
obj.external_aliases = self.external_aliases.copy()
obj.table_map = self.table_map.copy()
obj.where = self.where.clone()
obj.annotations = self.annotations.copy()
if self.annotation_select_mask is not None:
obj.annotation_select_mask = self.annotation_select_mask.copy()
if self.combined_queries:
obj.combined_queries = tuple(
[query.clone() for query in self.combined_queries]
)
# _annotation_select_cache cannot be copied, as doing so breaks the
# (necessary) state in which both annotations and
# _annotation_select_cache point to the same underlying objects.
# It will get re-populated in the cloned queryset the next time it's
# used.
obj._annotation_select_cache = None
obj.extra = self.extra.copy()
if self.extra_select_mask is not None:
obj.extra_select_mask = self.extra_select_mask.copy()
if self._extra_select_cache is not None:
obj._extra_select_cache = self._extra_select_cache.copy()
if self.select_related is not False:
# Use deepcopy because select_related stores fields in nested
# dicts.
obj.select_related = copy.deepcopy(obj.select_related)
if "subq_aliases" in self.__dict__:
obj.subq_aliases = self.subq_aliases.copy()
obj.used_aliases = self.used_aliases.copy()
obj._filtered_relations = self._filtered_relations.copy()
# Clear the cached_property, if it exists.
obj.__dict__.pop("base_table", None)
return obj
def chain(self, klass=None):
"""
Return a copy of the current Query that's ready for another operation.
The klass argument changes the type of the Query, e.g. UpdateQuery.
"""
obj = self.clone()
if klass and obj.__class__ != klass:
obj.__class__ = klass
if not obj.filter_is_sticky:
obj.used_aliases = set()
obj.filter_is_sticky = False
if hasattr(obj, "_setup_query"):
obj._setup_query()
return obj
def relabeled_clone(self, change_map):
clone = self.clone()
clone.change_aliases(change_map)
return clone
def _get_col(self, target, field, alias):
if not self.alias_cols:
alias = None
return target.get_col(alias, field)
def get_aggregation(self, using, aggregate_exprs):
"""
Return the dictionary with the values of the existing aggregations.
"""
if not aggregate_exprs:
return {}
aggregates = {}
for alias, aggregate_expr in aggregate_exprs.items():
self.check_alias(alias)
aggregate = aggregate_expr.resolve_expression(
self, allow_joins=True, reuse=None, summarize=True
)
if not aggregate.contains_aggregate:
raise TypeError("%s is not an aggregate expression" % alias)
aggregates[alias] = aggregate
# Existing usage of aggregation can be determined by the presence of
# selected aggregates but also by filters against aliased aggregates.
_, having, qualify = self.where.split_having_qualify()
has_existing_aggregation = (
any(
getattr(annotation, "contains_aggregate", True)
for annotation in self.annotations.values()
)
or having
)
# Decide if we need to use a subquery.
#
# Existing aggregations would cause incorrect results as
# get_aggregation() must produce just one result and thus must not use
# GROUP BY.
#
# If the query has limit or distinct, or uses set operations, then
# those operations must be done in a subquery so that the query
# aggregates on the limit and/or distinct results instead of applying
# the distinct and limit after the aggregation.
if (
isinstance(self.group_by, tuple)
or self.is_sliced
or has_existing_aggregation
or qualify
or self.distinct
or self.combinator
):
from django.db.models.sql.subqueries import AggregateQuery
inner_query = self.clone()
inner_query.subquery = True
outer_query = AggregateQuery(self.model, inner_query)
inner_query.select_for_update = False
inner_query.select_related = False
inner_query.set_annotation_mask(self.annotation_select)
# Queries with distinct_fields need ordering and when a limit is
# applied we must take the slice from the ordered query. Otherwise
# no need for ordering.
inner_query.clear_ordering(force=False)
if not inner_query.distinct:
# If the inner query uses default select and it has some
# aggregate annotations, then we must make sure the inner
# query is grouped by the main model's primary key. However,
# clearing the select clause can alter results if distinct is
# used.
if inner_query.default_cols and has_existing_aggregation:
inner_query.group_by = (
self.model._meta.pk.get_col(inner_query.get_initial_alias()),
)
inner_query.default_cols = False
if not qualify:
# Mask existing annotations that are not referenced by
# aggregates to be pushed to the outer query unless
# filtering against window functions is involved as it
                    # requires complex re-aliasing.
annotation_mask = set()
for aggregate in aggregates.values():
annotation_mask |= aggregate.get_refs()
inner_query.set_annotation_mask(annotation_mask)
# Add aggregates to the outer AggregateQuery. This requires making
# sure all columns referenced by the aggregates are selected in the
# inner query. It is achieved by retrieving all column references
# by the aggregates, explicitly selecting them in the inner query,
# and making sure the aggregates are repointed to them.
col_refs = {}
for alias, aggregate in aggregates.items():
replacements = {}
for col in self._gen_cols([aggregate], resolve_refs=False):
if not (col_ref := col_refs.get(col)):
index = len(col_refs) + 1
col_alias = f"__col{index}"
col_ref = Ref(col_alias, col)
col_refs[col] = col_ref
inner_query.annotations[col_alias] = col
inner_query.append_annotation_mask([col_alias])
replacements[col] = col_ref
outer_query.annotations[alias] = aggregate.replace_expressions(
replacements
)
if (
inner_query.select == ()
and not inner_query.default_cols
and not inner_query.annotation_select_mask
):
# In case of Model.objects[0:3].count(), there would be no
# field selected in the inner query, yet we must use a subquery.
# So, make sure at least one field is selected.
inner_query.select = (
self.model._meta.pk.get_col(inner_query.get_initial_alias()),
)
else:
outer_query = self
self.select = ()
self.default_cols = False
self.extra = {}
if self.annotations:
# Inline reference to existing annotations and mask them as
# they are unnecessary given only the summarized aggregations
# are requested.
replacements = {
Ref(alias, annotation): annotation
for alias, annotation in self.annotations.items()
}
self.annotations = {
alias: aggregate.replace_expressions(replacements)
for alias, aggregate in aggregates.items()
}
else:
self.annotations = aggregates
self.set_annotation_mask(aggregates)
empty_set_result = [
expression.empty_result_set_value
for expression in outer_query.annotation_select.values()
]
elide_empty = not any(result is NotImplemented for result in empty_set_result)
outer_query.clear_ordering(force=True)
outer_query.clear_limits()
outer_query.select_for_update = False
outer_query.select_related = False
compiler = outer_query.get_compiler(using, elide_empty=elide_empty)
result = compiler.execute_sql(SINGLE)
if result is None:
result = empty_set_result
converters = compiler.get_converters(outer_query.annotation_select.values())
result = next(compiler.apply_converters((result,), converters))
return dict(zip(outer_query.annotation_select, result))
def get_count(self, using):
"""
Perform a COUNT() query using the current filter constraints.
"""
obj = self.clone()
return obj.get_aggregation(using, {"__count": Count("*")})["__count"]
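    # Sketch of the counting path (hypothetical model): QuerySet.count()
    # funnels into get_count(), which runs a single Count("*") aggregate
    # through get_aggregation() on a clone of the query:
    #
    #   Author.objects.filter(active=True).count()
    #   # -> clone().get_aggregation(using, {"__count": Count("*")})["__count"]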
def has_filters(self):
return self.where
def exists(self, limit=True):
q = self.clone()
if not (q.distinct and q.is_sliced):
if q.group_by is True:
q.add_fields(
(f.attname for f in self.model._meta.concrete_fields), False
)
# Disable GROUP BY aliases to avoid orphaning references to the
# SELECT clause which is about to be cleared.
q.set_group_by(allow_aliases=False)
q.clear_select_clause()
if q.combined_queries and q.combinator == "union":
q.combined_queries = tuple(
combined_query.exists(limit=False)
for combined_query in q.combined_queries
)
q.clear_ordering(force=True)
if limit:
q.set_limits(high=1)
q.add_annotation(Value(1), "a")
return q
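    # Illustrative shape of the rewritten query (hypothetical model): the
    # select clause is cleared and replaced with a constant so the backend
    # can stop at the first matching row:
    #
    #   Author.objects.filter(active=True).exists()
    #   # -> SELECT (1) AS "a" FROM "myapp_author" WHERE ... LIMIT 1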
def has_results(self, using):
        q = self.exists()
compiler = q.get_compiler(using=using)
return compiler.has_results()
def explain(self, using, format=None, **options):
q = self.clone()
for option_name in options:
if (
not EXPLAIN_OPTIONS_PATTERN.fullmatch(option_name)
or "--" in option_name
):
raise ValueError(f"Invalid option name: {option_name!r}.")
q.explain_info = ExplainInfo(format, options)
compiler = q.get_compiler(using=using)
return "\n".join(compiler.explain_query())
def combine(self, rhs, connector):
"""
Merge the 'rhs' query into the current one (with any 'rhs' effects
being applied *after* (that is, "to the right of") anything in the
current query. 'rhs' is not modified during a call to this function.
The 'connector' parameter describes how to connect filters from the
'rhs' query.
"""
if self.model != rhs.model:
raise TypeError("Cannot combine queries on two different base models.")
if self.is_sliced:
raise TypeError("Cannot combine queries once a slice has been taken.")
if self.distinct != rhs.distinct:
raise TypeError("Cannot combine a unique query with a non-unique query.")
if self.distinct_fields != rhs.distinct_fields:
raise TypeError("Cannot combine queries with different distinct fields.")
        # If lhs and rhs share the same alias prefix, it is possible to have
# conflicting alias changes like T4 -> T5, T5 -> T6, which might end up
# as T4 -> T6 while combining two querysets. To prevent this, change an
# alias prefix of the rhs and update current aliases accordingly,
# except if the alias is the base table since it must be present in the
# query on both sides.
initial_alias = self.get_initial_alias()
rhs.bump_prefix(self, exclude={initial_alias})
# Work out how to relabel the rhs aliases, if necessary.
change_map = {}
conjunction = connector == AND
# Determine which existing joins can be reused. When combining the
# query with AND we must recreate all joins for m2m filters. When
# combining with OR we can reuse joins. The reason is that in AND
# case a single row can't fulfill a condition like:
# revrel__col=1 & revrel__col=2
# But, there might be two different related rows matching this
# condition. In OR case a single True is enough, so single row is
# enough, too.
#
# Note that we will be creating duplicate joins for non-m2m joins in
# the AND case. The results will be correct but this creates too many
# joins. This is something that could be fixed later on.
reuse = set() if conjunction else set(self.alias_map)
joinpromoter = JoinPromoter(connector, 2, False)
joinpromoter.add_votes(
j for j in self.alias_map if self.alias_map[j].join_type == INNER
)
rhs_votes = set()
# Now, add the joins from rhs query into the new query (skipping base
# table).
rhs_tables = list(rhs.alias_map)[1:]
for alias in rhs_tables:
join = rhs.alias_map[alias]
# If the left side of the join was already relabeled, use the
# updated alias.
join = join.relabeled_clone(change_map)
new_alias = self.join(join, reuse=reuse)
if join.join_type == INNER:
rhs_votes.add(new_alias)
# We can't reuse the same join again in the query. If we have two
# distinct joins for the same connection in rhs query, then the
# combined query must have two joins, too.
reuse.discard(new_alias)
if alias != new_alias:
change_map[alias] = new_alias
if not rhs.alias_refcount[alias]:
# The alias was unused in the rhs query. Unref it so that it
# will be unused in the new query, too. We have to add and
# unref the alias so that join promotion has information of
# the join type for the unused alias.
self.unref_alias(new_alias)
joinpromoter.add_votes(rhs_votes)
joinpromoter.update_join_types(self)
        # Combine subquery aliases to ensure that alias relabelling properly
        # handles subqueries when combining where and select clauses.
self.subq_aliases |= rhs.subq_aliases
# Now relabel a copy of the rhs where-clause and add it to the current
# one.
w = rhs.where.clone()
w.relabel_aliases(change_map)
self.where.add(w, connector)
# Selection columns and extra extensions are those provided by 'rhs'.
if rhs.select:
self.set_select([col.relabeled_clone(change_map) for col in rhs.select])
else:
self.select = ()
if connector == OR:
# It would be nice to be able to handle this, but the queries don't
# really make sense (or return consistent value sets). Not worth
# the extra complexity when you can write a real query instead.
if self.extra and rhs.extra:
raise ValueError(
"When merging querysets using 'or', you cannot have "
"extra(select=...) on both sides."
)
self.extra.update(rhs.extra)
extra_select_mask = set()
if self.extra_select_mask is not None:
extra_select_mask.update(self.extra_select_mask)
if rhs.extra_select_mask is not None:
extra_select_mask.update(rhs.extra_select_mask)
if extra_select_mask:
self.set_extra_mask(extra_select_mask)
self.extra_tables += rhs.extra_tables
# Ordering uses the 'rhs' ordering, unless it has none, in which case
# the current ordering is used.
self.order_by = rhs.order_by or self.order_by
self.extra_order_by = rhs.extra_order_by or self.extra_order_by
def _get_defer_select_mask(self, opts, mask, select_mask=None):
if select_mask is None:
select_mask = {}
select_mask[opts.pk] = {}
        # All concrete fields that are not part of the defer mask must be
        # loaded. If a relational field is encountered, it gets added to the
        # mask so that it is considered by `select_related`, and the cycle
        # continues by recursively calling this function.
for field in opts.concrete_fields:
field_mask = mask.pop(field.name, None)
if field_mask is None:
select_mask.setdefault(field, {})
elif field_mask:
if not field.is_relation:
raise FieldError(next(iter(field_mask)))
field_select_mask = select_mask.setdefault(field, {})
related_model = field.remote_field.model._meta.concrete_model
self._get_defer_select_mask(
related_model._meta, field_mask, field_select_mask
)
# Remaining defer entries must be references to reverse relationships.
# The following code is expected to raise FieldError if it encounters
# a malformed defer entry.
for field_name, field_mask in mask.items():
if filtered_relation := self._filtered_relations.get(field_name):
relation = opts.get_field(filtered_relation.relation_name)
field_select_mask = select_mask.setdefault((field_name, relation), {})
field = relation.field
else:
field = opts.get_field(field_name).field
field_select_mask = select_mask.setdefault(field, {})
related_model = field.model._meta.concrete_model
self._get_defer_select_mask(
related_model._meta, field_mask, field_select_mask
)
return select_mask
def _get_only_select_mask(self, opts, mask, select_mask=None):
if select_mask is None:
select_mask = {}
select_mask[opts.pk] = {}
# Only include fields mentioned in the mask.
for field_name, field_mask in mask.items():
field = opts.get_field(field_name)
field_select_mask = select_mask.setdefault(field, {})
if field_mask:
if not field.is_relation:
raise FieldError(next(iter(field_mask)))
related_model = field.remote_field.model._meta.concrete_model
self._get_only_select_mask(
related_model._meta, field_mask, field_select_mask
)
return select_mask
def get_select_mask(self):
"""
Convert the self.deferred_loading data structure to an alternate data
        structure, describing the fields that *will* be loaded. This is used to
compute the columns to select from the database and also by the
QuerySet class to work out which fields are being initialized on each
model. Models that have all their fields included aren't mentioned in
the result, only those that have field restrictions in place.
"""
field_names, defer = self.deferred_loading
if not field_names:
return {}
mask = {}
for field_name in field_names:
part_mask = mask
for part in field_name.split(LOOKUP_SEP):
part_mask = part_mask.setdefault(part, {})
opts = self.get_meta()
if defer:
return self._get_defer_select_mask(opts, mask)
return self._get_only_select_mask(opts, mask)
def table_alias(self, table_name, create=False, filtered_relation=None):
"""
Return a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
most recently created alias for the table (if one exists) is reused.
"""
alias_list = self.table_map.get(table_name)
if not create and alias_list:
alias = alias_list[0]
self.alias_refcount[alias] += 1
return alias, False
# Create a new alias for this table.
if alias_list:
alias = "%s%d" % (self.alias_prefix, len(self.alias_map) + 1)
alias_list.append(alias)
else:
# The first occurrence of a table uses the table name directly.
alias = (
filtered_relation.alias if filtered_relation is not None else table_name
)
self.table_map[table_name] = [alias]
self.alias_refcount[alias] = 1
return alias, True
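    # Illustrative alias assignment: the first occurrence of a table keeps
    # the table name as its alias; later joins to the same table get
    # generated aliases derived from alias_prefix and the alias_map size,
    # e.g. "myapp_author", then "T2", "T3", ... for self-joins.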
def ref_alias(self, alias):
"""Increases the reference count for this alias."""
self.alias_refcount[alias] += 1
def unref_alias(self, alias, amount=1):
"""Decreases the reference count for this alias."""
self.alias_refcount[alias] -= amount
def promote_joins(self, aliases):
"""
        Promote recursively the join type of the given aliases and their
        children to an outer join. A join is only promoted if it is nullable
        or the parent join is an outer join.
        The children promotion is done to avoid join chains that contain a
        LOUTER b INNER c. So, if we currently have a INNER b INNER c and a->b
        is promoted, then we must also promote b->c automatically, otherwise
        the promotion of a->b doesn't actually change anything in the query
        results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type is None:
# This is the base table (first FROM entry) - this table
# isn't really joined at all in the query, so we should not
# alter its join type.
continue
# Only the first alias (skipped above) should have None join_type
assert self.alias_map[alias].join_type is not None
parent_alias = self.alias_map[alias].parent_alias
parent_louter = (
parent_alias and self.alias_map[parent_alias].join_type == LOUTER
)
already_louter = self.alias_map[alias].join_type == LOUTER
if (self.alias_map[alias].nullable or parent_louter) and not already_louter:
self.alias_map[alias] = self.alias_map[alias].promote()
# Join type of 'alias' changed, so re-examine all aliases that
# refer to this one.
aliases.extend(
join
for join in self.alias_map
if self.alias_map[join].parent_alias == alias
and join not in aliases
)
def demote_joins(self, aliases):
"""
Change join type from LOUTER to INNER for all joins in aliases.
        Similarly to promote_joins(), this method must ensure no join chains
        containing first an outer, then an inner join are generated. If we
        are demoting the b->c join in the chain a LOUTER b LOUTER c, then we
        must demote a->b automatically, otherwise the demotion of b->c
        doesn't actually change anything in the query results.
"""
aliases = list(aliases)
while aliases:
alias = aliases.pop(0)
if self.alias_map[alias].join_type == LOUTER:
self.alias_map[alias] = self.alias_map[alias].demote()
parent_alias = self.alias_map[alias].parent_alias
if self.alias_map[parent_alias].join_type == INNER:
aliases.append(parent_alias)
def reset_refcounts(self, to_counts):
"""
Reset reference counts for aliases so that they match the value passed
in `to_counts`.
"""
for alias, cur_refcount in self.alias_refcount.copy().items():
unref_amount = cur_refcount - to_counts.get(alias, 0)
self.unref_alias(alias, unref_amount)
def change_aliases(self, change_map):
"""
Change the aliases in change_map (which maps old-alias -> new-alias),
relabelling any references to them in select columns and the where
clause.
"""
# If keys and values of change_map were to intersect, an alias might be
# updated twice (e.g. T4 -> T5, T5 -> T6, so also T4 -> T6) depending
# on their order in change_map.
assert set(change_map).isdisjoint(change_map.values())
# 1. Update references in "select" (normal columns plus aliases),
# "group by" and "where".
self.where.relabel_aliases(change_map)
if isinstance(self.group_by, tuple):
self.group_by = tuple(
[col.relabeled_clone(change_map) for col in self.group_by]
)
self.select = tuple([col.relabeled_clone(change_map) for col in self.select])
self.annotations = self.annotations and {
key: col.relabeled_clone(change_map)
for key, col in self.annotations.items()
}
# 2. Rename the alias in the internal table/alias datastructures.
for old_alias, new_alias in change_map.items():
if old_alias not in self.alias_map:
continue
alias_data = self.alias_map[old_alias].relabeled_clone(change_map)
self.alias_map[new_alias] = alias_data
self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
del self.alias_refcount[old_alias]
del self.alias_map[old_alias]
table_aliases = self.table_map[alias_data.table_name]
for pos, alias in enumerate(table_aliases):
if alias == old_alias:
table_aliases[pos] = new_alias
break
self.external_aliases = {
# Table is aliased or it's being changed and thus is aliased.
change_map.get(alias, alias): (aliased or alias in change_map)
for alias, aliased in self.external_aliases.items()
}
def bump_prefix(self, other_query, exclude=None):
"""
Change the alias prefix to the next letter in the alphabet in a way
that the other query's aliases and this query's aliases will not
conflict. Even tables that previously had no alias will get an alias
        after this call. To prevent changing aliases, use the exclude parameter.
"""
def prefix_gen():
"""
Generate a sequence of characters in alphabetical order:
-> 'A', 'B', 'C', ...
When the alphabet is finished, the sequence will continue with the
Cartesian product:
-> 'AA', 'AB', 'AC', ...
"""
alphabet = ascii_uppercase
prefix = chr(ord(self.alias_prefix) + 1)
yield prefix
for n in count(1):
seq = alphabet[alphabet.index(prefix) :] if prefix else alphabet
for s in product(seq, repeat=n):
yield "".join(s)
prefix = None
if self.alias_prefix != other_query.alias_prefix:
# No clashes between self and outer query should be possible.
return
# Explicitly avoid infinite loop. The constant divider is based on how
# much depth recursive subquery references add to the stack. This value
# might need to be adjusted when adding or removing function calls from
# the code path in charge of performing these operations.
local_recursion_limit = sys.getrecursionlimit() // 16
for pos, prefix in enumerate(prefix_gen()):
if prefix not in self.subq_aliases:
self.alias_prefix = prefix
break
if pos > local_recursion_limit:
raise RecursionError(
"Maximum recursion depth exceeded: too many subqueries."
)
self.subq_aliases = self.subq_aliases.union([self.alias_prefix])
other_query.subq_aliases = other_query.subq_aliases.union(self.subq_aliases)
if exclude is None:
            exclude = set()
self.change_aliases(
{
alias: "%s%d" % (self.alias_prefix, pos)
for pos, alias in enumerate(self.alias_map)
if alias not in exclude
}
)
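    # Illustrative prefix sequence from prefix_gen(): starting after "T" it
    # yields "U", "V", ..., "Z", then the products "AA", "AB", ... until a
    # prefix unused by any known subquery is found. Existing aliases are
    # then relabelled to "<prefix>0", "<prefix>1", ..., except those listed
    # in `exclude`.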
def get_initial_alias(self):
"""
Return the first alias for this query, after increasing its reference
count.
"""
if self.alias_map:
alias = self.base_table
self.ref_alias(alias)
elif self.model:
alias = self.join(self.base_table_class(self.get_meta().db_table, None))
else:
alias = None
return alias
def count_active_tables(self):
"""
Return the number of tables in this query with a non-zero reference
count. After execution, the reference counts are zeroed, so tables
added in compiler will not be seen by this method.
"""
return len([1 for count in self.alias_refcount.values() if count])
def join(self, join, reuse=None, reuse_with_filtered_relation=False):
"""
Return an alias for the 'join', either reusing an existing alias for
that join or creating a new one. 'join' is either a base_table_class or
join_class.
The 'reuse' parameter can be either None which means all joins are
reusable, or it can be a set containing the aliases that can be reused.
The 'reuse_with_filtered_relation' parameter is used when computing
FilteredRelation instances.
A join is always created as LOUTER if the lhs alias is LOUTER to make
sure chains like t1 LOUTER t2 INNER t3 aren't generated. All new
joins are created as LOUTER if the join is nullable.
"""
if reuse_with_filtered_relation and reuse:
reuse_aliases = [
a for a, j in self.alias_map.items() if a in reuse and j.equals(join)
]
else:
reuse_aliases = [
a
for a, j in self.alias_map.items()
if (reuse is None or a in reuse) and j == join
]
if reuse_aliases:
if join.table_alias in reuse_aliases:
reuse_alias = join.table_alias
else:
# Reuse the most recent alias of the joined table
# (a many-to-many relation may be joined multiple times).
reuse_alias = reuse_aliases[-1]
self.ref_alias(reuse_alias)
return reuse_alias
# No reuse is possible, so we need a new alias.
alias, _ = self.table_alias(
join.table_name, create=True, filtered_relation=join.filtered_relation
)
if join.join_type:
if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:
join_type = LOUTER
else:
join_type = INNER
join.join_type = join_type
join.table_alias = alias
self.alias_map[alias] = join
return alias
def join_parent_model(self, opts, model, alias, seen):
"""
Make sure the given 'model' is joined in the query. If 'model' isn't
        a parent of 'opts' or if it is None, this method is a no-op.
The 'alias' is the root alias for starting the join, 'seen' is a dict
of model -> alias of existing joins. It must also contain a mapping
of None -> some alias. This will be returned in the no-op case.
"""
if model in seen:
return seen[model]
chain = opts.get_base_chain(model)
if not chain:
return alias
curr_opts = opts
for int_model in chain:
if int_model in seen:
curr_opts = int_model._meta
alias = seen[int_model]
continue
            # Proxy models have elements in their base chain with no
            # parents; assign the new options object and skip to the
            # next base in that case.
if not curr_opts.parents[int_model]:
curr_opts = int_model._meta
continue
link_field = curr_opts.get_ancestor_link(int_model)
join_info = self.setup_joins([link_field.name], curr_opts, alias)
curr_opts = int_model._meta
alias = seen[int_model] = join_info.joins[-1]
return alias or seen[None]
def check_alias(self, alias):
if FORBIDDEN_ALIAS_PATTERN.search(alias):
raise ValueError(
"Column aliases cannot contain whitespace characters, quotation marks, "
"semicolons, or SQL comments."
)
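    # Illustrative rejections (see FORBIDDEN_ALIAS_PATTERN): any alias
    # containing whitespace, quoting, brackets, semicolons, or SQL comment
    # markers is refused, e.g.:
    #
    #   query.check_alias('total"--')     # raises ValueError
    #   query.check_alias("total_count")  # accepted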
def add_annotation(self, annotation, alias, select=True):
"""Add a single annotation expression to the Query."""
self.check_alias(alias)
annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None)
if select:
self.append_annotation_mask([alias])
else:
self.set_annotation_mask(set(self.annotation_select).difference({alias}))
self.annotations[alias] = annotation
def resolve_expression(self, query, *args, **kwargs):
clone = self.clone()
# Subqueries need to use a different set of aliases than the outer query.
clone.bump_prefix(query)
clone.subquery = True
clone.where.resolve_expression(query, *args, **kwargs)
# Resolve combined queries.
if clone.combinator:
clone.combined_queries = tuple(
[
combined_query.resolve_expression(query, *args, **kwargs)
for combined_query in clone.combined_queries
]
)
for key, value in clone.annotations.items():
resolved = value.resolve_expression(query, *args, **kwargs)
if hasattr(resolved, "external_aliases"):
resolved.external_aliases.update(clone.external_aliases)
clone.annotations[key] = resolved
# Outer query's aliases are considered external.
for alias, table in query.alias_map.items():
clone.external_aliases[alias] = (
isinstance(table, Join)
and table.join_field.related_model._meta.db_table != alias
) or (
isinstance(table, BaseTable) and table.table_name != table.table_alias
)
return clone
def get_external_cols(self):
exprs = chain(self.annotations.values(), self.where.children)
return [
col
for col in self._gen_cols(exprs, include_external=True)
if col.alias in self.external_aliases
]
def get_group_by_cols(self, wrapper=None):
        # If wrapper is referenced by an alias for an explicit GROUP BY
        # through values(), a reference to this expression and not self must
        # be returned to ensure external column references are not grouped
        # against as well.
external_cols = self.get_external_cols()
if any(col.possibly_multivalued for col in external_cols):
return [wrapper or self]
return external_cols
def as_sql(self, compiler, connection):
# Some backends (e.g. Oracle) raise an error when a subquery contains
# unnecessary ORDER BY clause.
if (
self.subquery
and not connection.features.ignores_unnecessary_order_by_in_subqueries
):
self.clear_ordering(force=False)
for query in self.combined_queries:
query.clear_ordering(force=False)
sql, params = self.get_compiler(connection=connection).as_sql()
if self.subquery:
sql = "(%s)" % sql
return sql, params
def resolve_lookup_value(self, value, can_reuse, allow_joins):
if hasattr(value, "resolve_expression"):
value = value.resolve_expression(
self,
reuse=can_reuse,
allow_joins=allow_joins,
)
elif isinstance(value, (list, tuple)):
# The items of the iterable may be expressions and therefore need
# to be resolved independently.
values = (
self.resolve_lookup_value(sub_value, can_reuse, allow_joins)
for sub_value in value
)
type_ = type(value)
if hasattr(type_, "_make"): # namedtuple
return type_(*values)
return type_(values)
return value
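    # Illustrative behavior: expressions are resolved against this query,
    # and list/tuple values are rebuilt with their original type so that
    # namedtuples survive, e.g. (hypothetical Point type):
    #
    #   Point = namedtuple("Point", ["x", "y"])
    #   query.resolve_lookup_value(Point(F("a"), 2), None, True)
    #   # -> Point(<resolved expression for "a">, 2), rebuilt via Point(*...)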
def solve_lookup_type(self, lookup, summarize=False):
"""
Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains').
"""
lookup_splitted = lookup.split(LOOKUP_SEP)
if self.annotations:
annotation, expression_lookups = refs_expression(
lookup_splitted, self.annotations
)
if annotation:
expression = self.annotations[annotation]
if summarize:
expression = Ref(annotation, expression)
return expression_lookups, (), expression
_, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
field_parts = lookup_splitted[0 : len(lookup_splitted) - len(lookup_parts)]
if len(lookup_parts) > 1 and not field_parts:
raise FieldError(
                'Invalid lookup "%s" for model "%s".'
% (lookup, self.get_meta().model.__name__)
)
return lookup_parts, field_parts, False
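    # Illustrative split (hypothetical model fields):
    #
    #   query.solve_lookup_type("author__name__icontains")
    #   # -> (["icontains"], ["author", "name"], False)
    #
    # If the first part names an annotation, the trailing parts are instead
    # returned as lookups against that annotation expression.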
def check_query_object_type(self, value, opts, field):
"""
Check whether the object passed while querying is of the correct type.
If not, raise a ValueError specifying the wrong object.
"""
if hasattr(value, "_meta"):
if not check_rel_lookup_compatibility(value._meta.model, opts, field):
raise ValueError(
'Cannot query "%s": Must be "%s" instance.'
% (value, opts.object_name)
)
def check_related_objects(self, field, value, opts):
"""Check the type of object passed to query relations."""
if field.is_relation:
# Check that the field and the queryset use the same model in a
# query like .filter(author=Author.objects.all()). For example, the
# opts would be Author's (from the author field) and value.model
# would be Author.objects.all() queryset's .model (Author also).
# The field is the related field on the lhs side.
if (
isinstance(value, Query)
and not value.has_select_fields
and not check_rel_lookup_compatibility(value.model, opts, field)
):
raise ValueError(
'Cannot use QuerySet for "%s": Use a QuerySet for "%s".'
% (value.model._meta.object_name, opts.object_name)
)
elif hasattr(value, "_meta"):
self.check_query_object_type(value, opts, field)
elif hasattr(value, "__iter__"):
for v in value:
self.check_query_object_type(v, opts, field)
def check_filterable(self, expression):
"""Raise an error if expression cannot be used in a WHERE clause."""
if hasattr(expression, "resolve_expression") and not getattr(
expression, "filterable", True
):
raise NotSupportedError(
expression.__class__.__name__ + " is disallowed in the filter "
"clause."
)
if hasattr(expression, "get_source_expressions"):
for expr in expression.get_source_expressions():
self.check_filterable(expr)
def build_lookup(self, lookups, lhs, rhs):
"""
Try to extract transforms and lookup from given lhs.
The lhs value is something that works like SQLExpression.
The rhs value is what the lookup is going to compare against.
The lookups is a list of names to extract using get_lookup()
and get_transform().
"""
# __exact is the default lookup if one isn't given.
*transforms, lookup_name = lookups or ["exact"]
for name in transforms:
lhs = self.try_transform(lhs, name)
# First try get_lookup() so that the lookup takes precedence if the lhs
# supports both transform and lookup for the name.
lookup_class = lhs.get_lookup(lookup_name)
if not lookup_class:
# A lookup wasn't found. Try to interpret the name as a transform
# and do an Exact lookup against it.
lhs = self.try_transform(lhs, lookup_name)
lookup_name = "exact"
lookup_class = lhs.get_lookup(lookup_name)
if not lookup_class:
return
lookup = lookup_class(lhs, rhs)
        # Interpret '__exact=None' as the SQL 'IS NULL'; otherwise, reject all
# uses of None as a query value unless the lookup supports it.
if lookup.rhs is None and not lookup.can_use_none_as_rhs:
if lookup_name not in ("exact", "iexact"):
raise ValueError("Cannot use None as a query value")
return lhs.get_lookup("isnull")(lhs, True)
# For Oracle '' is equivalent to null. The check must be done at this
# stage because join promotion can't be done in the compiler. Using
# DEFAULT_DB_ALIAS isn't nice but it's the best that can be done here.
# A similar thing is done in is_nullable(), too.
if (
lookup_name == "exact"
and lookup.rhs == ""
and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls
):
return lhs.get_lookup("isnull")(lhs, True)
return lookup
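    # Illustrative None handling: "field__exact=None" (or a bare
    # "field=None") is rewritten into an isnull lookup rather than an SQL
    # comparison against NULL:
    #
    #   query.build_lookup(["exact"], col, None)
    #   # -> lhs.get_lookup("isnull")(col, True), i.e. col IS NULL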
def try_transform(self, lhs, name):
"""
Helper method for build_lookup(). Try to fetch and initialize
a transform for name parameter from lhs.
"""
transform_class = lhs.get_transform(name)
if transform_class:
return transform_class(lhs)
else:
output_field = lhs.output_field.__class__
suggested_lookups = difflib.get_close_matches(
name, output_field.get_lookups()
)
if suggested_lookups:
suggestion = ", perhaps you meant %s?" % " or ".join(suggested_lookups)
else:
suggestion = "."
raise FieldError(
"Unsupported lookup '%s' for %s or join on the field not "
"permitted%s" % (name, output_field.__name__, suggestion)
)
def build_filter(
self,
filter_expr,
branch_negated=False,
current_negated=False,
can_reuse=None,
allow_joins=True,
split_subq=True,
reuse_with_filtered_relation=False,
check_filterable=True,
summarize=False,
):
"""
Build a WhereNode for a single filter clause but don't add it
to this Query. Query.add_q() will then add this filter to the where
Node.
The 'branch_negated' tells us if the current branch contains any
negations. This will be used to determine if subqueries are needed.
The 'current_negated' is used to determine if the current filter is
negated or not and this will be used to determine if IS NULL filtering
is needed.
The difference between current_negated and branch_negated is that
branch_negated is set on first negation, but current_negated is
flipped for each negation.
        Note that add_filter will not do any negating itself; that is done
        higher up in the call chain, by add_q().
The 'can_reuse' is a set of reusable joins for multijoins.
If 'reuse_with_filtered_relation' is True, then only joins in can_reuse
will be reused.
The method will create a filter clause that can be added to the current
query. However, if the filter isn't added to the query then the caller
is responsible for unreffing the joins used.
"""
if isinstance(filter_expr, dict):
raise FieldError("Cannot parse keyword query as dict")
if isinstance(filter_expr, Q):
return self._add_q(
filter_expr,
branch_negated=branch_negated,
current_negated=current_negated,
used_aliases=can_reuse,
allow_joins=allow_joins,
split_subq=split_subq,
check_filterable=check_filterable,
summarize=summarize,
)
if hasattr(filter_expr, "resolve_expression"):
if not getattr(filter_expr, "conditional", False):
raise TypeError("Cannot filter against a non-conditional expression.")
condition = filter_expr.resolve_expression(
self, allow_joins=allow_joins, summarize=summarize
)
if not isinstance(condition, Lookup):
condition = self.build_lookup(["exact"], condition, True)
return WhereNode([condition], connector=AND), []
arg, value = filter_expr
if not arg:
raise FieldError("Cannot parse keyword query %r" % arg)
lookups, parts, reffed_expression = self.solve_lookup_type(arg, summarize)
if check_filterable:
self.check_filterable(reffed_expression)
if not allow_joins and len(parts) > 1:
raise FieldError("Joined field references are not permitted in this query")
pre_joins = self.alias_refcount.copy()
value = self.resolve_lookup_value(value, can_reuse, allow_joins)
used_joins = {
k for k, v in self.alias_refcount.items() if v > pre_joins.get(k, 0)
}
if check_filterable:
self.check_filterable(value)
if reffed_expression:
condition = self.build_lookup(lookups, reffed_expression, value)
return WhereNode([condition], connector=AND), []
opts = self.get_meta()
alias = self.get_initial_alias()
allow_many = not branch_negated or not split_subq
try:
join_info = self.setup_joins(
parts,
opts,
alias,
can_reuse=can_reuse,
allow_many=allow_many,
reuse_with_filtered_relation=reuse_with_filtered_relation,
)
# Prevent iterator from being consumed by check_related_objects()
if isinstance(value, Iterator):
value = list(value)
self.check_related_objects(join_info.final_field, value, join_info.opts)
# split_exclude() needs to know which joins were generated for the
# lookup parts
self._lookup_joins = join_info.joins
except MultiJoin as e:
return self.split_exclude(filter_expr, can_reuse, e.names_with_path)
# Update used_joins before trimming since they are reused to determine
# which joins could be later promoted to INNER.
used_joins.update(join_info.joins)
targets, alias, join_list = self.trim_joins(
join_info.targets, join_info.joins, join_info.path
)
if can_reuse is not None:
can_reuse.update(join_list)
if join_info.final_field.is_relation:
if len(targets) == 1:
col = self._get_col(targets[0], join_info.final_field, alias)
else:
col = MultiColSource(
alias, targets, join_info.targets, join_info.final_field
)
else:
col = self._get_col(targets[0], join_info.final_field, alias)
condition = self.build_lookup(lookups, col, value)
lookup_type = condition.lookup_name
clause = WhereNode([condition], connector=AND)
require_outer = (
lookup_type == "isnull" and condition.rhs is True and not current_negated
)
if (
current_negated
and (lookup_type != "isnull" or condition.rhs is False)
and condition.rhs is not None
):
require_outer = True
if lookup_type != "isnull":
# The condition added here will be SQL like this:
# NOT (col IS NOT NULL), where the first NOT is added in
# upper layers of code. The reason for addition is that if col
# is null, then col != someval will result in SQL "unknown"
# which isn't the same as in Python. The Python None handling
# is wanted, and it can be gotten by
# (col IS NULL OR col != someval)
# <=>
# NOT (col IS NOT NULL AND col = someval).
if (
self.is_nullable(targets[0])
or self.alias_map[join_list[-1]].join_type == LOUTER
):
lookup_class = targets[0].get_lookup("isnull")
col = self._get_col(targets[0], join_info.targets[0], alias)
clause.add(lookup_class(col, False), AND)
# If someval is a nullable column, someval IS NOT NULL is
# added.
if isinstance(value, Col) and self.is_nullable(value.target):
lookup_class = value.target.get_lookup("isnull")
clause.add(lookup_class(value, False), AND)
return clause, used_joins if not require_outer else ()
def add_filter(self, filter_lhs, filter_rhs):
self.add_q(Q((filter_lhs, filter_rhs)))
def add_q(self, q_object):
"""
A preprocessor for the internal _add_q(). Responsible for doing final
join promotion.
"""
# For join promotion this case is doing an AND for the added q_object
# and existing conditions. So, any existing inner join forces the join
# type to remain inner. Existing outer joins can however be demoted.
        # (Consider the case where rel_a is LOUTER and rel_a__col=1 is added -
        # if rel_a doesn't produce any rows, then the whole condition must
        # fail. So, demotion is OK.)
existing_inner = {
a for a in self.alias_map if self.alias_map[a].join_type == INNER
}
clause, _ = self._add_q(q_object, self.used_aliases)
if clause:
self.where.add(clause, AND)
self.demote_joins(existing_inner)
def build_where(self, filter_expr):
return self.build_filter(filter_expr, allow_joins=False)[0]
def clear_where(self):
self.where = WhereNode()
def _add_q(
self,
q_object,
used_aliases,
branch_negated=False,
current_negated=False,
allow_joins=True,
split_subq=True,
check_filterable=True,
summarize=False,
):
"""Add a Q-object to the current filter."""
connector = q_object.connector
current_negated ^= q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = WhereNode(connector=connector, negated=q_object.negated)
joinpromoter = JoinPromoter(
q_object.connector, len(q_object.children), current_negated
)
for child in q_object.children:
child_clause, needed_inner = self.build_filter(
child,
can_reuse=used_aliases,
branch_negated=branch_negated,
current_negated=current_negated,
allow_joins=allow_joins,
split_subq=split_subq,
check_filterable=check_filterable,
summarize=summarize,
)
joinpromoter.add_votes(needed_inner)
if child_clause:
target_clause.add(child_clause, connector)
needed_inner = joinpromoter.update_join_types(self)
return target_clause, needed_inner
def build_filtered_relation_q(
self, q_object, reuse, branch_negated=False, current_negated=False
):
"""Add a FilteredRelation object to the current filter."""
connector = q_object.connector
current_negated ^= q_object.negated
branch_negated = branch_negated or q_object.negated
target_clause = WhereNode(connector=connector, negated=q_object.negated)
for child in q_object.children:
if isinstance(child, Node):
child_clause = self.build_filtered_relation_q(
child,
reuse=reuse,
branch_negated=branch_negated,
current_negated=current_negated,
)
else:
child_clause, _ = self.build_filter(
child,
can_reuse=reuse,
branch_negated=branch_negated,
current_negated=current_negated,
allow_joins=True,
split_subq=False,
reuse_with_filtered_relation=True,
)
target_clause.add(child_clause, connector)
return target_clause
def add_filtered_relation(self, filtered_relation, alias):
filtered_relation.alias = alias
lookups = dict(get_children_from_q(filtered_relation.condition))
relation_lookup_parts, relation_field_parts, _ = self.solve_lookup_type(
filtered_relation.relation_name
)
if relation_lookup_parts:
raise ValueError(
"FilteredRelation's relation_name cannot contain lookups "
"(got %r)." % filtered_relation.relation_name
)
for lookup in chain(lookups):
lookup_parts, lookup_field_parts, _ = self.solve_lookup_type(lookup)
shift = 2 if not lookup_parts else 1
lookup_field_path = lookup_field_parts[:-shift]
for idx, lookup_field_part in enumerate(lookup_field_path):
if len(relation_field_parts) > idx:
if relation_field_parts[idx] != lookup_field_part:
raise ValueError(
"FilteredRelation's condition doesn't support "
"relations outside the %r (got %r)."
% (filtered_relation.relation_name, lookup)
)
else:
raise ValueError(
"FilteredRelation's condition doesn't support nested "
"relations deeper than the relation_name (got %r for "
"%r)." % (lookup, filtered_relation.relation_name)
)
self._filtered_relations[filtered_relation.alias] = filtered_relation
def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
"""
        Walk the list of names and turn them into PathInfo tuples. A single
name in 'names' can generate multiple PathInfos (m2m, for example).
'names' is the path of names to travel, 'opts' is the model Options we
start the name resolving from, 'allow_many' is as for setup_joins().
If fail_on_missing is set to True, then a name that can't be resolved
will generate a FieldError.
Return a list of PathInfo tuples. In addition return the final field
(the last used join field) and target (which is a field guaranteed to
contain the same value as the final field). Finally, return those names
that weren't found (which are likely transforms and the final lookup).
"""
path, names_with_path = [], []
for pos, name in enumerate(names):
cur_names_with_path = (name, [])
if name == "pk":
name = opts.pk.name
field = None
filtered_relation = None
try:
if opts is None:
raise FieldDoesNotExist
field = opts.get_field(name)
except FieldDoesNotExist:
if name in self.annotation_select:
field = self.annotation_select[name].output_field
elif name in self._filtered_relations and pos == 0:
filtered_relation = self._filtered_relations[name]
if LOOKUP_SEP in filtered_relation.relation_name:
parts = filtered_relation.relation_name.split(LOOKUP_SEP)
filtered_relation_path, field, _, _ = self.names_to_path(
parts,
opts,
allow_many,
fail_on_missing,
)
path.extend(filtered_relation_path[:-1])
else:
field = opts.get_field(filtered_relation.relation_name)
if field is not None:
# Fields that contain one-to-many relations with a generic
# model (like a GenericForeignKey) cannot generate reverse
# relations and therefore cannot be used for reverse querying.
if field.is_relation and not field.related_model:
raise FieldError(
"Field %r does not generate an automatic reverse "
"relation and therefore cannot be used for reverse "
"querying. If it is a GenericForeignKey, consider "
"adding a GenericRelation." % name
)
try:
model = field.model._meta.concrete_model
except AttributeError:
# QuerySet.annotate() may introduce fields that aren't
# attached to a model.
model = None
else:
# We didn't find the current field, so move position back
# one step.
pos -= 1
if pos == -1 or fail_on_missing:
available = sorted(
[
*get_field_names_from_opts(opts),
*self.annotation_select,
*self._filtered_relations,
]
)
raise FieldError(
"Cannot resolve keyword '%s' into field. "
"Choices are: %s" % (name, ", ".join(available))
)
break
# Check if we need any joins for concrete inheritance cases (the
# field lives in parent, but we are currently in one of its
# children)
if opts is not None and model is not opts.model:
path_to_parent = opts.get_path_to_parent(model)
if path_to_parent:
path.extend(path_to_parent)
cur_names_with_path[1].extend(path_to_parent)
opts = path_to_parent[-1].to_opts
if hasattr(field, "path_infos"):
if filtered_relation:
pathinfos = field.get_path_info(filtered_relation)
else:
pathinfos = field.path_infos
if not allow_many:
for inner_pos, p in enumerate(pathinfos):
if p.m2m:
cur_names_with_path[1].extend(pathinfos[0 : inner_pos + 1])
names_with_path.append(cur_names_with_path)
raise MultiJoin(pos + 1, names_with_path)
last = pathinfos[-1]
path.extend(pathinfos)
final_field = last.join_field
opts = last.to_opts
targets = last.target_fields
cur_names_with_path[1].extend(pathinfos)
names_with_path.append(cur_names_with_path)
else:
# Local non-relational field.
final_field = field
targets = (field,)
if fail_on_missing and pos + 1 != len(names):
raise FieldError(
"Cannot resolve keyword %r into field. Join on '%s'"
" not permitted." % (names[pos + 1], name)
)
break
return path, final_field, targets, names[pos + 1 :]
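    # Illustrative resolution (hypothetical relations): for
    # names = ["author", "hometown", "name", "icontains"] starting from
    # Book's Options, names_to_path() returns the PathInfo chain for
    # author -> hometown, the final join field, its target fields, and the
    # trailing names that couldn't be resolved into fields (here the
    # "icontains" lookup).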
def setup_joins(
self,
names,
opts,
alias,
can_reuse=None,
allow_many=True,
reuse_with_filtered_relation=False,
):
"""
Compute the necessary table joins for the passage through the fields
given in 'names'. 'opts' is the Options class for the current model
(which gives the table we are starting from), 'alias' is the alias for
the table to start the joining from.
        The 'can_reuse' defines the reverse foreign key joins we can reuse. It
        can be None, in which case all joins are reusable, or a set of aliases
        that can be reused. Note that non-reverse foreign keys are always
        reusable when using setup_joins().
        If 'reuse_with_filtered_relation' is True, reuse is restricted to the
        aliases in 'can_reuse' when joining filtered relations.
If 'allow_many' is False, then any reverse foreign key seen will
generate a MultiJoin exception.
Return the final field involved in the joins, the target field (used
for any 'where' constraint), the final 'opts' value, the joins, the
field path traveled to generate the joins, and a transform function
that takes a field and alias and is equivalent to `field.get_col(alias)`
in the simple case but wraps field transforms if they were included in
names.
The target field is the field containing the concrete value. Final
field can be something different, for example foreign key pointing to
that value. Final field is needed for example in some value
conversions (convert 'obj' in fk__id=obj to pk val using the foreign
key field for example).
"""
joins = [alias]
# The transform can't be applied yet, as joins must be trimmed later.
# To avoid making every caller of this method look up transforms
# directly, compute transforms here and create a partial that converts
# fields to the appropriate wrapped version.
def final_transformer(field, alias):
if not self.alias_cols:
alias = None
return field.get_col(alias)
# Try resolving all the names as fields first. If there's an error,
# treat trailing names as lookups until a field can be resolved.
last_field_exception = None
for pivot in range(len(names), 0, -1):
try:
path, final_field, targets, rest = self.names_to_path(
names[:pivot],
opts,
allow_many,
fail_on_missing=True,
)
except FieldError as exc:
if pivot == 1:
# The first item cannot be a lookup, so it's safe
# to raise the field error here.
raise
else:
last_field_exception = exc
else:
# The transforms are the remaining items that couldn't be
# resolved into fields.
transforms = names[pivot:]
break
for name in transforms:
def transform(field, alias, *, name, previous):
try:
wrapped = previous(field, alias)
return self.try_transform(wrapped, name)
except FieldError:
# FieldError is raised if the transform doesn't exist.
if isinstance(final_field, Field) and last_field_exception:
raise last_field_exception
else:
raise
final_transformer = functools.partial(
transform, name=name, previous=final_transformer
)
final_transformer.has_transforms = True
# Then, add the path to the query's joins. Note that we can't trim
# joins at this stage - we will need the information about join type
# of the trimmed joins.
for join in path:
if join.filtered_relation:
filtered_relation = join.filtered_relation.clone()
table_alias = filtered_relation.alias
else:
filtered_relation = None
table_alias = None
opts = join.to_opts
if join.direct:
nullable = self.is_nullable(join.join_field)
else:
nullable = True
connection = self.join_class(
opts.db_table,
alias,
table_alias,
INNER,
join.join_field,
nullable,
filtered_relation=filtered_relation,
)
reuse = can_reuse if join.m2m or reuse_with_filtered_relation else None
alias = self.join(
connection,
reuse=reuse,
reuse_with_filtered_relation=reuse_with_filtered_relation,
)
joins.append(alias)
if filtered_relation:
filtered_relation.path = joins[:]
return JoinInfo(final_field, targets, opts, joins, path, final_transformer)
def trim_joins(self, targets, joins, path):
"""
        The 'targets' parameter contains the final fields being joined to,
        'joins' is the full list of join aliases. The 'path' contains the
        PathInfos
used to create the joins.
Return the final target field and table alias and the new active
joins.
Always trim any direct join if the target column is already in the
previous table. Can't trim reverse joins as it's unknown if there's
anything on the other side of the join.
"""
joins = joins[:]
for pos, info in enumerate(reversed(path)):
if len(joins) == 1 or not info.direct:
break
if info.filtered_relation:
break
join_targets = {t.column for t in info.join_field.foreign_related_fields}
cur_targets = {t.column for t in targets}
if not cur_targets.issubset(join_targets):
break
targets_dict = {
r[1].column: r[0]
for r in info.join_field.related_fields
if r[1].column in cur_targets
}
targets = tuple(targets_dict[t.column] for t in targets)
self.unref_alias(joins.pop())
return targets, joins[-1], joins
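    # Illustrative trim (hypothetical FK "author" on Book): a filter on
    # "author__pk" needs no join at all, because the target column already
    # lives in the previous table; the trailing join is popped and its
    # alias unreffed:
    #
    #   Book.objects.filter(author__pk=1)
    #   # -> WHERE "myapp_book"."author_id" = 1 (no JOIN against author)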
@classmethod
def _gen_cols(cls, exprs, include_external=False, resolve_refs=True):
for expr in exprs:
if isinstance(expr, Col):
yield expr
elif include_external and callable(
getattr(expr, "get_external_cols", None)
):
yield from expr.get_external_cols()
elif hasattr(expr, "get_source_expressions"):
if not resolve_refs and isinstance(expr, Ref):
continue
yield from cls._gen_cols(
expr.get_source_expressions(),
include_external=include_external,
resolve_refs=resolve_refs,
)
@classmethod
def _gen_col_aliases(cls, exprs):
yield from (expr.alias for expr in cls._gen_cols(exprs))
def resolve_ref(self, name, allow_joins=True, reuse=None, summarize=False):
annotation = self.annotations.get(name)
if annotation is not None:
if not allow_joins:
for alias in self._gen_col_aliases([annotation]):
if isinstance(self.alias_map[alias], Join):
raise FieldError(
"Joined field references are not permitted in this query"
)
if summarize:
# Summarize currently means we are doing an aggregate() query
# which is executed as a wrapped subquery if any of the
# aggregate() elements reference an existing annotation. In
# that case we need to return a Ref to the subquery's annotation.
if name not in self.annotation_select:
raise FieldError(
"Cannot aggregate over the '%s' alias. Use annotate() "
"to promote it." % name
)
return Ref(name, self.annotation_select[name])
else:
return annotation
else:
field_list = name.split(LOOKUP_SEP)
annotation = self.annotations.get(field_list[0])
if annotation is not None:
for transform in field_list[1:]:
annotation = self.try_transform(annotation, transform)
return annotation
join_info = self.setup_joins(
field_list, self.get_meta(), self.get_initial_alias(), can_reuse=reuse
)
targets, final_alias, join_list = self.trim_joins(
join_info.targets, join_info.joins, join_info.path
)
if not allow_joins and len(join_list) > 1:
raise FieldError(
"Joined field references are not permitted in this query"
)
if len(targets) > 1:
raise FieldError(
"Referencing multicolumn fields with F() objects isn't supported"
)
# Verify that the last lookup in name is a field or a transform:
# transform_function() raises FieldError if not.
transform = join_info.transform_function(targets[0], final_alias)
if reuse is not None:
reuse.update(join_list)
return transform
def split_exclude(self, filter_expr, can_reuse, names_with_path):
"""
When doing an exclude against any kind of N-to-many relation, we need
to use a subquery. This method constructs the nested query, given the
original exclude filter (filter_expr) and the portion up to the first
N-to-many relation field.
        For example, if the original filter is ~Q(child__name='foo'),
        filter_expr is ('child__name', 'foo') and can_reuse is a set of joins
        usable for filters in the original query.
        We will turn this into the equivalent of:
WHERE NOT EXISTS(
SELECT 1
FROM child
WHERE name = 'foo' AND child.parent_id = parent.id
LIMIT 1
)
"""
# Generate the inner query.
query = self.__class__(self.model)
query._filtered_relations = self._filtered_relations
filter_lhs, filter_rhs = filter_expr
if isinstance(filter_rhs, OuterRef):
filter_rhs = OuterRef(filter_rhs)
elif isinstance(filter_rhs, F):
filter_rhs = OuterRef(filter_rhs.name)
query.add_filter(filter_lhs, filter_rhs)
query.clear_ordering(force=True)
# Try to have as simple as possible subquery -> trim leading joins from
# the subquery.
trimmed_prefix, contains_louter = query.trim_start(names_with_path)
col = query.select[0]
select_field = col.target
alias = col.alias
if alias in can_reuse:
pk = select_field.model._meta.pk
# Need to add a restriction so that outer query's filters are in effect for
# the subquery, too.
query.bump_prefix(self)
lookup_class = select_field.get_lookup("exact")
# Note that the query.select[0].alias is different from alias
# due to bump_prefix above.
lookup = lookup_class(pk.get_col(query.select[0].alias), pk.get_col(alias))
query.where.add(lookup, AND)
query.external_aliases[alias] = True
lookup_class = select_field.get_lookup("exact")
lookup = lookup_class(col, ResolvedOuterRef(trimmed_prefix))
query.where.add(lookup, AND)
condition, needed_inner = self.build_filter(Exists(query))
if contains_louter:
or_null_condition, _ = self.build_filter(
("%s__isnull" % trimmed_prefix, True),
current_negated=True,
branch_negated=True,
can_reuse=can_reuse,
)
condition.add(or_null_condition, OR)
# Note that the end result will be:
# (outercol NOT IN innerq AND outercol IS NOT NULL) OR outercol IS NULL.
# This might look crazy but due to how IN works, this seems to be
# correct. If the IS NOT NULL check is removed then outercol NOT
# IN will return UNKNOWN. If the IS NULL check is removed, then if
# outercol IS NULL we will not match the row.
return condition, needed_inner
def set_empty(self):
self.where.add(NothingNode(), AND)
for query in self.combined_queries:
query.set_empty()
def is_empty(self):
return any(isinstance(c, NothingNode) for c in self.where.children)
def set_limits(self, low=None, high=None):
"""
Adjust the limits on the rows retrieved. Use low/high to set these,
as it makes it more Pythonic to read and write. When the SQL query is
created, convert them to the appropriate offset and limit values.
Apply any limits passed in here to the existing constraints. Add low
to the current low value and clamp both to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
self.high_mark = min(self.high_mark, self.low_mark + high)
else:
self.high_mark = self.low_mark + high
if low is not None:
if self.high_mark is not None:
self.low_mark = min(self.high_mark, self.low_mark + low)
else:
self.low_mark = self.low_mark + low
if self.low_mark == self.high_mark:
self.set_empty()
def clear_limits(self):
"""Clear any existing limits."""
self.low_mark, self.high_mark = 0, None
@property
def is_sliced(self):
return self.low_mark != 0 or self.high_mark is not None
def has_limit_one(self):
return self.high_mark is not None and (self.high_mark - self.low_mark) == 1
def can_filter(self):
"""
Return True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.is_sliced
def clear_select_clause(self):
"""Remove all fields from SELECT clause."""
self.select = ()
self.default_cols = False
self.select_related = False
self.set_extra_mask(())
self.set_annotation_mask(())
def clear_select_fields(self):
"""
Clear the list of fields to select (but not extra_select columns).
Some queryset types completely replace any existing list of select
columns.
"""
self.select = ()
self.values_select = ()
def add_select_col(self, col, name):
self.select += (col,)
self.values_select += (name,)
def set_select(self, cols):
self.default_cols = False
self.select = tuple(cols)
def add_distinct_fields(self, *field_names):
"""
Add and resolve the given fields to the query's "distinct on" clause.
"""
self.distinct_fields = field_names
self.distinct = True
def add_fields(self, field_names, allow_m2m=True):
"""
Add the given (model) fields to the select set. Add the field names in
the order specified.
"""
alias = self.get_initial_alias()
opts = self.get_meta()
try:
cols = []
for name in field_names:
                # Join promotion note - we must not remove any rows here, so
                # if there are no existing joins, use an outer join.
join_info = self.setup_joins(
name.split(LOOKUP_SEP), opts, alias, allow_many=allow_m2m
)
targets, final_alias, joins = self.trim_joins(
join_info.targets,
join_info.joins,
join_info.path,
)
for target in targets:
cols.append(join_info.transform_function(target, final_alias))
if cols:
self.set_select(cols)
except MultiJoin:
raise FieldError("Invalid field name: '%s'" % name)
except FieldError:
if LOOKUP_SEP in name:
# For lookups spanning over relationships, show the error
# from the model on which the lookup failed.
raise
elif name in self.annotations:
raise FieldError(
"Cannot select the '%s' alias. Use annotate() to promote "
"it." % name
)
else:
names = sorted(
[
*get_field_names_from_opts(opts),
*self.extra,
*self.annotation_select,
*self._filtered_relations,
]
)
raise FieldError(
"Cannot resolve keyword %r into field. "
"Choices are: %s" % (name, ", ".join(names))
)
def add_ordering(self, *ordering):
"""
Add items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-' or '?') -- or OrderBy
expressions.
If 'ordering' is empty, clear all ordering from the query.
"""
errors = []
for item in ordering:
if isinstance(item, str):
if item == "?":
continue
if item.startswith("-"):
item = item[1:]
if item in self.annotations:
continue
if self.extra and item in self.extra:
continue
                # names_to_path() validates the lookup. A descriptive
                # FieldError is raised if it's not valid.
self.names_to_path(item.split(LOOKUP_SEP), self.model._meta)
elif not hasattr(item, "resolve_expression"):
errors.append(item)
if getattr(item, "contains_aggregate", False):
raise FieldError(
"Using an aggregate in order_by() without also including "
"it in annotate() is not allowed: %s" % item
)
if errors:
raise FieldError("Invalid order_by arguments: %s" % errors)
if ordering:
self.order_by += ordering
else:
self.default_ordering = False
def clear_ordering(self, force=False, clear_default=True):
"""
        Remove any ordering settings if the current query allows it without
        side effects; set 'force' to True to clear the ordering regardless.
If 'clear_default' is True, there will be no ordering in the resulting
query (not even the model's default).
"""
if not force and (
self.is_sliced or self.distinct_fields or self.select_for_update
):
return
self.order_by = ()
self.extra_order_by = ()
if clear_default:
self.default_ordering = False
def set_group_by(self, allow_aliases=True):
"""
Expand the GROUP BY clause required by the query.
This will usually be the set of all non-aggregate fields in the
return data. If the database backend supports grouping by the
primary key, and the query would be equivalent, the optimization
will be made automatically.
"""
if allow_aliases:
# Column names from JOINs to check collisions with aliases.
column_names = set()
seen_models = set()
for join in list(self.alias_map.values())[1:]: # Skip base table.
model = join.join_field.related_model
if model not in seen_models:
column_names.update(
{field.column for field in model._meta.local_concrete_fields}
)
seen_models.add(model)
if self.values_select:
# If grouping by aliases is allowed assign selected values
# aliases by moving them to annotations.
group_by_annotations = {}
values_select = {}
for alias, expr in zip(self.values_select, self.select):
if isinstance(expr, Col):
values_select[alias] = expr
else:
group_by_annotations[alias] = expr
self.annotations = {**group_by_annotations, **self.annotations}
self.append_annotation_mask(group_by_annotations)
self.select = tuple(values_select.values())
self.values_select = tuple(values_select)
group_by = list(self.select)
for alias, annotation in self.annotation_select.items():
if not (group_by_cols := annotation.get_group_by_cols()):
continue
if (
allow_aliases
and alias not in column_names
and not annotation.contains_aggregate
):
group_by.append(Ref(alias, annotation))
else:
group_by.extend(group_by_cols)
self.group_by = tuple(group_by)
def add_select_related(self, fields):
"""
Set up the select_related data structure so that we only select
certain related models (as opposed to all models, when
self.select_related=True).
"""
if isinstance(self.select_related, bool):
field_dict = {}
else:
field_dict = self.select_related
for field in fields:
d = field_dict
for part in field.split(LOOKUP_SEP):
d = d.setdefault(part, {})
self.select_related = field_dict
def add_extra(self, select, select_params, where, params, tables, order_by):
"""
Add data to the various extra_* attributes for user-created additions
to the query.
"""
if select:
# We need to pair any placeholder markers in the 'select'
# dictionary with their parameters in 'select_params' so that
# subsequent updates to the select dictionary also adjust the
# parameters appropriately.
select_pairs = {}
if select_params:
param_iter = iter(select_params)
else:
param_iter = iter([])
for name, entry in select.items():
self.check_alias(name)
entry = str(entry)
entry_params = []
pos = entry.find("%s")
while pos != -1:
if pos == 0 or entry[pos - 1] != "%":
entry_params.append(next(param_iter))
pos = entry.find("%s", pos + 2)
select_pairs[name] = (entry, entry_params)
self.extra.update(select_pairs)
if where or params:
self.where.add(ExtraWhere(where, params), AND)
if tables:
self.extra_tables += tuple(tables)
if order_by:
self.extra_order_by = order_by
def clear_deferred_loading(self):
"""Remove any fields from the deferred loading set."""
self.deferred_loading = (frozenset(), True)
def add_deferred_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
exclude from loading from the database when automatic column selection
is done. Add the new field names to any existing field names that
are deferred (or removed from any existing field names that are marked
as the only ones for immediate loading).
"""
# Fields on related models are stored in the literal double-underscore
        # format, so that we can use a set data structure. We do the foo__bar
# splitting and handling when computing the SQL column names (as part of
# get_columns()).
existing, defer = self.deferred_loading
if defer:
# Add to existing deferred names.
self.deferred_loading = existing.union(field_names), True
else:
# Remove names from the set of any existing "immediate load" names.
if new_existing := existing.difference(field_names):
self.deferred_loading = new_existing, False
else:
self.clear_deferred_loading()
if new_only := set(field_names).difference(existing):
self.deferred_loading = new_only, True
def add_immediate_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
retrieve when the SQL is executed ("immediate loading" fields). The
field names replace any existing immediate loading field names. If
there are field names already specified for deferred loading, remove
those names from the new field_names before storing the new names
for immediate loading. (That is, immediate loading overrides any
existing immediate values, but respects existing deferrals.)
"""
existing, defer = self.deferred_loading
field_names = set(field_names)
if "pk" in field_names:
field_names.remove("pk")
field_names.add(self.get_meta().pk.name)
if defer:
# Remove any existing deferred names from the current set before
# setting the new names.
self.deferred_loading = field_names.difference(existing), False
else:
# Replace any existing "immediate load" field names.
self.deferred_loading = frozenset(field_names), False
def set_annotation_mask(self, names):
"""Set the mask of annotations that will be returned by the SELECT."""
if names is None:
self.annotation_select_mask = None
else:
self.annotation_select_mask = set(names)
self._annotation_select_cache = None
def append_annotation_mask(self, names):
if self.annotation_select_mask is not None:
self.set_annotation_mask(self.annotation_select_mask.union(names))
def set_extra_mask(self, names):
"""
Set the mask of extra select items that will be returned by SELECT.
Don't remove them from the Query since they might be used later.
"""
if names is None:
self.extra_select_mask = None
else:
self.extra_select_mask = set(names)
self._extra_select_cache = None
def set_values(self, fields):
self.select_related = False
self.clear_deferred_loading()
self.clear_select_fields()
self.has_select_fields = True
if fields:
field_names = []
extra_names = []
annotation_names = []
if not self.extra and not self.annotations:
# Shortcut - if there are no extra or annotations, then
# the values() clause must be just field names.
field_names = list(fields)
else:
self.default_cols = False
for f in fields:
if f in self.extra_select:
extra_names.append(f)
elif f in self.annotation_select:
annotation_names.append(f)
else:
field_names.append(f)
self.set_extra_mask(extra_names)
self.set_annotation_mask(annotation_names)
selected = frozenset(field_names + extra_names + annotation_names)
else:
field_names = [f.attname for f in self.model._meta.concrete_fields]
selected = frozenset(field_names)
# Selected annotations must be known before setting the GROUP BY
# clause.
if self.group_by is True:
self.add_fields(
(f.attname for f in self.model._meta.concrete_fields), False
)
# Disable GROUP BY aliases to avoid orphaning references to the
# SELECT clause which is about to be cleared.
self.set_group_by(allow_aliases=False)
self.clear_select_fields()
elif self.group_by:
# Resolve GROUP BY annotation references if they are not part of
# the selected fields anymore.
group_by = []
for expr in self.group_by:
if isinstance(expr, Ref) and expr.refs not in selected:
expr = self.annotations[expr.refs]
group_by.append(expr)
self.group_by = tuple(group_by)
self.values_select = tuple(field_names)
self.add_fields(field_names, True)
@property
def annotation_select(self):
"""
Return the dictionary of aggregate columns that are not masked and
should be used in the SELECT clause. Cache this result for performance.
"""
if self._annotation_select_cache is not None:
return self._annotation_select_cache
elif not self.annotations:
return {}
elif self.annotation_select_mask is not None:
self._annotation_select_cache = {
k: v
for k, v in self.annotations.items()
if k in self.annotation_select_mask
}
return self._annotation_select_cache
else:
return self.annotations
@property
def extra_select(self):
if self._extra_select_cache is not None:
return self._extra_select_cache
if not self.extra:
return {}
elif self.extra_select_mask is not None:
self._extra_select_cache = {
k: v for k, v in self.extra.items() if k in self.extra_select_mask
}
return self._extra_select_cache
else:
return self.extra
def trim_start(self, names_with_path):
"""
Trim joins from the start of the join path. The candidates for trim
are the PathInfos in names_with_path structure that are m2m joins.
Also set the select column so the start matches the join.
This method is meant to be used for generating the subquery joins &
cols in split_exclude().
Return a lookup usable for doing outerq.filter(lookup=self) and a
boolean indicating if the joins in the prefix contain a LEFT OUTER join.
_"""
all_paths = []
for _, paths in names_with_path:
all_paths.extend(paths)
contains_louter = False
# Trim and operate only on tables that were generated for
# the lookup part of the query. That is, avoid trimming
# joins generated for F() expressions.
lookup_tables = [
t for t in self.alias_map if t in self._lookup_joins or t == self.base_table
]
for trimmed_paths, path in enumerate(all_paths):
if path.m2m:
break
if self.alias_map[lookup_tables[trimmed_paths + 1]].join_type == LOUTER:
contains_louter = True
alias = lookup_tables[trimmed_paths]
self.unref_alias(alias)
        # The path.join_field is a Rel; let's get the other side's field.
join_field = path.join_field.field
# Build the filter prefix.
paths_in_prefix = trimmed_paths
trimmed_prefix = []
for name, path in names_with_path:
if paths_in_prefix - len(path) < 0:
break
trimmed_prefix.append(name)
paths_in_prefix -= len(path)
trimmed_prefix.append(join_field.foreign_related_fields[0].name)
trimmed_prefix = LOOKUP_SEP.join(trimmed_prefix)
        # Let's still see if we can trim the first join from the inner query
# (that is, self). We can't do this for:
# - LEFT JOINs because we would miss those rows that have nothing on
# the outer side,
# - INNER JOINs from filtered relations because we would miss their
# filters.
first_join = self.alias_map[lookup_tables[trimmed_paths + 1]]
if first_join.join_type != LOUTER and not first_join.filtered_relation:
select_fields = [r[0] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths + 1]
self.unref_alias(lookup_tables[trimmed_paths])
extra_restriction = join_field.get_extra_restriction(
None, lookup_tables[trimmed_paths + 1]
)
if extra_restriction:
self.where.add(extra_restriction, AND)
else:
            # TODO: It might be possible to trim more joins from the start of
            # the inner query if it happens to have a longer join chain
            # containing the values in select_fields. Let's punt on this for
            # now.
select_fields = [r[1] for r in join_field.related_fields]
select_alias = lookup_tables[trimmed_paths]
# The found starting point is likely a join_class instead of a
# base_table_class reference. But the first entry in the query's FROM
# clause must not be a JOIN.
for table in self.alias_map:
if self.alias_refcount[table] > 0:
self.alias_map[table] = self.base_table_class(
self.alias_map[table].table_name,
table,
)
break
self.set_select([f.get_col(select_alias) for f in select_fields])
return trimmed_prefix, contains_louter
def is_nullable(self, field):
"""
Check if the given field should be treated as nullable.
Some backends treat '' as null and Django treats such fields as
nullable for those backends. In such situations field.null can be
False even if we should treat the field as nullable.
"""
# We need to use DEFAULT_DB_ALIAS here, as QuerySet does not have
# (nor should it have) knowledge of which connection is going to be
# used. The proper fix would be to defer all decisions where
# is_nullable() is needed to the compiler stage, but that is not easy
# to do currently.
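        # For example, on Oracle (where interprets_empty_strings_as_nulls is
        # True) a CharField(null=False) is still treated as nullable here,
        # because the backend stores '' as NULL.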
return field.null or (
field.empty_strings_allowed
and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls
)
def get_order_dir(field, default="ASC"):
"""
Return the field name and direction for an order specification. For
example, '-foo' is returned as ('foo', 'DESC').
The 'default' param is used to indicate which way no prefix (or a '+'
prefix) should sort. The '-' prefix always sorts the opposite way.
"""
dirn = ORDER_DIR[default]
if field[0] == "-":
return field[1:], dirn[1]
return field, dirn[0]
class JoinPromoter:
"""
A class to abstract away join promotion problems for complex filter
conditions.
"""
def __init__(self, connector, num_children, negated):
self.connector = connector
self.negated = negated
if self.negated:
if connector == AND:
self.effective_connector = OR
else:
self.effective_connector = AND
else:
self.effective_connector = self.connector
self.num_children = num_children
# Maps of table alias to how many times it is seen as required for
# inner and/or outer joins.
self.votes = Counter()
def __repr__(self):
return (
f"{self.__class__.__qualname__}(connector={self.connector!r}, "
f"num_children={self.num_children!r}, negated={self.negated!r})"
)
def add_votes(self, votes):
"""
        Add a single vote per item to self.votes. The parameter can be any
        iterable.
"""
self.votes.update(votes)
def update_join_types(self, query):
"""
Change join types so that the generated query is as efficient as
possible, but still correct. So, change as many joins as possible
to INNER, but don't make OUTER joins INNER if that could remove
results from the query.
"""
to_promote = set()
to_demote = set()
# The effective_connector is used so that NOT (a AND b) is treated
# similarly to (a OR b) for join promotion.
for table, votes in self.votes.items():
# We must use outer joins in OR case when the join isn't contained
# in all of the joins. Otherwise the INNER JOIN itself could remove
# valid results. Consider the case where a model with rel_a and
# rel_b relations is queried with rel_a__col=1 | rel_b__col=2. Now,
            # if the rel_a join doesn't produce any results (for example, a
            # reverse foreign key with no matching rows or a null value in a
            # direct foreign key), and
# there is a matching row in rel_b with col=2, then an INNER join
# to rel_a would remove a valid match from the query. So, we need
# to promote any existing INNER to LOUTER (it is possible this
# promotion in turn will be demoted later on).
if self.effective_connector == OR and votes < self.num_children:
to_promote.add(table)
# If connector is AND and there is a filter that can match only
# when there is a joinable row, then use INNER. For example, in
# rel_a__col=1 & rel_b__col=2, if either of the rels produce NULL
# as join output, then the col=1 or col=2 can't match (as
# NULL=anything is always false).
# For the OR case, if all children voted for a join to be inner,
# then we can use INNER for the join. For example:
# (rel_a__col__icontains=Alex | rel_a__col__icontains=Russell)
# then if rel_a doesn't produce any rows, the whole condition
# can't match. Hence we can safely use INNER join.
if self.effective_connector == AND or (
self.effective_connector == OR and votes == self.num_children
):
to_demote.add(table)
# Finally, what happens in cases where we have:
# (rel_a__col=1|rel_b__col=2) & rel_a__col__gte=0
# Now, we first generate the OR clause, and promote joins for it
# in the first if branch above. Both rel_a and rel_b are promoted
# to LOUTER joins. After that we do the AND case. The OR case
# voted no inner joins but the rel_a__col__gte=0 votes inner join
# for rel_a. We demote it back to INNER join (in AND case a single
# vote is enough). The demotion is OK, if rel_a doesn't produce
# rows, then the rel_a__col__gte=0 clause can't be true, and thus
# the whole clause must be false. So, it is safe to use INNER
# join.
# Note that in this example we could just as well have the __gte
# clause and the OR clause swapped. Or we could replace the __gte
# clause with an OR clause containing rel_a__col=1|rel_a__col=2,
# and again we could safely demote to INNER.
query.promote_joins(to_promote)
query.demote_joins(to_demote)
return to_demote
|
5336ebbb303786f5489bc61286a558ca573ea7eaf423e27e50ac8d6195279100 | import collections
import json
import re
from functools import partial
from itertools import chain
from django.core.exceptions import EmptyResultSet, FieldError, FullResultSet
from django.db import DatabaseError, NotSupportedError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import F, OrderBy, RawSQL, Ref, Value
from django.db.models.functions import Cast, Random
from django.db.models.lookups import Lookup
from django.db.models.query_utils import select_related_descend
from django.db.models.sql.constants import (
CURSOR,
GET_ITERATOR_CHUNK_SIZE,
MULTI,
NO_RESULTS,
ORDER_DIR,
SINGLE,
)
from django.db.models.sql.query import Query, get_order_dir
from django.db.models.sql.where import AND
from django.db.transaction import TransactionManagementError
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
from django.utils.regex_helper import _lazy_re_compile
class SQLCompiler:
# Multiline ordering SQL clause may appear from RawSQL.
ordering_parts = _lazy_re_compile(
r"^(.*)\s(?:ASC|DESC).*",
re.MULTILINE | re.DOTALL,
)
def __init__(self, query, connection, using, elide_empty=True):
self.query = query
self.connection = connection
self.using = using
# Some queries, e.g. coalesced aggregation, need to be executed even if
# they would return an empty result set.
self.elide_empty = elide_empty
self.quote_cache = {"*": "*"}
        # The select, klass_info, and annotations are needed by
        # QuerySet.iterator(); these are set as a side effect of executing
        # the query. Note that we calculate
# separately a list of extra select columns needed for grammatical correctness
# of the query, but these columns are not included in self.select.
self.select = None
self.annotation_col_map = None
self.klass_info = None
self._meta_ordering = None
def __repr__(self):
return (
f"<{self.__class__.__qualname__} "
f"model={self.query.model.__qualname__} "
f"connection={self.connection!r} using={self.using!r}>"
)
def setup_query(self, with_col_aliases=False):
if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map):
self.query.get_initial_alias()
self.select, self.klass_info, self.annotation_col_map = self.get_select(
with_col_aliases=with_col_aliases,
)
self.col_count = len(self.select)
def pre_sql_setup(self, with_col_aliases=False):
"""
Do any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
self.setup_query(with_col_aliases=with_col_aliases)
order_by = self.get_order_by()
self.where, self.having, self.qualify = self.query.where.split_having_qualify(
must_group_by=self.query.group_by is not None
)
extra_select = self.get_extra_select(order_by, self.select)
self.has_extra_select = bool(extra_select)
group_by = self.get_group_by(self.select + extra_select, order_by)
return extra_select, order_by, group_by
def get_group_by(self, select, order_by):
"""
Return a list of 2-tuples of form (sql, params).
The logic of what exactly the GROUP BY clause contains is hard
to describe in other words than "if it passes the test suite,
then it is correct".
"""
# Some examples:
# SomeModel.objects.annotate(Count('somecol'))
# GROUP BY: all fields of the model
#
# SomeModel.objects.values('name').annotate(Count('somecol'))
# GROUP BY: name
#
# SomeModel.objects.annotate(Count('somecol')).values('name')
# GROUP BY: all cols of the model
#
# SomeModel.objects.values('name', 'pk')
# .annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# SomeModel.objects.values('name').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# In fact, the self.query.group_by is the minimal set to GROUP BY. It
# can't be ever restricted to a smaller set, but additional columns in
# HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately
# the end result is that it is impossible to force the query to have
# a chosen GROUP BY clause - you can almost do this by using the form:
# .values(*wanted_cols).annotate(AnAggregate())
# but any later annotations, extra selects, values calls that
# refer some column outside of the wanted_cols, order_by, or even
# filter calls can alter the GROUP BY clause.
# The query.group_by is either None (no GROUP BY at all), True
# (group by select fields), or a list of expressions to be added
# to the group by.
if self.query.group_by is None:
return []
expressions = []
allows_group_by_refs = self.connection.features.allows_group_by_refs
if self.query.group_by is not True:
            # If the group by is set to a list (most likely by a .values()
            # call), then we need to add everything in it to the GROUP BY
            # clause.
# Backwards compatibility hack for setting query.group_by. Remove
# when we have public API way of forcing the GROUP BY clause.
# Converts string references to expressions.
for expr in self.query.group_by:
if not hasattr(expr, "as_sql"):
expr = self.query.resolve_ref(expr)
if not allows_group_by_refs and isinstance(expr, Ref):
expr = expr.source
expressions.append(expr)
# Note that even if the group_by is set, it is only the minimal
        # set to group by. So, we need to add the cols in select, order_by,
        # and having into the GROUP BY in any case.
ref_sources = {expr.source for expr in expressions if isinstance(expr, Ref)}
aliased_exprs = {}
for expr, _, alias in select:
# Skip members of the select clause that are already included
# by reference.
if expr in ref_sources:
continue
if alias:
aliased_exprs[expr] = alias
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
if not self._meta_ordering:
for expr, (sql, params, is_ref) in order_by:
# Skip references to the SELECT clause, as all expressions in
# the SELECT clause are already part of the GROUP BY.
if not is_ref:
expressions.extend(expr.get_group_by_cols())
having_group_by = self.having.get_group_by_cols() if self.having else ()
for expr in having_group_by:
expressions.append(expr)
result = []
seen = set()
expressions = self.collapse_group_by(expressions, having_group_by)
for expr in expressions:
if allows_group_by_refs and (alias := aliased_exprs.get(expr)):
expr = Ref(alias, expr)
try:
sql, params = self.compile(expr)
except (EmptyResultSet, FullResultSet):
continue
sql, params = expr.select_format(self, sql, params)
params_hash = make_hashable(params)
if (sql, params_hash) not in seen:
result.append((sql, params))
seen.add((sql, params_hash))
return result
def collapse_group_by(self, expressions, having):
# If the database supports group by functional dependence reduction,
# then the expressions can be reduced to the set of selected table
# primary keys as all other columns are functionally dependent on them.
if self.connection.features.allows_group_by_selected_pks:
# Filter out all expressions associated with a table's primary key
# present in the grouped columns. This is done by identifying all
# tables that have their primary key included in the grouped
# columns and removing non-primary key columns referring to them.
# Unmanaged models are excluded because they could be representing
# database views on which the optimization might not be allowed.
pks = {
expr
for expr in expressions
if (
hasattr(expr, "target")
and expr.target.primary_key
and self.connection.features.allows_group_by_selected_pks_on_model(
expr.target.model
)
)
}
aliases = {expr.alias for expr in pks}
expressions = [
expr
for expr in expressions
if expr in pks
or expr in having
or getattr(expr, "alias", None) not in aliases
]
return expressions
def get_select(self, with_col_aliases=False):
"""
Return three values:
- a list of 3-tuples of (expression, (sql, params), alias)
- a klass_info structure,
- a dictionary of annotations
The (sql, params) is what the expression will produce, and alias is the
"AS alias" for the column (possibly None).
The klass_info structure contains the following information:
- The base model of the query.
- Which columns for that model are present in the query (by
position of the select clause).
        - related_klass_infos: [f, klass_info] to descend into
        The annotations value is a dictionary of {'attname': column position}
        pairs.
"""
select = []
klass_info = None
annotations = {}
select_idx = 0
for alias, (sql, params) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += 1
assert not (self.query.select and self.query.default_cols)
select_mask = self.query.get_select_mask()
if self.query.default_cols:
cols = self.get_default_columns(select_mask)
else:
# self.query.select is a special case. These columns never go to
# any model.
cols = self.query.select
if cols:
select_list = []
for col in cols:
select_list.append(select_idx)
select.append((col, None))
select_idx += 1
klass_info = {
"model": self.query.model,
"select_fields": select_list,
}
for alias, annotation in self.query.annotation_select.items():
annotations[alias] = select_idx
select.append((annotation, alias))
select_idx += 1
if self.query.select_related:
related_klass_infos = self.get_related_selections(select, select_mask)
klass_info["related_klass_infos"] = related_klass_infos
def get_select_from_parent(klass_info):
for ki in klass_info["related_klass_infos"]:
if ki["from_parent"]:
ki["select_fields"] = (
klass_info["select_fields"] + ki["select_fields"]
)
get_select_from_parent(ki)
get_select_from_parent(klass_info)
ret = []
col_idx = 1
for col, alias in select:
try:
sql, params = self.compile(col)
except EmptyResultSet:
empty_result_set_value = getattr(
col, "empty_result_set_value", NotImplemented
)
if empty_result_set_value is NotImplemented:
# Select a predicate that's always False.
sql, params = "0", ()
else:
sql, params = self.compile(Value(empty_result_set_value))
except FullResultSet:
sql, params = self.compile(Value(True))
else:
sql, params = col.select_format(self, sql, params)
if alias is None and with_col_aliases:
alias = f"col{col_idx}"
col_idx += 1
ret.append((col, (sql, params), alias))
return ret, klass_info, annotations
def _order_by_pairs(self):
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
elif self.query.order_by:
ordering = self.query.order_by
elif (meta := self.query.get_meta()) and meta.ordering:
ordering = meta.ordering
self._meta_ordering = ordering
else:
ordering = []
if self.query.standard_ordering:
default_order, _ = ORDER_DIR["ASC"]
else:
default_order, _ = ORDER_DIR["DESC"]
for field in ordering:
if hasattr(field, "resolve_expression"):
if isinstance(field, Value):
# output_field must be resolved for constants.
field = Cast(field, field.output_field)
if not isinstance(field, OrderBy):
field = field.asc()
if not self.query.standard_ordering:
field = field.copy()
field.reverse_ordering()
if isinstance(field.expression, F) and (
annotation := self.query.annotation_select.get(
field.expression.name
)
):
field.expression = Ref(field.expression.name, annotation)
yield field, isinstance(field.expression, Ref)
continue
if field == "?": # random
yield OrderBy(Random()), False
continue
col, order = get_order_dir(field, default_order)
descending = order == "DESC"
if col in self.query.annotation_select:
# Reference to expression in SELECT clause
yield (
OrderBy(
Ref(col, self.query.annotation_select[col]),
descending=descending,
),
True,
)
continue
if col in self.query.annotations:
# References to an expression which is masked out of the SELECT
# clause.
if self.query.combinator and self.select:
# Don't use the resolved annotation because other
# combinated queries might define it differently.
expr = F(col)
else:
expr = self.query.annotations[col]
if isinstance(expr, Value):
# output_field must be resolved for constants.
expr = Cast(expr, expr.output_field)
yield OrderBy(expr, descending=descending), False
continue
if "." in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split(".", 1)
yield (
OrderBy(
RawSQL(
"%s.%s" % (self.quote_name_unless_alias(table), col), []
),
descending=descending,
),
False,
)
continue
if self.query.extra and col in self.query.extra:
if col in self.query.extra_select:
yield (
OrderBy(
Ref(col, RawSQL(*self.query.extra[col])),
descending=descending,
),
True,
)
else:
yield (
OrderBy(RawSQL(*self.query.extra[col]), descending=descending),
False,
)
else:
if self.query.combinator and self.select:
# Don't use the first model's field because other
# combinated queries might define it differently.
yield OrderBy(F(col), descending=descending), False
else:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
yield from self.find_ordering_name(
field,
self.query.get_meta(),
default_order=default_order,
)
def get_order_by(self):
"""
Return a list of 2-tuples of the form (expr, (sql, params, is_ref)) for
the ORDER BY clause.
The order_by clause can alter the select clause (for example it can add
aliases to clauses that do not yet have one, or it can add totally new
select clauses).
"""
result = []
seen = set()
for expr, is_ref in self._order_by_pairs():
resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None)
if not is_ref and self.query.combinator and self.select:
src = resolved.expression
expr_src = expr.expression
for sel_expr, _, col_alias in self.select:
if src == sel_expr:
# When values() is used the exact alias must be used to
# reference annotations.
if (
self.query.has_select_fields
and col_alias in self.query.annotation_select
and not (
isinstance(expr_src, F) and col_alias == expr_src.name
)
):
continue
resolved.set_source_expressions(
[Ref(col_alias if col_alias else src.target.column, src)]
)
break
else:
# Add column used in ORDER BY clause to the selected
# columns and to each combined query.
order_by_idx = len(self.query.select) + 1
col_alias = f"__orderbycol{order_by_idx}"
for q in self.query.combined_queries:
# If fields were explicitly selected through values()
# combined queries cannot be augmented.
if q.has_select_fields:
raise DatabaseError(
"ORDER BY term does not match any column in "
"the result set."
)
q.add_annotation(expr_src, col_alias)
self.query.add_select_col(resolved, col_alias)
resolved.set_source_expressions([Ref(col_alias, src)])
sql, params = self.compile(resolved)
# Don't add the same column twice, but the order direction is
# not taken into account so we strip it. When this entire method
# is refactored into expressions, then we can check each part as we
# generate it.
without_ordering = self.ordering_parts.search(sql)[1]
params_hash = make_hashable(params)
if (without_ordering, params_hash) in seen:
continue
seen.add((without_ordering, params_hash))
result.append((resolved, (sql, params, is_ref)))
return result
def get_extra_select(self, order_by, select):
extra_select = []
if self.query.distinct and not self.query.distinct_fields:
select_sql = [t[1] for t in select]
for expr, (sql, params, is_ref) in order_by:
without_ordering = self.ordering_parts.search(sql)[1]
if not is_ref and (without_ordering, params) not in select_sql:
extra_select.append((expr, (without_ordering, params), None))
return extra_select
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if (
(name in self.query.alias_map and name not in self.query.table_map)
or name in self.query.extra_select
or (
self.query.external_aliases.get(name)
and name not in self.query.table_map
)
):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
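    # compile() dispatches to a vendor-specific method when one exists. For
    # example, on PostgreSQL (connection.vendor == "postgresql") a node's
    # as_postgresql(compiler, connection) is preferred over its generic
    # as_sql() implementation.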
def compile(self, node):
vendor_impl = getattr(node, "as_" + self.connection.vendor, None)
if vendor_impl:
sql, params = vendor_impl(self, self.connection)
else:
sql, params = node.as_sql(self, self.connection)
return sql, params
def get_combinator_sql(self, combinator, all):
features = self.connection.features
compilers = [
query.get_compiler(self.using, self.connection, self.elide_empty)
for query in self.query.combined_queries
]
if not features.supports_slicing_ordering_in_compound:
for compiler in compilers:
if compiler.query.is_sliced:
raise DatabaseError(
"LIMIT/OFFSET not allowed in subqueries of compound statements."
)
if compiler.get_order_by():
raise DatabaseError(
"ORDER BY not allowed in subqueries of compound statements."
)
elif self.query.is_sliced and combinator == "union":
limit = (self.query.low_mark, self.query.high_mark)
for compiler in compilers:
# A sliced union cannot have its parts elided as some of them
# might be sliced as well and in the event where only a single
# part produces a non-empty resultset it might be impossible to
# generate valid SQL.
compiler.elide_empty = False
if not compiler.query.is_sliced:
compiler.query.set_limits(*limit)
parts = ()
for compiler in compilers:
try:
# If the columns list is limited, then all combined queries
# must have the same columns list. Set the selects defined on
# the query on all combined queries, if not already set.
if not compiler.query.values_select and self.query.values_select:
compiler.query = compiler.query.clone()
compiler.query.set_values(
(
*self.query.extra_select,
*self.query.values_select,
*self.query.annotation_select,
)
)
part_sql, part_args = compiler.as_sql(with_col_aliases=True)
if compiler.query.combinator:
# Wrap in a subquery if wrapping in parentheses isn't
# supported.
if not features.supports_parentheses_in_compound:
part_sql = "SELECT * FROM ({})".format(part_sql)
# Add parentheses when combining with compound query if not
# already added for all compound queries.
elif (
self.query.subquery
or not features.supports_slicing_ordering_in_compound
):
part_sql = "({})".format(part_sql)
elif (
self.query.subquery
and features.supports_slicing_ordering_in_compound
):
part_sql = "({})".format(part_sql)
parts += ((part_sql, part_args),)
except EmptyResultSet:
# Omit the empty queryset with UNION and with DIFFERENCE if the
# first queryset is nonempty.
if combinator == "union" or (combinator == "difference" and parts):
continue
raise
if not parts:
raise EmptyResultSet
combinator_sql = self.connection.ops.set_operators[combinator]
if all and combinator == "union":
combinator_sql += " ALL"
braces = "{}"
if not self.query.subquery and features.supports_slicing_ordering_in_compound:
braces = "({})"
sql_parts, args_parts = zip(
*((braces.format(sql), args) for sql, args in parts)
)
result = [" {} ".format(combinator_sql).join(sql_parts)]
params = []
for part in args_parts:
params.extend(part)
return result, params
def get_qualify_sql(self):
where_parts = []
if self.where:
where_parts.append(self.where)
if self.having:
where_parts.append(self.having)
inner_query = self.query.clone()
inner_query.subquery = True
inner_query.where = inner_query.where.__class__(where_parts)
# Augment the inner query with any window function references that
# might have been masked via values() and alias(). If any masked
# aliases are added they'll be masked again to avoid fetching
# the data in the `if qual_aliases` branch below.
select = {
expr: alias for expr, _, alias in self.get_select(with_col_aliases=True)[0]
}
select_aliases = set(select.values())
qual_aliases = set()
replacements = {}
def collect_replacements(expressions):
while expressions:
expr = expressions.pop()
if expr in replacements:
continue
elif select_alias := select.get(expr):
replacements[expr] = select_alias
elif isinstance(expr, Lookup):
expressions.extend(expr.get_source_expressions())
elif isinstance(expr, Ref):
if expr.refs not in select_aliases:
expressions.extend(expr.get_source_expressions())
else:
num_qual_alias = len(qual_aliases)
select_alias = f"qual{num_qual_alias}"
qual_aliases.add(select_alias)
inner_query.add_annotation(expr, select_alias)
replacements[expr] = select_alias
collect_replacements(list(self.qualify.leaves()))
self.qualify = self.qualify.replace_expressions(
{expr: Ref(alias, expr) for expr, alias in replacements.items()}
)
order_by = []
for order_by_expr, *_ in self.get_order_by():
collect_replacements(order_by_expr.get_source_expressions())
order_by.append(
order_by_expr.replace_expressions(
{expr: Ref(alias, expr) for expr, alias in replacements.items()}
)
)
inner_query_compiler = inner_query.get_compiler(
self.using, elide_empty=self.elide_empty
)
inner_sql, inner_params = inner_query_compiler.as_sql(
# The limits must be applied to the outer query to avoid pruning
# results too eagerly.
with_limits=False,
# Force unique aliasing of selected columns to avoid collisions
# and make rhs predicates referencing easier.
with_col_aliases=True,
)
qualify_sql, qualify_params = self.compile(self.qualify)
result = [
"SELECT * FROM (",
inner_sql,
")",
self.connection.ops.quote_name("qualify"),
"WHERE",
qualify_sql,
]
if qual_aliases:
# If some select aliases were unmasked for filtering purposes they
# must be masked back.
cols = [self.connection.ops.quote_name(alias) for alias in select.values()]
result = [
"SELECT",
", ".join(cols),
"FROM (",
*result,
")",
self.connection.ops.quote_name("qualify_mask"),
]
params = list(inner_params) + qualify_params
        # As the SQL spec is unclear on whether or not derived tables'
        # ordering must propagate, it has to be explicitly repeated on the
        # outermost query to ensure it's preserved.
if order_by:
ordering_sqls = []
for ordering in order_by:
ordering_sql, ordering_params = self.compile(ordering)
ordering_sqls.append(ordering_sql)
params.extend(ordering_params)
result.extend(["ORDER BY", ", ".join(ordering_sqls)])
return result, params
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
refcounts_before = self.query.alias_refcount.copy()
try:
combinator = self.query.combinator
extra_select, order_by, group_by = self.pre_sql_setup(
with_col_aliases=with_col_aliases or bool(combinator),
)
for_update_part = None
# Is a LIMIT/OFFSET clause needed?
with_limit_offset = with_limits and self.query.is_sliced
combinator = self.query.combinator
features = self.connection.features
if combinator:
if not getattr(features, "supports_select_{}".format(combinator)):
raise NotSupportedError(
"{} is not supported on this database backend.".format(
combinator
)
)
result, params = self.get_combinator_sql(
combinator, self.query.combinator_all
)
elif self.qualify:
result, params = self.get_qualify_sql()
order_by = None
else:
distinct_fields, distinct_params = self.get_distinct()
# This must come after 'select', 'ordering', and 'distinct'
# (see docstring of get_from_clause() for details).
from_, f_params = self.get_from_clause()
try:
where, w_params = (
self.compile(self.where) if self.where is not None else ("", [])
)
except EmptyResultSet:
if self.elide_empty:
raise
# Use a predicate that's always False.
where, w_params = "0 = 1", []
except FullResultSet:
where, w_params = "", []
try:
having, h_params = (
self.compile(self.having)
if self.having is not None
else ("", [])
)
except FullResultSet:
having, h_params = "", []
result = ["SELECT"]
params = []
if self.query.distinct:
distinct_result, distinct_params = self.connection.ops.distinct_sql(
distinct_fields,
distinct_params,
)
result += distinct_result
params += distinct_params
out_cols = []
for _, (s_sql, s_params), alias in self.select + extra_select:
if alias:
s_sql = "%s AS %s" % (
s_sql,
self.connection.ops.quote_name(alias),
)
params.extend(s_params)
out_cols.append(s_sql)
result += [", ".join(out_cols)]
if from_:
result += ["FROM", *from_]
elif self.connection.features.bare_select_suffix:
result += [self.connection.features.bare_select_suffix]
params.extend(f_params)
if self.query.select_for_update and features.has_select_for_update:
if (
self.connection.get_autocommit()
# Don't raise an exception when database doesn't
# support transactions, as it's a noop.
and features.supports_transactions
):
raise TransactionManagementError(
"select_for_update cannot be used outside of a transaction."
)
if (
with_limit_offset
and not features.supports_select_for_update_with_limit
):
raise NotSupportedError(
"LIMIT/OFFSET is not supported with "
"select_for_update on this database backend."
)
nowait = self.query.select_for_update_nowait
skip_locked = self.query.select_for_update_skip_locked
of = self.query.select_for_update_of
no_key = self.query.select_for_no_key_update
# If it's a NOWAIT/SKIP LOCKED/OF/NO KEY query but the
# backend doesn't support it, raise NotSupportedError to
# prevent a possible deadlock.
if nowait and not features.has_select_for_update_nowait:
raise NotSupportedError(
"NOWAIT is not supported on this database backend."
)
elif skip_locked and not features.has_select_for_update_skip_locked:
raise NotSupportedError(
"SKIP LOCKED is not supported on this database backend."
)
elif of and not features.has_select_for_update_of:
raise NotSupportedError(
"FOR UPDATE OF is not supported on this database backend."
)
elif no_key and not features.has_select_for_no_key_update:
raise NotSupportedError(
"FOR NO KEY UPDATE is not supported on this "
"database backend."
)
for_update_part = self.connection.ops.for_update_sql(
nowait=nowait,
skip_locked=skip_locked,
of=self.get_select_for_update_of_arguments(),
no_key=no_key,
)
if for_update_part and features.for_update_after_from:
result.append(for_update_part)
if where:
result.append("WHERE %s" % where)
params.extend(w_params)
grouping = []
for g_sql, g_params in group_by:
grouping.append(g_sql)
params.extend(g_params)
if grouping:
if distinct_fields:
raise NotImplementedError(
"annotate() + distinct(fields) is not implemented."
)
order_by = order_by or self.connection.ops.force_no_ordering()
result.append("GROUP BY %s" % ", ".join(grouping))
if self._meta_ordering:
order_by = None
if having:
result.append("HAVING %s" % having)
params.extend(h_params)
if self.query.explain_info:
result.insert(
0,
self.connection.ops.explain_query_prefix(
self.query.explain_info.format,
**self.query.explain_info.options,
),
)
if order_by:
ordering = []
for _, (o_sql, o_params, _) in order_by:
ordering.append(o_sql)
params.extend(o_params)
order_by_sql = "ORDER BY %s" % ", ".join(ordering)
if combinator and features.requires_compound_order_by_subquery:
result = ["SELECT * FROM (", *result, ")", order_by_sql]
else:
result.append(order_by_sql)
if with_limit_offset:
result.append(
self.connection.ops.limit_offset_sql(
self.query.low_mark, self.query.high_mark
)
)
if for_update_part and not features.for_update_after_from:
result.append(for_update_part)
if self.query.subquery and extra_select:
# If the query is used as a subquery, the extra selects would
# result in more columns than the left-hand side expression is
# expecting. This can happen when a subquery uses a combination
# of order_by() and distinct(), forcing the ordering expressions
# to be selected as well. Wrap the query in another subquery
# to exclude extraneous selects.
sub_selects = []
sub_params = []
for index, (select, _, alias) in enumerate(self.select, start=1):
if alias:
sub_selects.append(
"%s.%s"
% (
self.connection.ops.quote_name("subquery"),
self.connection.ops.quote_name(alias),
)
)
else:
select_clone = select.relabeled_clone(
{select.alias: "subquery"}
)
subselect, subparams = select_clone.as_sql(
self, self.connection
)
sub_selects.append(subselect)
sub_params.extend(subparams)
return "SELECT %s FROM (%s) subquery" % (
", ".join(sub_selects),
" ".join(result),
), tuple(sub_params + params)
return " ".join(result), tuple(params)
finally:
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(refcounts_before)
def get_default_columns(
self, select_mask, start_alias=None, opts=None, from_parent=None
):
"""
Compute the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
        Return a list of column expressions, one per selected field, suitable
        for use in the SELECT clause.
"""
result = []
if opts is None:
if (opts := self.query.get_meta()) is None:
return result
start_alias = start_alias or self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field in opts.concrete_fields:
model = field.model._meta.concrete_model
# A proxy model will have a different model and concrete_model. We
# will assign None if the field belongs to this model.
if model == opts.model:
model = None
if (
from_parent
and model is not None
and issubclass(
from_parent._meta.concrete_model, model._meta.concrete_model
)
):
# Avoid loading data for already loaded parents.
# We end up here in the case select_related() resolution
# proceeds from parent model to child model. In that case the
# parent model data is already present in the SELECT clause,
# and we want to avoid reloading the same data again.
continue
if select_mask and field not in select_mask:
continue
alias = self.query.join_parent_model(opts, model, start_alias, seen_models)
column = field.get_col(alias)
result.append(column)
return result
def get_distinct(self):
"""
Return a quoted list of fields to use in DISTINCT ON part of the query.
This method can alter the tables in the query, and thus it must be
called before get_from_clause().
"""
result = []
params = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _, transform_function = self._setup_joins(
parts, opts, None
)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
if name in self.query.annotation_select:
result.append(self.connection.ops.quote_name(name))
else:
r, p = self.compile(transform_function(target, alias))
result.append(r)
params.append(p)
return result, params
def find_ordering_name(
self, name, opts, alias=None, default_order="ASC", already_seen=None
):
"""
Return the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
descending = order == "DESC"
pieces = name.split(LOOKUP_SEP)
(
field,
targets,
alias,
joins,
path,
opts,
transform_function,
) = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
        # append the default ordering for that model unless it is the pk
        # shortcut, the attribute name of the field is specified, or there
        # are transforms to process.
if (
field.is_relation
and opts.ordering
and getattr(field, "attname", None) != pieces[-1]
and name != "pk"
and not getattr(transform_function, "has_transforms", False)
):
# Firstly, avoid infinite loops.
already_seen = already_seen or set()
join_tuple = tuple(
getattr(self.query.alias_map[j], "join_cols", None) for j in joins
)
if join_tuple in already_seen:
raise FieldError("Infinite loop caused by ordering.")
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
if hasattr(item, "resolve_expression") and not isinstance(
item, OrderBy
):
item = item.desc() if descending else item.asc()
if isinstance(item, OrderBy):
results.append(
(item.prefix_references(f"{name}{LOOKUP_SEP}"), False)
)
continue
results.extend(
(expr.prefix_references(f"{name}{LOOKUP_SEP}"), is_ref)
for expr, is_ref in self.find_ordering_name(
item, opts, alias, order, already_seen
)
)
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [
(OrderBy(transform_function(t, alias), descending=descending), False)
for t in targets
]
def _setup_joins(self, pieces, opts, alias):
"""
Helper method for get_order_by() and get_distinct().
        get_order_by() and get_distinct() must produce the same target
        columns for the same input, as their prefixes must match. Executing
        SQL where this is not true is an error.
"""
alias = alias or self.query.get_initial_alias()
field, targets, opts, joins, path, transform_function = self.query.setup_joins(
pieces, opts, alias
)
alias = joins[-1]
return field, targets, alias, joins, path, opts, transform_function
def get_from_clause(self):
"""
        Return a list of strings that are joined together to go after the
        "FROM" part of the query, as well as a list of any extra parameters
        that need to be included. Subclasses can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables that are needed. This means the select columns,
ordering, and distinct must be done first.
"""
result = []
params = []
for alias in tuple(self.query.alias_map):
if not self.query.alias_refcount[alias]:
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
clause_sql, clause_params = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
alias, _ = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if (
alias not in self.query.alias_map
or self.query.alias_refcount[alias] == 1
):
result.append(", %s" % self.quote_name_unless_alias(alias))
return result, params
def get_related_selections(
self,
select,
select_mask,
opts=None,
root_alias=None,
cur_depth=1,
requested=None,
restricted=None,
):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (
f.field.related_query_name()
for f in opts.related_objects
if f.field.unique
)
return chain(
direct_choices, reverse_choices, self.query._filtered_relations
)
related_klass_infos = []
if not restricted and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return related_klass_infos
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
# Setup for the case when only particular related fields should be
# included in the related selection.
fields_found = set()
if requested is None:
restricted = isinstance(self.query.select_related, dict)
if restricted:
requested = self.query.select_related
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info["related_klass_infos"] = related_klass_infos
for f in opts.fields:
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if not f.is_relation:
# If a non-related field is used like a relation,
# or if a single non-relational field is given.
if next or f.name in requested:
raise FieldError(
"Non-relational field given in select_related: '%s'. "
"Choices are: %s"
% (
f.name,
", ".join(_get_field_choices()) or "(none)",
)
)
else:
next = False
if not select_related_descend(f, restricted, requested, select_mask):
continue
related_select_mask = select_mask.get(f) or {}
klass_info = {
"model": f.remote_field.model,
"field": f,
"reverse": False,
"local_setter": f.set_cached_value,
"remote_setter": f.remote_field.set_cached_value
if f.unique
else lambda x, y: None,
"from_parent": False,
}
related_klass_infos.append(klass_info)
select_fields = []
_, _, _, joins, _, _ = self.query.setup_joins([f.name], opts, root_alias)
alias = joins[-1]
columns = self.get_default_columns(
related_select_mask, start_alias=alias, opts=f.remote_field.model._meta
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next_klass_infos = self.get_related_selections(
select,
related_select_mask,
f.remote_field.model._meta,
alias,
cur_depth + 1,
next,
restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [
(o.field, o.related_model)
for o in opts.related_objects
if o.field.unique and not o.many_to_many
]
for f, model in related_fields:
related_select_mask = select_mask.get(f) or {}
if not select_related_descend(
f, restricted, requested, related_select_mask, reverse=True
):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
join_info = self.query.setup_joins(
[related_field_name], opts, root_alias
)
alias = join_info.joins[-1]
from_parent = issubclass(model, opts.model) and model is not opts.model
klass_info = {
"model": model,
"field": f,
"reverse": True,
"local_setter": f.remote_field.set_cached_value,
"remote_setter": f.set_cached_value,
"from_parent": from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
related_select_mask,
start_alias=alias,
opts=model._meta,
from_parent=opts.model,
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(
select,
related_select_mask,
model._meta,
alias,
cur_depth + 1,
next,
restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
def local_setter(obj, from_obj):
# Set a reverse fk object when relation is non-empty.
if from_obj:
f.remote_field.set_cached_value(from_obj, obj)
def remote_setter(name, obj, from_obj):
setattr(from_obj, name, obj)
for name in list(requested):
# Filtered relations work only on the topmost level.
if cur_depth > 1:
break
if name in self.query._filtered_relations:
fields_found.add(name)
f, _, join_opts, joins, _, _ = self.query.setup_joins(
[name], opts, root_alias
)
model = join_opts.model
alias = joins[-1]
from_parent = (
issubclass(model, opts.model) and model is not opts.model
)
klass_info = {
"model": model,
"field": f,
"reverse": True,
"local_setter": local_setter,
"remote_setter": partial(remote_setter, name),
"from_parent": from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
field_select_mask = select_mask.get((name, f)) or {}
columns = self.get_default_columns(
field_select_mask,
start_alias=alias,
opts=model._meta,
from_parent=opts.model,
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info["select_fields"] = select_fields
next_requested = requested.get(name, {})
next_klass_infos = self.get_related_selections(
select,
field_select_mask,
opts=model._meta,
root_alias=alias,
cur_depth=cur_depth + 1,
requested=next_requested,
restricted=restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested).difference(fields_found)
if fields_not_found:
invalid_fields = ("'%s'" % s for s in fields_not_found)
raise FieldError(
"Invalid field name(s) given in select_related: %s. "
"Choices are: %s"
% (
", ".join(invalid_fields),
", ".join(_get_field_choices()) or "(none)",
)
)
return related_klass_infos
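    # Rough sketch of the resulting structure (hypothetical models): for
    #   Book.objects.select_related("author")
    # each entry of related_klass_infos looks roughly like
    #   {"model": Author, "field": <Book.author field>, "reverse": False,
    #    "local_setter": ..., "remote_setter": ..., "from_parent": False,
    #    "select_fields": [3, 4, 5], "related_klass_infos": [...]}
    # where select_fields holds indexes into the shared select list built up
    # alongside this method.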
def get_select_for_update_of_arguments(self):
"""
Return a quoted list of arguments for the SELECT FOR UPDATE OF part of
the query.
"""
def _get_parent_klass_info(klass_info):
concrete_model = klass_info["model"]._meta.concrete_model
for parent_model, parent_link in concrete_model._meta.parents.items():
parent_list = parent_model._meta.get_parent_list()
yield {
"model": parent_model,
"field": parent_link,
"reverse": False,
"select_fields": [
select_index
for select_index in klass_info["select_fields"]
# Selected columns from a model or its parents.
if (
self.select[select_index][0].target.model == parent_model
or self.select[select_index][0].target.model in parent_list
)
],
}
def _get_first_selected_col_from_model(klass_info):
"""
Find the first selected column from a model. If it doesn't exist,
don't lock a model.
select_fields is filled recursively, so it also contains fields
from the parent models.
"""
concrete_model = klass_info["model"]._meta.concrete_model
for select_index in klass_info["select_fields"]:
if self.select[select_index][0].target.model == concrete_model:
return self.select[select_index][0]
def _get_field_choices():
"""Yield all allowed field paths in breadth-first search order."""
queue = collections.deque([(None, self.klass_info)])
while queue:
parent_path, klass_info = queue.popleft()
if parent_path is None:
path = []
yield "self"
else:
field = klass_info["field"]
if klass_info["reverse"]:
field = field.remote_field
path = parent_path + [field.name]
yield LOOKUP_SEP.join(path)
queue.extend(
(path, klass_info)
for klass_info in _get_parent_klass_info(klass_info)
)
queue.extend(
(path, klass_info)
for klass_info in klass_info.get("related_klass_infos", [])
)
if not self.klass_info:
return []
result = []
invalid_names = []
for name in self.query.select_for_update_of:
klass_info = self.klass_info
if name == "self":
col = _get_first_selected_col_from_model(klass_info)
else:
for part in name.split(LOOKUP_SEP):
klass_infos = (
*klass_info.get("related_klass_infos", []),
*_get_parent_klass_info(klass_info),
)
for related_klass_info in klass_infos:
field = related_klass_info["field"]
if related_klass_info["reverse"]:
field = field.remote_field
if field.name == part:
klass_info = related_klass_info
break
else:
klass_info = None
break
if klass_info is None:
invalid_names.append(name)
continue
col = _get_first_selected_col_from_model(klass_info)
if col is not None:
if self.connection.features.select_for_update_of_column:
result.append(self.compile(col)[0])
else:
result.append(self.quote_name_unless_alias(col.alias))
if invalid_names:
raise FieldError(
"Invalid field name(s) given in select_for_update(of=(...)): %s. "
"Only relational fields followed in the query are allowed. "
"Choices are: %s."
% (
", ".join(invalid_names),
", ".join(_get_field_choices()),
)
)
return result
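    # Illustrative usage (hypothetical models): on backends with FOR UPDATE OF
    # support, a query like
    #   Book.objects.select_related("author").select_for_update(
    #       of=("self", "author")
    #   )
    # makes this method return the aliases (or first selected columns, when
    # select_for_update_of_column is set) for both tables, ending the SQL with
    # something like: ... FOR UPDATE OF "app_book", "app_author"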
def get_converters(self, expressions):
converters = {}
for i, expression in enumerate(expressions):
if expression:
backend_converters = self.connection.ops.get_db_converters(expression)
field_converters = expression.get_db_converters(self.connection)
if backend_converters or field_converters:
converters[i] = (backend_converters + field_converters, expression)
return converters
def apply_converters(self, rows, converters):
connection = self.connection
converters = list(converters.items())
for row in map(list, rows):
for pos, (convs, expression) in converters:
value = row[pos]
for converter in convs:
value = converter(value, expression, connection)
row[pos] = value
yield row
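    # Sketch of the converter contract (not part of the original source): each
    # converter is a callable of the form
    #   def converter(value, expression, connection): ...
    # applied in order to a single column value; e.g. the SQLite backend uses
    # one to turn 0/1 integers back into Python booleans for BooleanFields.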
def results_iter(
self,
results=None,
tuple_expected=False,
chunked_fetch=False,
chunk_size=GET_ITERATOR_CHUNK_SIZE,
):
"""Return an iterator over the results from executing this query."""
if results is None:
results = self.execute_sql(
MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size
)
fields = [s[0] for s in self.select[0 : self.col_count]]
converters = self.get_converters(fields)
rows = chain.from_iterable(results)
if converters:
rows = self.apply_converters(rows, converters)
if tuple_expected:
rows = map(tuple, rows)
return rows
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
return bool(self.execute_sql(SINGLE))
def execute_sql(
self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE
):
"""
Run the query against the database and return the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
        cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
result_type = result_type or NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
if chunked_fetch:
cursor = self.connection.chunked_cursor()
else:
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
# Might fail for server-side cursors (e.g. connection closed)
cursor.close()
raise
if result_type == CURSOR:
# Give the caller the cursor to process and close.
return cursor
if result_type == SINGLE:
try:
val = cursor.fetchone()
if val:
return val[0 : self.col_count]
return val
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
result = cursor_iter(
cursor,
self.connection.features.empty_fetchmany_value,
self.col_count if self.has_extra_select else None,
chunk_size,
)
if not chunked_fetch or not self.connection.features.can_use_chunked_reads:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further. Use chunked_fetch if requested,
# unless the database doesn't support it.
return list(result)
return result
def as_subquery_condition(self, alias, columns, compiler):
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
for index, select_col in enumerate(self.query.select):
lhs_sql, lhs_params = self.compile(select_col)
rhs = "%s.%s" % (qn(alias), qn2(columns[index]))
self.query.where.add(RawSQL("%s = %s" % (lhs_sql, rhs), lhs_params), AND)
sql, params = self.as_sql()
return "EXISTS (%s)" % sql, params
def explain_query(self):
result = list(self.execute_sql())
# Some backends return 1 item tuples with strings, and others return
# tuples with integers and strings. Flatten them out into strings.
format_ = self.query.explain_info.format
output_formatter = json.dumps if format_ and format_.lower() == "json" else str
for row in result[0]:
if not isinstance(row, str):
yield " ".join(output_formatter(c) for c in row)
else:
yield row
class SQLInsertCompiler(SQLCompiler):
returning_fields = None
returning_params = ()
def field_as_sql(self, field, val):
"""
Take a field and a value intended to be saved on that field, and
return placeholder SQL and accompanying params. Check for raw values,
expressions, and fields with get_placeholder() defined in that order.
When field is None, consider the value raw and use it as the
placeholder, with no corresponding parameters returned.
"""
if field is None:
# A field value of None means the value is raw.
sql, params = val, []
elif hasattr(val, "as_sql"):
# This is an expression, let's compile it.
sql, params = self.compile(val)
elif hasattr(field, "get_placeholder"):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
sql, params = field.get_placeholder(val, self, self.connection), [val]
else:
# Return the common case for the placeholder
sql, params = "%s", [val]
# The following hook is only used by Oracle Spatial, which sometimes
# needs to yield 'NULL' and [] as its placeholder and params instead
# of '%s' and [None]. The 'NULL' placeholder is produced earlier by
# OracleOperations.get_geom_placeholder(). The following line removes
# the corresponding None parameter. See ticket #10888.
params = self.connection.ops.modify_insert_params(sql, params)
return sql, params
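    # Worked example (illustrative only): a plain field with value 42 yields
    # ("%s", [42]); an expression value such as Now() is compiled, yielding
    # e.g. ("CURRENT_TIMESTAMP", []); and field=None treats the value as raw
    # SQL, using it verbatim as the placeholder with no params.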
def prepare_value(self, field, value):
"""
Prepare a value to be used in a query by resolving it if it is an
expression and otherwise calling the field's get_db_prep_save().
"""
if hasattr(value, "resolve_expression"):
value = value.resolve_expression(
self.query, allow_joins=False, for_save=True
)
# Don't allow values containing Col expressions. They refer to
# existing columns on a row, but in the case of insert the row
# doesn't exist yet.
if value.contains_column_references:
raise ValueError(
'Failed to insert expression "%s" on %s. F() expressions '
"can only be used to update, not to insert." % (value, field)
)
if value.contains_aggregate:
raise FieldError(
"Aggregate functions are not allowed in this query "
"(%s=%r)." % (field.name, value)
)
if value.contains_over_clause:
raise FieldError(
"Window expressions are not allowed in this query (%s=%r)."
% (field.name, value)
)
else:
value = field.get_db_prep_save(value, connection=self.connection)
return value
def pre_save_val(self, field, obj):
"""
Get the given field's value off the given obj. pre_save() is used for
things like auto_now on DateTimeField. Skip it if this is a raw query.
"""
if self.query.raw:
return getattr(obj, field.attname)
return field.pre_save(obj, add=True)
def assemble_as_sql(self, fields, value_rows):
"""
Take a sequence of N fields and a sequence of M rows of values, and
generate placeholder SQL and parameters for each field and value.
Return a pair containing:
* a sequence of M rows of N SQL placeholder strings, and
* a sequence of M rows of corresponding parameter values.
Each placeholder string may contain any number of '%s' interpolation
strings, and each parameter row will contain exactly as many params
as the total number of '%s's in the corresponding placeholder row.
"""
if not value_rows:
return [], []
# list of (sql, [params]) tuples for each object to be saved
# Shape: [n_objs][n_fields][2]
rows_of_fields_as_sql = (
(self.field_as_sql(field, v) for field, v in zip(fields, row))
for row in value_rows
)
# tuple like ([sqls], [[params]s]) for each object to be saved
# Shape: [n_objs][2][n_fields]
sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql)
# Extract separate lists for placeholders and params.
# Each of these has shape [n_objs][n_fields]
placeholder_rows, param_rows = zip(*sql_and_param_pair_rows)
# Params for each field are still lists, and need to be flattened.
param_rows = [[p for ps in row for p in ps] for row in param_rows]
return placeholder_rows, param_rows
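    # Worked shape example (not part of the original source): with
    # fields = [f1, f2] and value_rows = [[1, 2], [3, 4]], the common case
    # yields
    #   placeholder_rows = (("%s", "%s"), ("%s", "%s"))
    #   param_rows = [[1, 2], [3, 4]]
    # one row of placeholders plus one flattened param list per object.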
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
insert_statement = self.connection.ops.insert_statement(
on_conflict=self.query.on_conflict,
)
result = ["%s %s" % (insert_statement, qn(opts.db_table))]
fields = self.query.fields or [opts.pk]
result.append("(%s)" % ", ".join(qn(f.column) for f in fields))
if self.query.fields:
value_rows = [
[
self.prepare_value(field, self.pre_save_val(field, obj))
for field in fields
]
for obj in self.query.objs
]
else:
# An empty object.
value_rows = [
[self.connection.ops.pk_default_value()] for _ in self.query.objs
]
fields = [None]
# Currently the backends just accept values when generating bulk
# queries and generate their own placeholders. Doing that isn't
# necessary and it should be possible to use placeholders and
# expressions in bulk inserts too.
can_bulk = (
not self.returning_fields and self.connection.features.has_bulk_insert
)
placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows)
on_conflict_suffix_sql = self.connection.ops.on_conflict_suffix_sql(
fields,
self.query.on_conflict,
(f.column for f in self.query.update_fields),
(f.column for f in self.query.unique_fields),
)
if (
self.returning_fields
and self.connection.features.can_return_columns_from_insert
):
if self.connection.features.can_return_rows_from_bulk_insert:
result.append(
self.connection.ops.bulk_insert_sql(fields, placeholder_rows)
)
params = param_rows
else:
result.append("VALUES (%s)" % ", ".join(placeholder_rows[0]))
params = [param_rows[0]]
if on_conflict_suffix_sql:
result.append(on_conflict_suffix_sql)
# Skip empty r_sql to allow subclasses to customize behavior for
# 3rd party backends. Refs #19096.
r_sql, self.returning_params = self.connection.ops.return_insert_columns(
self.returning_fields
)
if r_sql:
result.append(r_sql)
params += [self.returning_params]
return [(" ".join(result), tuple(chain.from_iterable(params)))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
if on_conflict_suffix_sql:
result.append(on_conflict_suffix_sql)
return [(" ".join(result), tuple(p for ps in param_rows for p in ps))]
else:
if on_conflict_suffix_sql:
result.append(on_conflict_suffix_sql)
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholder_rows, param_rows)
]
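    # Illustrative output (hypothetical table and columns): for two objects on
    # a backend with has_bulk_insert and no returning fields, as_sql() yields
    # a single statement such as
    #   INSERT INTO "app_book" ("title", "year") VALUES (%s, %s), (%s, %s)
    # with params ("a", 2001, "b", 2002); without bulk support it instead
    # yields one ("... VALUES (%s, %s)", params) pair per object.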
def execute_sql(self, returning_fields=None):
assert not (
returning_fields
and len(self.query.objs) != 1
and not self.connection.features.can_return_rows_from_bulk_insert
)
opts = self.query.get_meta()
self.returning_fields = returning_fields
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not self.returning_fields:
return []
if (
self.connection.features.can_return_rows_from_bulk_insert
and len(self.query.objs) > 1
):
rows = self.connection.ops.fetch_returned_insert_rows(cursor)
elif self.connection.features.can_return_columns_from_insert:
assert len(self.query.objs) == 1
rows = [
self.connection.ops.fetch_returned_insert_columns(
cursor,
self.returning_params,
)
]
else:
rows = [
(
self.connection.ops.last_insert_id(
cursor,
opts.db_table,
opts.pk.column,
),
)
]
cols = [field.get_col(opts.db_table) for field in self.returning_fields]
converters = self.get_converters(cols)
if converters:
rows = list(self.apply_converters(rows, converters))
return rows
class SQLDeleteCompiler(SQLCompiler):
@cached_property
def single_alias(self):
# Ensure base table is in aliases.
self.query.get_initial_alias()
return sum(self.query.alias_refcount[t] > 0 for t in self.query.alias_map) == 1
@classmethod
def _expr_refs_base_model(cls, expr, base_model):
if isinstance(expr, Query):
return expr.model == base_model
if not hasattr(expr, "get_source_expressions"):
return False
return any(
cls._expr_refs_base_model(source_expr, base_model)
for source_expr in expr.get_source_expressions()
)
@cached_property
def contains_self_reference_subquery(self):
return any(
self._expr_refs_base_model(expr, self.query.model)
for expr in chain(
self.query.annotations.values(), self.query.where.children
)
)
def _as_sql(self, query):
delete = "DELETE FROM %s" % self.quote_name_unless_alias(query.base_table)
try:
where, params = self.compile(query.where)
except FullResultSet:
return delete, ()
return f"{delete} WHERE {where}", tuple(params)
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
if self.single_alias and not self.contains_self_reference_subquery:
return self._as_sql(self.query)
innerq = self.query.clone()
innerq.__class__ = Query
innerq.clear_select_clause()
pk = self.query.model._meta.pk
innerq.select = [pk.get_col(self.query.get_initial_alias())]
outerq = Query(self.query.model)
if not self.connection.features.update_can_self_select:
# Force the materialization of the inner query to allow reference
# to the target table on MySQL.
sql, params = innerq.get_compiler(connection=self.connection).as_sql()
innerq = RawSQL("SELECT * FROM (%s) subquery" % sql, params)
outerq.add_filter("pk__in", innerq)
return self._as_sql(outerq)
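    # Illustrative output (hypothetical table): a single-alias delete takes
    # the fast path and compiles to e.g.
    #   DELETE FROM "app_book" WHERE "app_book"."year" < %s
    # while multi-table or self-referencing deletes are rewritten as
    #   DELETE FROM "app_book" WHERE "app_book"."id" IN (SELECT ...)
    # via the pk__in filter built above.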
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return "", ()
qn = self.quote_name_unless_alias
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, "resolve_expression"):
val = val.resolve_expression(
self.query, allow_joins=False, for_save=True
)
if val.contains_aggregate:
raise FieldError(
"Aggregate functions are not allowed in this query "
"(%s=%r)." % (field.name, val)
)
if val.contains_over_clause:
raise FieldError(
"Window expressions are not allowed in this query "
"(%s=%r)." % (field.name, val)
)
elif hasattr(val, "prepare_database_save"):
if field.remote_field:
val = field.get_db_prep_save(
val.prepare_database_save(field),
connection=self.connection,
)
else:
raise TypeError(
"Tried to update field %s with a model instance, %r. "
"Use a value compatible with %s."
% (field, val, field.__class__.__name__)
)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, "get_placeholder"):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = "%s"
name = field.column
if hasattr(val, "as_sql"):
sql, params = self.compile(val)
values.append("%s = %s" % (qn(name), placeholder % sql))
update_params.extend(params)
elif val is not None:
values.append("%s = %s" % (qn(name), placeholder))
update_params.append(val)
else:
values.append("%s = NULL" % qn(name))
table = self.query.base_table
result = [
"UPDATE %s SET" % qn(table),
", ".join(values),
]
try:
where, params = self.compile(self.query.where)
except FullResultSet:
params = []
else:
result.append("WHERE %s" % where)
return " ".join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Return the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super().execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, munge the "where"
conditions to match the format required for (portable) SQL updates.
If multiple updates are required, pull out the id values to update at
this point so that they don't change as a result of the progressive
updates.
"""
refcounts_before = self.query.alias_refcount.copy()
# Ensure base table is in the query
self.query.get_initial_alias()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
query = self.query.chain(klass=Query)
query.select_related = False
query.clear_ordering(force=True)
query.extra = {}
query.select = []
meta = query.get_meta()
fields = [meta.pk.name]
related_ids_index = []
for related in self.query.related_updates:
if all(
path.join_field.primary_key for path in meta.get_path_to_parent(related)
):
# If a primary key chain exists to the targeted related update,
# then the meta.pk value can be used for it.
related_ids_index.append((related, 0))
else:
# This branch will only be reached when updating a field of an
# ancestor that is not part of the primary key chain of a MTI
# tree.
related_ids_index.append((related, len(fields)))
fields.append(related._meta.pk.name)
query.add_fields(fields)
super().pre_sql_setup()
must_pre_select = (
count > 1 and not self.connection.features.update_can_self_select
)
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.clear_where()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
related_ids = collections.defaultdict(list)
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
for parent, index in related_ids_index:
related_ids[parent].extend(r[index] for r in rows)
self.query.add_filter("pk__in", idents)
self.query.related_ids = related_ids
else:
# The fast path. Filters and updates in one query.
self.query.add_filter("pk__in", query)
self.query.reset_refcounts(refcounts_before)
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
sql, params = [], []
for annotation in self.query.annotation_select.values():
ann_sql, ann_params = self.compile(annotation)
ann_sql, ann_params = annotation.select_format(self, ann_sql, ann_params)
sql.append(ann_sql)
params.extend(ann_params)
self.col_count = len(self.query.annotation_select)
sql = ", ".join(sql)
params = tuple(params)
inner_query_sql, inner_query_params = self.query.inner_query.get_compiler(
self.using,
elide_empty=self.elide_empty,
).as_sql(with_col_aliases=True)
sql = "SELECT %s FROM (%s) subquery" % (sql, inner_query_sql)
params += inner_query_params
return sql, params
def cursor_iter(cursor, sentinel, col_count, itersize):
"""
Yield blocks of rows from a cursor and ensure the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel):
yield rows if col_count is None else [r[:col_count] for r in rows]
finally:
cursor.close()
|
a1e2d4ea19063ab1d33c725f79b753517eeab4d4f768cecb7e6d668fb3b5a093 | import datetime
import uuid
from functools import lru_cache
from django.conf import settings
from django.db import DatabaseError, NotSupportedError
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.backends.utils import split_tzname_delta, strip_quotes, truncate_name
from django.db.models import AutoField, Exists, ExpressionWrapper, Lookup
from django.db.models.expressions import RawSQL
from django.db.models.sql.where import WhereNode
from django.utils import timezone
from django.utils.encoding import force_bytes, force_str
from django.utils.functional import cached_property
from django.utils.regex_helper import _lazy_re_compile
from .base import Database
from .utils import BulkInsertMapper, InsertVar, Oracle_datetime
class DatabaseOperations(BaseDatabaseOperations):
# Oracle uses NUMBER(5), NUMBER(11), and NUMBER(19) for integer fields.
# SmallIntegerField uses NUMBER(11) instead of NUMBER(5), which is used by
# SmallAutoField, to preserve backward compatibility.
integer_field_ranges = {
"SmallIntegerField": (-99999999999, 99999999999),
"IntegerField": (-99999999999, 99999999999),
"BigIntegerField": (-9999999999999999999, 9999999999999999999),
"PositiveBigIntegerField": (0, 9999999999999999999),
"PositiveSmallIntegerField": (0, 99999999999),
"PositiveIntegerField": (0, 99999999999),
"SmallAutoField": (-99999, 99999),
"AutoField": (-99999999999, 99999999999),
"BigAutoField": (-9999999999999999999, 9999999999999999999),
}
set_operators = {**BaseDatabaseOperations.set_operators, "difference": "MINUS"}
# TODO: colorize this SQL code with style.SQL_KEYWORD(), etc.
_sequence_reset_sql = """
DECLARE
table_value integer;
seq_value integer;
seq_name user_tab_identity_cols.sequence_name%%TYPE;
BEGIN
BEGIN
SELECT sequence_name INTO seq_name FROM user_tab_identity_cols
WHERE table_name = '%(table_name)s' AND
column_name = '%(column_name)s';
EXCEPTION WHEN NO_DATA_FOUND THEN
seq_name := '%(no_autofield_sequence_name)s';
END;
SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s;
SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences
WHERE sequence_name = seq_name;
WHILE table_value > seq_value LOOP
EXECUTE IMMEDIATE 'SELECT "'||seq_name||'".nextval FROM DUAL'
INTO seq_value;
END LOOP;
END;
/"""
    # Oracle doesn't support string types without precision; use the max string size.
cast_char_field_without_max_length = "NVARCHAR2(2000)"
cast_data_types = {
"AutoField": "NUMBER(11)",
"BigAutoField": "NUMBER(19)",
"SmallAutoField": "NUMBER(5)",
"TextField": cast_char_field_without_max_length,
}
def cache_key_culling_sql(self):
cache_key = self.quote_name("cache_key")
return (
f"SELECT {cache_key} "
f"FROM %s "
f"ORDER BY {cache_key} OFFSET %%s ROWS FETCH FIRST 1 ROWS ONLY"
)
# EXTRACT format cannot be passed in parameters.
_extract_format_re = _lazy_re_compile(r"[A-Z_]+")
def date_extract_sql(self, lookup_type, sql, params):
extract_sql = f"TO_CHAR({sql}, %s)"
extract_param = None
if lookup_type == "week_day":
# TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday.
extract_param = "D"
elif lookup_type == "iso_week_day":
extract_sql = f"TO_CHAR({sql} - 1, %s)"
extract_param = "D"
elif lookup_type == "week":
# IW = ISO week number
extract_param = "IW"
elif lookup_type == "quarter":
extract_param = "Q"
elif lookup_type == "iso_year":
extract_param = "IYYY"
else:
lookup_type = lookup_type.upper()
if not self._extract_format_re.fullmatch(lookup_type):
                raise ValueError(f"Invalid lookup type: {lookup_type!r}")
# https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/EXTRACT-datetime.html
return f"EXTRACT({lookup_type} FROM {sql})", params
return extract_sql, (*params, extract_param)
def date_trunc_sql(self, lookup_type, sql, params, tzname=None):
sql, params = self._convert_sql_to_tz(sql, params, tzname)
# https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/ROUND-and-TRUNC-Date-Functions.html
trunc_param = None
if lookup_type in ("year", "month"):
trunc_param = lookup_type.upper()
elif lookup_type == "quarter":
trunc_param = "Q"
elif lookup_type == "week":
trunc_param = "IW"
else:
return f"TRUNC({sql})", params
return f"TRUNC({sql}, %s)", (*params, trunc_param)
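    # Illustrative mapping (not part of the original source), with tzname=None:
    #   date_trunc_sql("month", '"t"."d"', ()) -> ('TRUNC("t"."d", %s)', ('MONTH',))
    #   date_trunc_sql("week", '"t"."d"', ())  -> ('TRUNC("t"."d", %s)', ('IW',))
    #   date_trunc_sql("day", '"t"."d"', ())   -> ('TRUNC("t"."d")', ())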
# Oracle crashes with "ORA-03113: end-of-file on communication channel"
# if the time zone name is passed in parameter. Use interpolation instead.
# https://groups.google.com/forum/#!msg/django-developers/zwQju7hbG78/9l934yelwfsJ
# This regexp matches all time zone names from the zoneinfo database.
_tzname_re = _lazy_re_compile(r"^[\w/:+-]+$")
def _prepare_tzname_delta(self, tzname):
tzname, sign, offset = split_tzname_delta(tzname)
return f"{sign}{offset}" if offset else tzname
def _convert_sql_to_tz(self, sql, params, tzname):
if not (settings.USE_TZ and tzname):
return sql, params
if not self._tzname_re.match(tzname):
raise ValueError("Invalid time zone name: %s" % tzname)
# Convert from connection timezone to the local time, returning
# TIMESTAMP WITH TIME ZONE and cast it back to TIMESTAMP to strip the
# TIME ZONE details.
if self.connection.timezone_name != tzname:
from_timezone_name = self.connection.timezone_name
to_timezone_name = self._prepare_tzname_delta(tzname)
return (
f"CAST((FROM_TZ({sql}, '{from_timezone_name}') AT TIME ZONE "
f"'{to_timezone_name}') AS TIMESTAMP)",
params,
)
return sql, params
def datetime_cast_date_sql(self, sql, params, tzname):
sql, params = self._convert_sql_to_tz(sql, params, tzname)
return f"TRUNC({sql})", params
def datetime_cast_time_sql(self, sql, params, tzname):
        # Since `TimeField` values are stored as TIMESTAMP, change to the
        # default date and convert the field to the specified timezone.
sql, params = self._convert_sql_to_tz(sql, params, tzname)
convert_datetime_sql = (
f"TO_TIMESTAMP(CONCAT('1900-01-01 ', TO_CHAR({sql}, 'HH24:MI:SS.FF')), "
f"'YYYY-MM-DD HH24:MI:SS.FF')"
)
return (
f"CASE WHEN {sql} IS NOT NULL THEN {convert_datetime_sql} ELSE NULL END",
(*params, *params),
)
def datetime_extract_sql(self, lookup_type, sql, params, tzname):
sql, params = self._convert_sql_to_tz(sql, params, tzname)
return self.date_extract_sql(lookup_type, sql, params)
def datetime_trunc_sql(self, lookup_type, sql, params, tzname):
sql, params = self._convert_sql_to_tz(sql, params, tzname)
# https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf/ROUND-and-TRUNC-Date-Functions.html
trunc_param = None
if lookup_type in ("year", "month"):
trunc_param = lookup_type.upper()
elif lookup_type == "quarter":
trunc_param = "Q"
elif lookup_type == "week":
trunc_param = "IW"
elif lookup_type == "hour":
trunc_param = "HH24"
elif lookup_type == "minute":
trunc_param = "MI"
elif lookup_type == "day":
return f"TRUNC({sql})", params
else:
# Cast to DATE removes sub-second precision.
return f"CAST({sql} AS DATE)", params
return f"TRUNC({sql}, %s)", (*params, trunc_param)
def time_trunc_sql(self, lookup_type, sql, params, tzname=None):
# The implementation is similar to `datetime_trunc_sql` as both
# `DateTimeField` and `TimeField` are stored as TIMESTAMP where
        # the date part of the latter is ignored.
sql, params = self._convert_sql_to_tz(sql, params, tzname)
trunc_param = None
if lookup_type == "hour":
trunc_param = "HH24"
elif lookup_type == "minute":
trunc_param = "MI"
elif lookup_type == "second":
# Cast to DATE removes sub-second precision.
return f"CAST({sql} AS DATE)", params
return f"TRUNC({sql}, %s)", (*params, trunc_param)
def get_db_converters(self, expression):
converters = super().get_db_converters(expression)
internal_type = expression.output_field.get_internal_type()
if internal_type in ["JSONField", "TextField"]:
converters.append(self.convert_textfield_value)
elif internal_type == "BinaryField":
converters.append(self.convert_binaryfield_value)
elif internal_type == "BooleanField":
converters.append(self.convert_booleanfield_value)
elif internal_type == "DateTimeField":
if settings.USE_TZ:
converters.append(self.convert_datetimefield_value)
elif internal_type == "DateField":
converters.append(self.convert_datefield_value)
elif internal_type == "TimeField":
converters.append(self.convert_timefield_value)
elif internal_type == "UUIDField":
converters.append(self.convert_uuidfield_value)
# Oracle stores empty strings as null. If the field accepts the empty
# string, undo this to adhere to the Django convention of using
# the empty string instead of null.
if expression.output_field.empty_strings_allowed:
converters.append(
self.convert_empty_bytes
if internal_type == "BinaryField"
else self.convert_empty_string
)
return converters
def convert_textfield_value(self, value, expression, connection):
if isinstance(value, Database.LOB):
value = value.read()
return value
def convert_binaryfield_value(self, value, expression, connection):
if isinstance(value, Database.LOB):
value = force_bytes(value.read())
return value
def convert_booleanfield_value(self, value, expression, connection):
if value in (0, 1):
value = bool(value)
return value
# cx_Oracle always returns datetime.datetime objects for
# DATE and TIMESTAMP columns, but Django wants to see a
# python datetime.date, .time, or .datetime.
def convert_datetimefield_value(self, value, expression, connection):
if value is not None:
value = timezone.make_aware(value, self.connection.timezone)
return value
def convert_datefield_value(self, value, expression, connection):
if isinstance(value, Database.Timestamp):
value = value.date()
return value
def convert_timefield_value(self, value, expression, connection):
if isinstance(value, Database.Timestamp):
value = value.time()
return value
def convert_uuidfield_value(self, value, expression, connection):
if value is not None:
value = uuid.UUID(value)
return value
@staticmethod
def convert_empty_string(value, expression, connection):
return "" if value is None else value
@staticmethod
def convert_empty_bytes(value, expression, connection):
return b"" if value is None else value
def deferrable_sql(self):
return " DEFERRABLE INITIALLY DEFERRED"
def fetch_returned_insert_columns(self, cursor, returning_params):
columns = []
for param in returning_params:
value = param.get_value()
if value == []:
raise DatabaseError(
"The database did not return a new row id. Probably "
'"ORA-1403: no data found" was raised internally but was '
"hidden by the Oracle OCI library (see "
"https://code.djangoproject.com/ticket/28859)."
)
columns.append(value[0])
return tuple(columns)
def field_cast_sql(self, db_type, internal_type):
if db_type and db_type.endswith("LOB") and internal_type != "JSONField":
return "DBMS_LOB.SUBSTR(%s)"
else:
return "%s"
def no_limit_value(self):
return None
def limit_offset_sql(self, low_mark, high_mark):
fetch, offset = self._get_limit_offset_params(low_mark, high_mark)
return " ".join(
sql
for sql in (
("OFFSET %d ROWS" % offset) if offset else None,
("FETCH FIRST %d ROWS ONLY" % fetch) if fetch else None,
)
if sql
)
def last_executed_query(self, cursor, sql, params):
# https://cx-oracle.readthedocs.io/en/latest/api_manual/cursor.html#Cursor.statement
# The DB API definition does not define this attribute.
statement = cursor.statement
        # Unlike Psycopg's `query` and MySQLdb's `_executed`, cx_Oracle's
# `statement` doesn't contain the query parameters. Substitute
# parameters manually.
if params:
if isinstance(params, (tuple, list)):
params = {
f":arg{i}": param for i, param in enumerate(dict.fromkeys(params))
}
elif isinstance(params, dict):
params = {f":{key}": val for (key, val) in params.items()}
for key in sorted(params, key=len, reverse=True):
statement = statement.replace(
key, force_str(params[key], errors="replace")
)
return statement
def last_insert_id(self, cursor, table_name, pk_name):
sq_name = self._get_sequence_name(cursor, strip_quotes(table_name), pk_name)
        cursor.execute('SELECT "%s".currval FROM dual' % sq_name)
return cursor.fetchone()[0]
def lookup_cast(self, lookup_type, internal_type=None):
if lookup_type in ("iexact", "icontains", "istartswith", "iendswith"):
return "UPPER(%s)"
if internal_type == "JSONField" and lookup_type == "exact":
return "DBMS_LOB.SUBSTR(%s)"
return "%s"
def max_in_list_size(self):
return 1000
def max_name_length(self):
return 30
def pk_default_value(self):
return "NULL"
def prep_for_iexact_query(self, x):
return x
def process_clob(self, value):
if value is None:
return ""
return value.read()
def quote_name(self, name):
# SQL92 requires delimited (quoted) names to be case-sensitive. When
# not quoted, Oracle has case-insensitive behavior for identifiers, but
# always defaults to uppercase.
# We simplify things by making Oracle identifiers always uppercase.
if not name.startswith('"') and not name.endswith('"'):
name = '"%s"' % truncate_name(name, self.max_name_length())
# Oracle puts the query text into a (query % args) construct, so % signs
# in names need to be escaped. The '%%' will be collapsed back to '%' at
# that stage so we aren't really making the name longer here.
name = name.replace("%", "%%")
return name.upper()
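    # Illustrative behavior (not part of the original source):
    #   quote_name("django_content_type") -> '"DJANGO_CONTENT_TYPE"'
    # Names longer than max_name_length() (30) are first shortened by
    # truncate_name(), which replaces the tail with a hashed suffix.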
def regex_lookup(self, lookup_type):
if lookup_type == "regex":
match_option = "'c'"
else:
match_option = "'i'"
return "REGEXP_LIKE(%%s, %%s, %s)" % match_option
def return_insert_columns(self, fields):
if not fields:
return "", ()
field_names = []
params = []
for field in fields:
field_names.append(
"%s.%s"
% (
self.quote_name(field.model._meta.db_table),
self.quote_name(field.column),
)
)
params.append(InsertVar(field))
return "RETURNING %s INTO %s" % (
", ".join(field_names),
", ".join(["%s"] * len(params)),
), tuple(params)
def __foreign_key_constraints(self, table_name, recursive):
with self.connection.cursor() as cursor:
if recursive:
cursor.execute(
"""
SELECT
user_tables.table_name, rcons.constraint_name
FROM
user_tables
JOIN
user_constraints cons
ON (user_tables.table_name = cons.table_name
AND cons.constraint_type = ANY('P', 'U'))
LEFT JOIN
user_constraints rcons
ON (user_tables.table_name = rcons.table_name
AND rcons.constraint_type = 'R')
START WITH user_tables.table_name = UPPER(%s)
CONNECT BY
NOCYCLE PRIOR cons.constraint_name = rcons.r_constraint_name
GROUP BY
user_tables.table_name, rcons.constraint_name
HAVING user_tables.table_name != UPPER(%s)
ORDER BY MAX(level) DESC
""",
(table_name, table_name),
)
else:
cursor.execute(
"""
SELECT
cons.table_name, cons.constraint_name
FROM
user_constraints cons
WHERE
cons.constraint_type = 'R'
AND cons.table_name = UPPER(%s)
""",
(table_name,),
)
return cursor.fetchall()
@cached_property
def _foreign_key_constraints(self):
# 512 is large enough to fit the ~330 tables (as of this writing) in
# Django's test suite.
return lru_cache(maxsize=512)(self.__foreign_key_constraints)
def sql_flush(self, style, tables, *, reset_sequences=False, allow_cascade=False):
if not tables:
return []
truncated_tables = {table.upper() for table in tables}
constraints = set()
# Oracle's TRUNCATE CASCADE only works with ON DELETE CASCADE foreign
# keys which Django doesn't define. Emulate the PostgreSQL behavior
# which truncates all dependent tables by manually retrieving all
# foreign key constraints and resolving dependencies.
for table in tables:
for foreign_table, constraint in self._foreign_key_constraints(
table, recursive=allow_cascade
):
if allow_cascade:
truncated_tables.add(foreign_table)
constraints.add((foreign_table, constraint))
sql = (
[
"%s %s %s %s %s %s %s %s;"
% (
style.SQL_KEYWORD("ALTER"),
style.SQL_KEYWORD("TABLE"),
style.SQL_FIELD(self.quote_name(table)),
style.SQL_KEYWORD("DISABLE"),
style.SQL_KEYWORD("CONSTRAINT"),
style.SQL_FIELD(self.quote_name(constraint)),
style.SQL_KEYWORD("KEEP"),
style.SQL_KEYWORD("INDEX"),
)
for table, constraint in constraints
]
+ [
"%s %s %s;"
% (
style.SQL_KEYWORD("TRUNCATE"),
style.SQL_KEYWORD("TABLE"),
style.SQL_FIELD(self.quote_name(table)),
)
for table in truncated_tables
]
+ [
"%s %s %s %s %s %s;"
% (
style.SQL_KEYWORD("ALTER"),
style.SQL_KEYWORD("TABLE"),
style.SQL_FIELD(self.quote_name(table)),
style.SQL_KEYWORD("ENABLE"),
style.SQL_KEYWORD("CONSTRAINT"),
style.SQL_FIELD(self.quote_name(constraint)),
)
for table, constraint in constraints
]
)
if reset_sequences:
sequences = [
sequence
for sequence in self.connection.introspection.sequence_list()
if sequence["table"].upper() in truncated_tables
]
# Since we've just deleted all the rows, running our sequence ALTER
# code will reset the sequence to 0.
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
def sequence_reset_by_name_sql(self, style, sequences):
sql = []
for sequence_info in sequences:
no_autofield_sequence_name = self._get_no_autofield_sequence_name(
sequence_info["table"]
)
table = self.quote_name(sequence_info["table"])
column = self.quote_name(sequence_info["column"] or "id")
query = self._sequence_reset_sql % {
"no_autofield_sequence_name": no_autofield_sequence_name,
"table": table,
"column": column,
"table_name": strip_quotes(table),
"column_name": strip_quotes(column),
}
sql.append(query)
return sql
def sequence_reset_sql(self, style, model_list):
output = []
query = self._sequence_reset_sql
for model in model_list:
for f in model._meta.local_fields:
if isinstance(f, AutoField):
no_autofield_sequence_name = self._get_no_autofield_sequence_name(
model._meta.db_table
)
table = self.quote_name(model._meta.db_table)
column = self.quote_name(f.column)
output.append(
query
% {
"no_autofield_sequence_name": no_autofield_sequence_name,
"table": table,
"column": column,
"table_name": strip_quotes(table),
"column_name": strip_quotes(column),
}
)
# Only one AutoField is allowed per model, so don't
# continue to loop
break
return output
def start_transaction_sql(self):
return ""
def tablespace_sql(self, tablespace, inline=False):
if inline:
return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
else:
return "TABLESPACE %s" % self.quote_name(tablespace)
def adapt_datefield_value(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
The default implementation transforms the date to text, but that is not
necessary for Oracle.
"""
return value
def adapt_datetimefield_value(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
        If a naive datetime is passed, assume it is in UTC. Normally Django's
        models.DateTimeField ensures that, if USE_TZ is True, the passed
        datetime is timezone-aware.
"""
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, "resolve_expression"):
return value
# cx_Oracle doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = timezone.make_naive(value, self.connection.timezone)
else:
raise ValueError(
"Oracle backend does not support timezone-aware datetimes when "
"USE_TZ is False."
)
return Oracle_datetime.from_datetime(value)
def adapt_timefield_value(self, value):
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, "resolve_expression"):
return value
if isinstance(value, str):
return datetime.datetime.strptime(value, "%H:%M:%S")
# Oracle doesn't support tz-aware times
if timezone.is_aware(value):
raise ValueError("Oracle backend does not support timezone-aware times.")
return Oracle_datetime(
1900, 1, 1, value.hour, value.minute, value.second, value.microsecond
)
def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None):
return value
def combine_expression(self, connector, sub_expressions):
lhs, rhs = sub_expressions
if connector == "%%":
return "MOD(%s)" % ",".join(sub_expressions)
elif connector == "&":
return "BITAND(%s)" % ",".join(sub_expressions)
elif connector == "|":
return "BITAND(-%(lhs)s-1,%(rhs)s)+%(lhs)s" % {"lhs": lhs, "rhs": rhs}
elif connector == "<<":
return "(%(lhs)s * POWER(2, %(rhs)s))" % {"lhs": lhs, "rhs": rhs}
elif connector == ">>":
return "FLOOR(%(lhs)s / POWER(2, %(rhs)s))" % {"lhs": lhs, "rhs": rhs}
elif connector == "^":
return "POWER(%s)" % ",".join(sub_expressions)
elif connector == "#":
raise NotSupportedError("Bitwise XOR is not supported in Oracle.")
return super().combine_expression(connector, sub_expressions)
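    # Illustrative emulation (not part of the original source): Oracle has no
    # native bitwise OR or shift operators, so with lhs "a" and rhs "b":
    #   a | b  -> BITAND(-a-1,b)+a
    #   a << b -> (a * POWER(2, b))
    # both are built from BITAND/POWER, which Oracle does provide.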
def _get_no_autofield_sequence_name(self, table):
"""
Manually created sequence name to keep backward compatibility for
AutoFields that aren't Oracle identity columns.
"""
name_length = self.max_name_length() - 3
return "%s_SQ" % truncate_name(strip_quotes(table), name_length).upper()
def _get_sequence_name(self, cursor, table, pk_name):
cursor.execute(
"""
SELECT sequence_name
FROM user_tab_identity_cols
WHERE table_name = UPPER(%s)
AND column_name = UPPER(%s)""",
[table, pk_name],
)
row = cursor.fetchone()
return self._get_no_autofield_sequence_name(table) if row is None else row[0]
def bulk_insert_sql(self, fields, placeholder_rows):
query = []
for row in placeholder_rows:
select = []
for i, placeholder in enumerate(row):
# A model without any fields has fields=[None].
if fields[i]:
internal_type = getattr(
fields[i], "target_field", fields[i]
).get_internal_type()
placeholder = (
BulkInsertMapper.types.get(internal_type, "%s") % placeholder
)
# Add columns aliases to the first select to avoid "ORA-00918:
# column ambiguously defined" when two or more columns in the
# first select have the same value.
if not query:
placeholder = "%s col_%s" % (placeholder, i)
select.append(placeholder)
query.append("SELECT %s FROM DUAL" % ", ".join(select))
# Bulk insert to tables with Oracle identity columns causes Oracle to
# add sequence.nextval to it. Sequence.nextval cannot be used with the
# UNION operator. To prevent incorrect SQL, move UNION to a subquery.
return "SELECT * FROM (%s)" % " UNION ALL ".join(query)
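    # Illustrative output (hypothetical columns): two rows of two plain
    # placeholders produce
    #   SELECT * FROM (SELECT %s col_0, %s col_1 FROM DUAL
    #                  UNION ALL SELECT %s, %s FROM DUAL)
    # with column aliases only on the first SELECT, as explained above.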
def subtract_temporals(self, internal_type, lhs, rhs):
if internal_type == "DateField":
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
params = (*lhs_params, *rhs_params)
return (
"NUMTODSINTERVAL(TO_NUMBER(%s - %s), 'DAY')" % (lhs_sql, rhs_sql),
params,
)
return super().subtract_temporals(internal_type, lhs, rhs)
def bulk_batch_size(self, fields, objs):
"""Oracle restricts the number of parameters in a query."""
if fields:
return self.connection.features.max_query_params // len(fields)
return len(objs)
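    # Worked example (illustrative numbers): with max_query_params = 65535 and
    # 4 fields per object, bulk_create() batches at most 65535 // 4 = 16383
    # objects per INSERT, keeping Oracle's parameter limit intact.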
def conditional_expression_supported_in_where_clause(self, expression):
"""
        Oracle supports only EXISTS(...) or filters in the WHERE clause; other
        expressions must be compared with True.
"""
if isinstance(expression, (Exists, Lookup, WhereNode)):
return True
if isinstance(expression, ExpressionWrapper) and expression.conditional:
return self.conditional_expression_supported_in_where_clause(
expression.expression
)
if isinstance(expression, RawSQL) and expression.conditional:
return True
return False
|
9a39013efa84c28cd9b8167a1ee542bdf5d72ab5bba9119806862b593a12f867 | import operator
from django.db import transaction
from django.db.backends.base.features import BaseDatabaseFeatures
from django.db.utils import OperationalError
from django.utils.functional import cached_property
from .base import Database
class DatabaseFeatures(BaseDatabaseFeatures):
minimum_database_version = (3, 9)
test_db_allows_multiple_connections = False
supports_unspecified_pk = True
supports_timezones = False
max_query_params = 999
supports_transactions = True
atomic_transactions = False
can_rollback_ddl = True
can_create_inline_fk = False
requires_literal_defaults = True
can_clone_databases = True
supports_temporal_subtraction = True
ignores_table_name_case = True
supports_cast_with_precision = False
time_cast_precision = 3
can_release_savepoints = True
has_case_insensitive_like = True
# Is "ALTER TABLE ... RENAME COLUMN" supported?
can_alter_table_rename_column = Database.sqlite_version_info >= (3, 25, 0)
# Is "ALTER TABLE ... DROP COLUMN" supported?
can_alter_table_drop_column = Database.sqlite_version_info >= (3, 35, 5)
supports_parentheses_in_compound = False
# Deferred constraint checks can be emulated on SQLite < 3.20 but not in a
# reasonably performant way.
supports_pragma_foreign_key_check = Database.sqlite_version_info >= (3, 20, 0)
can_defer_constraint_checks = supports_pragma_foreign_key_check
supports_functions_in_partial_indexes = Database.sqlite_version_info >= (3, 15, 0)
supports_over_clause = Database.sqlite_version_info >= (3, 25, 0)
supports_frame_range_fixed_distance = Database.sqlite_version_info >= (3, 28, 0)
supports_aggregate_filter_clause = Database.sqlite_version_info >= (3, 30, 1)
supports_order_by_nulls_modifier = Database.sqlite_version_info >= (3, 30, 0)
# NULLS LAST/FIRST emulation on < 3.30 requires subquery wrapping.
requires_compound_order_by_subquery = Database.sqlite_version_info < (3, 30)
order_by_nulls_first = True
supports_json_field_contains = False
supports_update_conflicts = Database.sqlite_version_info >= (3, 24, 0)
supports_update_conflicts_with_target = supports_update_conflicts
test_collations = {
"ci": "nocase",
"cs": "binary",
"non_default": "nocase",
}
django_test_expected_failures = {
# The django_format_dtdelta() function doesn't properly handle mixed
# Date/DateTime fields and timedeltas.
"expressions.tests.FTimeDeltaTests.test_mixed_comparisons1",
}
create_test_table_with_composite_primary_key = """
CREATE TABLE test_table_composite_pk (
column_1 INTEGER NOT NULL,
column_2 INTEGER NOT NULL,
PRIMARY KEY(column_1, column_2)
)
"""
@cached_property
def django_test_skips(self):
skips = {
"SQLite stores values rounded to 15 significant digits.": {
"model_fields.test_decimalfield.DecimalFieldTests."
"test_fetch_from_db_without_float_rounding",
},
"SQLite naively remakes the table on field alteration.": {
"schema.tests.SchemaTests.test_unique_no_unnecessary_fk_drops",
"schema.tests.SchemaTests.test_unique_and_reverse_m2m",
"schema.tests.SchemaTests."
"test_alter_field_default_doesnt_perform_queries",
"schema.tests.SchemaTests."
"test_rename_column_renames_deferred_sql_references",
},
"SQLite doesn't support negative precision for ROUND().": {
"db_functions.math.test_round.RoundTests."
"test_null_with_negative_precision",
"db_functions.math.test_round.RoundTests."
"test_decimal_with_negative_precision",
"db_functions.math.test_round.RoundTests."
"test_float_with_negative_precision",
"db_functions.math.test_round.RoundTests."
"test_integer_with_negative_precision",
},
}
if Database.sqlite_version_info < (3, 27):
skips.update(
{
"Nondeterministic failure on SQLite < 3.27.": {
"expressions_window.tests.WindowFunctionTests."
"test_subquery_row_range_rank",
},
}
)
if self.connection.is_in_memory_db():
skips.update(
{
"the sqlite backend's close() method is a no-op when using an "
"in-memory database": {
"servers.test_liveserverthread.LiveServerThreadTest."
"test_closes_connections",
"servers.tests.LiveServerTestCloseConnectionTest."
"test_closes_connections",
                    },
                    "For SQLite in-memory tests, closing the connection destroys "
"the database.": {
"test_utils.tests.AssertNumQueriesUponConnectionTests."
"test_ignores_connection_configuration_queries",
},
}
)
else:
skips.update(
{
"Only connections to in-memory SQLite databases are passed to the "
"server thread.": {
"servers.tests.LiveServerInMemoryDatabaseLockTest."
"test_in_memory_database_lock",
},
"multiprocessing's start method is checked only for in-memory "
"SQLite databases": {
"backends.sqlite.test_creation.TestDbSignatureTests."
"test_get_test_db_clone_settings_not_supported",
},
}
)
return skips
@cached_property
def supports_atomic_references_rename(self):
return Database.sqlite_version_info >= (3, 26, 0)
@cached_property
def introspected_field_types(self):
return {
**super().introspected_field_types,
"BigAutoField": "AutoField",
"DurationField": "BigIntegerField",
"GenericIPAddressField": "CharField",
"SmallAutoField": "AutoField",
}
@cached_property
def supports_json_field(self):
with self.connection.cursor() as cursor:
try:
with transaction.atomic(self.connection.alias):
cursor.execute('SELECT JSON(\'{"a": "b"}\')')
except OperationalError:
return False
return True
can_introspect_json_field = property(operator.attrgetter("supports_json_field"))
has_json_object_function = property(operator.attrgetter("supports_json_field"))
@cached_property
def can_return_columns_from_insert(self):
return Database.sqlite_version_info >= (3, 35)
can_return_rows_from_bulk_insert = property(
operator.attrgetter("can_return_columns_from_insert")
)
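# Illustrative standalone sketch (not part of Django): the same JSON probe
# as supports_json_field above, but using the DB-API module directly. A
# failing statement is the expected signal on builds without the JSON1
# extension.
def _sqlite_supports_json(database=":memory:"):
    conn = Database.connect(database)
    try:
        conn.execute('SELECT JSON(\'{"a": "b"}\')')
        return True
    except Database.OperationalError:
        return False
    finally:
        conn.close()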
|
ce265ec7dc62ab105897d482f758b84080d8ba87a7cf83a829a34eeec5eef244 | import time
from importlib import import_module
from django.conf import settings
from django.contrib.sessions.backends.base import UpdateError
from django.contrib.sessions.exceptions import SessionInterrupted
from django.utils.cache import patch_vary_headers
from django.utils.deprecation import MiddlewareMixin
from django.utils.http import http_date
class SessionMiddleware(MiddlewareMixin):
def __init__(self, get_response):
super().__init__(get_response)
engine = import_module(settings.SESSION_ENGINE)
self.SessionStore = engine.SessionStore
def process_request(self, request):
session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME)
request.session = self.SessionStore(session_key)
def process_response(self, request, response):
"""
If request.session was modified, or if the configuration is to save the
session every time, save the changes and set a session cookie or delete
the session cookie if the session has been emptied.
"""
try:
accessed = request.session.accessed
modified = request.session.modified
empty = request.session.is_empty()
except AttributeError:
return response
# First check if we need to delete this cookie.
# The session should be deleted only if the session is entirely empty.
if settings.SESSION_COOKIE_NAME in request.COOKIES and empty:
response.delete_cookie(
settings.SESSION_COOKIE_NAME,
path=settings.SESSION_COOKIE_PATH,
domain=settings.SESSION_COOKIE_DOMAIN,
samesite=settings.SESSION_COOKIE_SAMESITE,
)
patch_vary_headers(response, ("Cookie",))
else:
if accessed:
patch_vary_headers(response, ("Cookie",))
if (modified or settings.SESSION_SAVE_EVERY_REQUEST) and not empty:
if request.session.get_expire_at_browser_close():
max_age = None
expires = None
else:
max_age = request.session.get_expiry_age()
expires_time = time.time() + max_age
expires = http_date(expires_time)
# Save the session data and refresh the client cookie.
# Skip session save for 5xx responses.
if response.status_code < 500:
try:
request.session.save()
except UpdateError:
raise SessionInterrupted(
"The request's session was deleted before the "
"request completed. The user may have logged "
"out in a concurrent request, for example."
)
response.set_cookie(
settings.SESSION_COOKIE_NAME,
request.session.session_key,
max_age=max_age,
expires=expires,
domain=settings.SESSION_COOKIE_DOMAIN,
path=settings.SESSION_COOKIE_PATH,
secure=settings.SESSION_COOKIE_SECURE or None,
httponly=settings.SESSION_COOKIE_HTTPONLY or None,
samesite=settings.SESSION_COOKIE_SAMESITE,
)
return response
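# Illustrative sketch (not part of the middleware above): how the Expires
# value is derived for a session that does not expire at browser close.
# max_age is a hypothetical expiry age in seconds.
def _example_expires_header(max_age=1209600):
    # http_date() formats a Unix timestamp per RFC 7231, e.g.
    # "Wed, 21 Oct 2015 07:28:00 GMT".
    return http_date(time.time() + max_age)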
|
1ec7708b8a464b901d8017cee0e50c20513614589a510b776a9c15659ba62f16 | """
Tests for django.core.servers.
"""
import errno
import os
import socket
import threading
import unittest
from http.client import HTTPConnection
from urllib.error import HTTPError
from urllib.parse import urlencode
from urllib.request import urlopen
from django.conf import settings
from django.core.servers.basehttp import ThreadedWSGIServer, WSGIServer
from django.db import DEFAULT_DB_ALIAS, connection, connections
from django.test import LiveServerTestCase, override_settings
from django.test.testcases import LiveServerThread, QuietWSGIRequestHandler
from .models import Person
TEST_ROOT = os.path.dirname(__file__)
TEST_SETTINGS = {
"MEDIA_URL": "media/",
"MEDIA_ROOT": os.path.join(TEST_ROOT, "media"),
"STATIC_URL": "static/",
"STATIC_ROOT": os.path.join(TEST_ROOT, "static"),
}
@override_settings(ROOT_URLCONF="servers.urls", **TEST_SETTINGS)
class LiveServerBase(LiveServerTestCase):
available_apps = [
"servers",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
]
fixtures = ["testdata.json"]
def urlopen(self, url):
return urlopen(self.live_server_url + url)
class CloseConnectionTestServer(ThreadedWSGIServer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# This event is set right after the first time a request closes its
# database connections.
self._connections_closed = threading.Event()
def _close_connections(self):
super()._close_connections()
self._connections_closed.set()
class CloseConnectionTestLiveServerThread(LiveServerThread):
server_class = CloseConnectionTestServer
def _create_server(self, connections_override=None):
return super()._create_server(connections_override=self.connections_override)
class LiveServerTestCloseConnectionTest(LiveServerBase):
server_thread_class = CloseConnectionTestLiveServerThread
@classmethod
def _make_connections_override(cls):
conn = connections[DEFAULT_DB_ALIAS]
cls.conn = conn
cls.old_conn_max_age = conn.settings_dict["CONN_MAX_AGE"]
# Set the connection's CONN_MAX_AGE to None to simulate the
# CONN_MAX_AGE setting being set to None on the server. This prevents
# Django from closing the connection and allows testing that
# ThreadedWSGIServer closes connections.
conn.settings_dict["CONN_MAX_AGE"] = None
# Pass a database connection through to the server to check it is being
# closed by ThreadedWSGIServer.
return {DEFAULT_DB_ALIAS: conn}
@classmethod
def tearDownConnectionTest(cls):
cls.conn.settings_dict["CONN_MAX_AGE"] = cls.old_conn_max_age
@classmethod
def tearDownClass(cls):
cls.tearDownConnectionTest()
super().tearDownClass()
def test_closes_connections(self):
# The server's request thread sets this event after closing
# its database connections.
closed_event = self.server_thread.httpd._connections_closed
conn = self.conn
# Open a connection to the database.
conn.connect()
self.assertIsNotNone(conn.connection)
with self.urlopen("/model_view/") as f:
# The server can access the database.
self.assertCountEqual(f.read().splitlines(), [b"jane", b"robert"])
# Wait for the server's request thread to close the connection.
# A timeout of 0.1 seconds should be more than enough. If the wait
        # times out, the assertion that follows should fail.
closed_event.wait(timeout=0.1)
self.assertIsNone(conn.connection)
@unittest.skipUnless(connection.vendor == "sqlite", "SQLite specific test.")
class LiveServerInMemoryDatabaseLockTest(LiveServerBase):
def test_in_memory_database_lock(self):
"""
With a threaded LiveServer and an in-memory database, an error can
occur when 2 requests reach the server and try to lock the database
at the same time, if the requests do not share the same database
connection.
"""
conn = self.server_thread.connections_override[DEFAULT_DB_ALIAS]
# Open a connection to the database.
conn.connect()
# Create a transaction to lock the database.
cursor = conn.cursor()
cursor.execute("BEGIN IMMEDIATE TRANSACTION")
try:
with self.urlopen("/create_model_instance/") as f:
self.assertEqual(f.status, 200)
except HTTPError:
self.fail("Unexpected error due to a database lock.")
finally:
# Release the transaction.
cursor.execute("ROLLBACK")
class FailingLiveServerThread(LiveServerThread):
def _create_server(self, connections_override=None):
raise RuntimeError("Error creating server.")
class LiveServerTestCaseSetupTest(LiveServerBase):
server_thread_class = FailingLiveServerThread
@classmethod
def check_allowed_hosts(cls, expected):
if settings.ALLOWED_HOSTS != expected:
raise RuntimeError(f"{settings.ALLOWED_HOSTS} != {expected}")
@classmethod
def setUpClass(cls):
cls.check_allowed_hosts(["testserver"])
try:
super().setUpClass()
except RuntimeError:
# LiveServerTestCase's change to ALLOWED_HOSTS should be reverted.
cls.doClassCleanups()
cls.check_allowed_hosts(["testserver"])
else:
raise RuntimeError("Server did not fail.")
cls.set_up_called = True
def test_set_up_class(self):
self.assertIs(self.set_up_called, True)
class LiveServerAddress(LiveServerBase):
@classmethod
def setUpClass(cls):
super().setUpClass()
# put it in a list to prevent descriptor lookups in test
cls.live_server_url_test = [cls.live_server_url]
def test_live_server_url_is_class_property(self):
self.assertIsInstance(self.live_server_url_test[0], str)
self.assertEqual(self.live_server_url_test[0], self.live_server_url)
class LiveServerSingleThread(LiveServerThread):
def _create_server(self, connections_override=None):
return WSGIServer(
(self.host, self.port), QuietWSGIRequestHandler, allow_reuse_address=False
)
class SingleThreadLiveServerTestCase(LiveServerTestCase):
server_thread_class = LiveServerSingleThread
class LiveServerViews(LiveServerBase):
def test_protocol(self):
"""Launched server serves with HTTP 1.1."""
with self.urlopen("/example_view/") as f:
self.assertEqual(f.version, 11)
def test_closes_connection_without_content_length(self):
"""
        An HTTP 1.1 server is supposed to support keep-alive. Since our
        development server is rather simple, we support it only in cases
        where we can detect a content length from the response. This should
        be doable for all simple views and streaming responses where an
        iterable with a length of one is passed. The latter follows as a
        result of `set_content_length` from
        https://github.com/python/cpython/blob/main/Lib/wsgiref/handlers.py.
        If we cannot detect a content length, we explicitly set the
        `Connection` header to `close` to notify the client that we do not
        actually support it.
"""
conn = HTTPConnection(
LiveServerViews.server_thread.host,
LiveServerViews.server_thread.port,
timeout=1,
)
try:
conn.request(
"GET", "/streaming_example_view/", headers={"Connection": "keep-alive"}
)
response = conn.getresponse()
self.assertTrue(response.will_close)
self.assertEqual(response.read(), b"Iamastream")
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader("Connection"), "close")
conn.request(
"GET", "/streaming_example_view/", headers={"Connection": "close"}
)
response = conn.getresponse()
self.assertTrue(response.will_close)
self.assertEqual(response.read(), b"Iamastream")
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader("Connection"), "close")
finally:
conn.close()
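    # Hedged recap of the rule exercised above (illustrative pseudocode,
    # not the dev-server source): keep-alive is honored only when a content
    # length could be determined for the response:
    #
    #   if "Content-Length" not in response_headers:
    #       response_headers["Connection"] = "close"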
def test_keep_alive_on_connection_with_content_length(self):
"""
See `test_closes_connection_without_content_length` for details. This
        is a follow-up test, which ensures that we do not close the connection
if not needed, hence allowing us to take advantage of keep-alive.
"""
conn = HTTPConnection(
LiveServerViews.server_thread.host, LiveServerViews.server_thread.port
)
try:
conn.request("GET", "/example_view/", headers={"Connection": "keep-alive"})
response = conn.getresponse()
self.assertFalse(response.will_close)
self.assertEqual(response.read(), b"example view")
self.assertEqual(response.status, 200)
self.assertIsNone(response.getheader("Connection"))
conn.request("GET", "/example_view/", headers={"Connection": "close"})
response = conn.getresponse()
self.assertFalse(response.will_close)
self.assertEqual(response.read(), b"example view")
self.assertEqual(response.status, 200)
self.assertIsNone(response.getheader("Connection"))
finally:
conn.close()
def test_keep_alive_connection_clears_previous_request_data(self):
conn = HTTPConnection(
LiveServerViews.server_thread.host, LiveServerViews.server_thread.port
)
try:
conn.request(
"POST", "/method_view/", b"{}", headers={"Connection": "keep-alive"}
)
response = conn.getresponse()
self.assertFalse(response.will_close)
self.assertEqual(response.status, 200)
self.assertEqual(response.read(), b"POST")
conn.request(
"POST", "/method_view/", b"{}", headers={"Connection": "close"}
)
response = conn.getresponse()
self.assertFalse(response.will_close)
self.assertEqual(response.status, 200)
self.assertEqual(response.read(), b"POST")
finally:
conn.close()
def test_404(self):
with self.assertRaises(HTTPError) as err:
self.urlopen("/")
err.exception.close()
self.assertEqual(err.exception.code, 404, "Expected 404 response")
def test_view(self):
with self.urlopen("/example_view/") as f:
self.assertEqual(f.read(), b"example view")
def test_static_files(self):
with self.urlopen("/static/example_static_file.txt") as f:
self.assertEqual(f.read().rstrip(b"\r\n"), b"example static file")
def test_no_collectstatic_emulation(self):
"""
        LiveServerTestCase reports a 404 status code when an HTTP client
tries to access a static file that isn't explicitly put under
STATIC_ROOT.
"""
with self.assertRaises(HTTPError) as err:
self.urlopen("/static/another_app/another_app_static_file.txt")
err.exception.close()
self.assertEqual(err.exception.code, 404, "Expected 404 response")
def test_media_files(self):
with self.urlopen("/media/example_media_file.txt") as f:
self.assertEqual(f.read().rstrip(b"\r\n"), b"example media file")
def test_environ(self):
with self.urlopen("/environ_view/?%s" % urlencode({"q": "тест"})) as f:
self.assertIn(b"QUERY_STRING: 'q=%D1%82%D0%B5%D1%81%D1%82'", f.read())
@override_settings(ROOT_URLCONF="servers.urls")
class SingleThreadLiveServerViews(SingleThreadLiveServerTestCase):
available_apps = ["servers"]
def test_closes_connection_with_content_length(self):
"""
Contrast to
LiveServerViews.test_keep_alive_on_connection_with_content_length().
        Persistent connections require a threading server.
"""
conn = HTTPConnection(
SingleThreadLiveServerViews.server_thread.host,
SingleThreadLiveServerViews.server_thread.port,
timeout=1,
)
try:
conn.request("GET", "/example_view/", headers={"Connection": "keep-alive"})
response = conn.getresponse()
self.assertTrue(response.will_close)
self.assertEqual(response.read(), b"example view")
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader("Connection"), "close")
finally:
conn.close()
class LiveServerDatabase(LiveServerBase):
def test_fixtures_loaded(self):
"""
Fixtures are properly loaded and visible to the live server thread.
"""
with self.urlopen("/model_view/") as f:
self.assertCountEqual(f.read().splitlines(), [b"jane", b"robert"])
def test_database_writes(self):
"""
Data written to the database by a view can be read.
"""
with self.urlopen("/create_model_instance/"):
pass
self.assertQuerySetEqual(
Person.objects.order_by("pk"),
["jane", "robert", "emily"],
lambda b: b.name,
)
class LiveServerPort(LiveServerBase):
def test_port_bind(self):
"""
Each LiveServerTestCase binds to a unique port or fails to start a
server thread when run concurrently (#26011).
"""
TestCase = type("TestCase", (LiveServerBase,), {})
try:
TestCase._start_server_thread()
except OSError as e:
if e.errno == errno.EADDRINUSE:
                # We're out of ports; LiveServerTestCase correctly fails with
# an OSError.
return
# Unexpected error.
raise
self.assertNotEqual(
self.live_server_url,
TestCase.live_server_url,
f"Acquired duplicate server addresses for server threads: "
f"{self.live_server_url}",
)
def test_specified_port_bind(self):
"""LiveServerTestCase.port customizes the server's port."""
TestCase = type("TestCase", (LiveServerBase,), {})
# Find an open port and tell TestCase to use it.
s = socket.socket()
s.bind(("", 0))
TestCase.port = s.getsockname()[1]
s.close()
TestCase._start_server_thread()
self.assertEqual(
TestCase.port,
TestCase.server_thread.port,
f"Did not use specified port for LiveServerTestCase thread: "
f"{TestCase.port}",
)
class LiveServerThreadedTests(LiveServerBase):
"""If LiveServerTestCase isn't threaded, these tests will hang."""
def test_view_calls_subview(self):
url = "/subview_calling_view/?%s" % urlencode({"url": self.live_server_url})
with self.urlopen(url) as f:
self.assertEqual(f.read(), b"subview calling view: subview")
def test_check_model_instance_from_subview(self):
url = "/check_model_instance_from_subview/?%s" % urlencode(
{
"url": self.live_server_url,
}
)
with self.urlopen(url) as f:
self.assertIn(b"emily", f.read())
|
9f08c3bead1b54bc4e16019a5b3abf50d18b980248df3b163e0015da4ee4d098 | import datetime
import math
import re
from decimal import Decimal
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
Avg,
Case,
Count,
DateField,
DateTimeField,
DecimalField,
DurationField,
Exists,
F,
FloatField,
IntegerField,
Max,
Min,
OuterRef,
Q,
StdDev,
Subquery,
Sum,
TimeField,
Value,
Variance,
When,
)
from django.db.models.expressions import Func, RawSQL
from django.db.models.functions import (
Cast,
Coalesce,
Greatest,
Lower,
Now,
Pi,
TruncDate,
TruncHour,
)
from django.test import TestCase
from django.test.testcases import skipUnlessDBFeature
from django.test.utils import Approximate, CaptureQueriesContext
from django.utils import timezone
from .models import Author, Book, Publisher, Store
class NowUTC(Now):
template = "CURRENT_TIMESTAMP"
output_field = DateTimeField()
def as_sql(self, compiler, connection, **extra_context):
if connection.features.test_now_utc_template:
extra_context["template"] = connection.features.test_now_utc_template
return super().as_sql(compiler, connection, **extra_context)
class AggregateTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name="Adrian Holovaty", age=34)
cls.a2 = Author.objects.create(name="Jacob Kaplan-Moss", age=35)
cls.a3 = Author.objects.create(name="Brad Dayley", age=45)
cls.a4 = Author.objects.create(name="James Bennett", age=29)
cls.a5 = Author.objects.create(name="Jeffrey Forcier", age=37)
cls.a6 = Author.objects.create(name="Paul Bissex", age=29)
cls.a7 = Author.objects.create(name="Wesley J. Chun", age=25)
cls.a8 = Author.objects.create(name="Peter Norvig", age=57)
cls.a9 = Author.objects.create(name="Stuart Russell", age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(
name="Apress", num_awards=3, duration=datetime.timedelta(days=1)
)
cls.p2 = Publisher.objects.create(
name="Sams", num_awards=1, duration=datetime.timedelta(days=2)
)
cls.p3 = Publisher.objects.create(name="Prentice Hall", num_awards=7)
cls.p4 = Publisher.objects.create(name="Morgan Kaufmann", num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn="159059725",
name="The Definitive Guide to Django: Web Development Done Right",
pages=447,
rating=4.5,
price=Decimal("30.00"),
contact=cls.a1,
publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6),
)
cls.b2 = Book.objects.create(
isbn="067232959",
name="Sams Teach Yourself Django in 24 Hours",
pages=528,
rating=3.0,
price=Decimal("23.09"),
contact=cls.a3,
publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3),
)
cls.b3 = Book.objects.create(
isbn="159059996",
name="Practical Django Projects",
pages=300,
rating=4.0,
price=Decimal("29.69"),
contact=cls.a4,
publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23),
)
cls.b4 = Book.objects.create(
isbn="013235613",
name="Python Web Development with Django",
pages=350,
rating=4.0,
price=Decimal("29.69"),
contact=cls.a5,
publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3),
)
cls.b5 = Book.objects.create(
isbn="013790395",
name="Artificial Intelligence: A Modern Approach",
pages=1132,
rating=4.0,
price=Decimal("82.80"),
contact=cls.a8,
publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15),
)
cls.b6 = Book.objects.create(
isbn="155860191",
name=(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp"
),
pages=946,
rating=5.0,
price=Decimal("75.00"),
contact=cls.a8,
publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15),
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name="Amazon.com",
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59),
)
s2 = Store.objects.create(
name="Books.com",
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59),
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30),
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def test_empty_aggregate(self):
self.assertEqual(Author.objects.aggregate(), {})
def test_aggregate_in_order_by(self):
msg = (
"Using an aggregate in order_by() without also including it in "
"annotate() is not allowed: Avg(F(book__rating)"
)
with self.assertRaisesMessage(FieldError, msg):
Author.objects.values("age").order_by(Avg("book__rating"))
def test_single_aggregate(self):
vals = Author.objects.aggregate(Avg("age"))
self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})
def test_multiple_aggregates(self):
vals = Author.objects.aggregate(Sum("age"), Avg("age"))
self.assertEqual(
vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)}
)
def test_filter_aggregate(self):
vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
self.assertEqual(vals, {"age__sum": 254})
def test_related_aggregate(self):
vals = Author.objects.aggregate(Avg("friends__age"))
self.assertEqual(vals, {"friends__age__avg": Approximate(34.07, places=2)})
vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
self.assertEqual(vals, {"authors__age__avg": Approximate(38.2857, places=2)})
vals = Author.objects.filter(name__contains="a").aggregate(Avg("book__rating"))
self.assertEqual(vals, {"book__rating__avg": 4.0})
vals = Book.objects.aggregate(Sum("publisher__num_awards"))
self.assertEqual(vals, {"publisher__num_awards__sum": 30})
vals = Publisher.objects.aggregate(Sum("book__price"))
self.assertEqual(vals, {"book__price__sum": Decimal("270.27")})
def test_aggregate_multi_join(self):
vals = Store.objects.aggregate(Max("books__authors__age"))
self.assertEqual(vals, {"books__authors__age__max": 57})
vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
self.assertEqual(vals, {"book__publisher__num_awards__min": 1})
def test_aggregate_alias(self):
vals = Store.objects.filter(name="Amazon.com").aggregate(
amazon_mean=Avg("books__rating")
)
self.assertEqual(vals, {"amazon_mean": Approximate(4.08, places=2)})
def test_aggregate_transform(self):
vals = Store.objects.aggregate(min_month=Min("original_opening__month"))
self.assertEqual(vals, {"min_month": 3})
def test_aggregate_join_transform(self):
vals = Publisher.objects.aggregate(min_year=Min("book__pubdate__year"))
self.assertEqual(vals, {"min_year": 1991})
def test_annotate_basic(self):
self.assertQuerySetEqual(
Book.objects.annotate().order_by("pk"),
[
"The Definitive Guide to Django: Web Development Done Right",
"Sams Teach Yourself Django in 24 Hours",
"Practical Django Projects",
"Python Web Development with Django",
"Artificial Intelligence: A Modern Approach",
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
],
lambda b: b.name,
)
books = Book.objects.annotate(mean_age=Avg("authors__age"))
b = books.get(pk=self.b1.pk)
self.assertEqual(
b.name, "The Definitive Guide to Django: Web Development Done Right"
)
self.assertEqual(b.mean_age, 34.5)
def test_annotate_defer(self):
qs = (
Book.objects.annotate(page_sum=Sum("pages"))
.defer("name")
.filter(pk=self.b1.pk)
)
rows = [
(
self.b1.id,
"159059725",
447,
"The Definitive Guide to Django: Web Development Done Right",
)
]
self.assertQuerySetEqual(
qs.order_by("pk"), rows, lambda r: (r.id, r.isbn, r.page_sum, r.name)
)
def test_annotate_defer_select_related(self):
qs = (
Book.objects.select_related("contact")
.annotate(page_sum=Sum("pages"))
.defer("name")
.filter(pk=self.b1.pk)
)
rows = [
(
self.b1.id,
"159059725",
447,
"Adrian Holovaty",
"The Definitive Guide to Django: Web Development Done Right",
)
]
self.assertQuerySetEqual(
qs.order_by("pk"),
rows,
lambda r: (r.id, r.isbn, r.page_sum, r.contact.name, r.name),
)
def test_annotate_m2m(self):
books = (
Book.objects.filter(rating__lt=4.5)
.annotate(Avg("authors__age"))
.order_by("name")
)
self.assertQuerySetEqual(
books,
[
("Artificial Intelligence: A Modern Approach", 51.5),
("Practical Django Projects", 29.0),
("Python Web Development with Django", Approximate(30.3, places=1)),
("Sams Teach Yourself Django in 24 Hours", 45.0),
],
lambda b: (b.name, b.authors__age__avg),
)
books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
self.assertQuerySetEqual(
books,
[
("Artificial Intelligence: A Modern Approach", 2),
(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
1,
),
("Practical Django Projects", 1),
("Python Web Development with Django", 3),
("Sams Teach Yourself Django in 24 Hours", 1),
("The Definitive Guide to Django: Web Development Done Right", 2),
],
lambda b: (b.name, b.num_authors),
)
def test_backwards_m2m_annotate(self):
authors = (
Author.objects.filter(name__contains="a")
.annotate(Avg("book__rating"))
.order_by("name")
)
self.assertQuerySetEqual(
authors,
[
("Adrian Holovaty", 4.5),
("Brad Dayley", 3.0),
("Jacob Kaplan-Moss", 4.5),
("James Bennett", 4.0),
("Paul Bissex", 4.0),
("Stuart Russell", 4.0),
],
lambda a: (a.name, a.book__rating__avg),
)
authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
self.assertQuerySetEqual(
authors,
[
("Adrian Holovaty", 1),
("Brad Dayley", 1),
("Jacob Kaplan-Moss", 1),
("James Bennett", 1),
("Jeffrey Forcier", 1),
("Paul Bissex", 1),
("Peter Norvig", 2),
("Stuart Russell", 1),
("Wesley J. Chun", 1),
],
lambda a: (a.name, a.num_books),
)
def test_reverse_fkey_annotate(self):
books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
self.assertQuerySetEqual(
books,
[
("Artificial Intelligence: A Modern Approach", 7),
(
"Paradigms of Artificial Intelligence Programming: Case Studies in "
"Common Lisp",
9,
),
("Practical Django Projects", 3),
("Python Web Development with Django", 7),
("Sams Teach Yourself Django in 24 Hours", 1),
("The Definitive Guide to Django: Web Development Done Right", 3),
],
lambda b: (b.name, b.publisher__num_awards__sum),
)
publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
self.assertQuerySetEqual(
publishers,
[
("Apress", Decimal("59.69")),
("Jonno's House of Books", None),
("Morgan Kaufmann", Decimal("75.00")),
("Prentice Hall", Decimal("112.49")),
("Sams", Decimal("23.09")),
],
lambda p: (p.name, p.book__price__sum),
)
def test_annotate_values(self):
books = list(
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values()
)
self.assertEqual(
books,
[
{
"contact_id": self.a1.id,
"id": self.b1.id,
"isbn": "159059725",
"mean_age": 34.5,
"name": (
"The Definitive Guide to Django: Web Development Done Right"
),
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": self.p1.id,
"rating": 4.5,
}
],
)
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values("pk", "isbn", "mean_age")
)
self.assertEqual(
list(books),
[
{
"pk": self.b1.pk,
"isbn": "159059725",
"mean_age": 34.5,
}
],
)
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values("name")
)
self.assertEqual(
list(books),
[{"name": "The Definitive Guide to Django: Web Development Done Right"}],
)
books = (
Book.objects.filter(pk=self.b1.pk)
.values()
.annotate(mean_age=Avg("authors__age"))
)
self.assertEqual(
list(books),
[
{
"contact_id": self.a1.id,
"id": self.b1.id,
"isbn": "159059725",
"mean_age": 34.5,
"name": (
"The Definitive Guide to Django: Web Development Done Right"
),
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": self.p1.id,
"rating": 4.5,
}
],
)
books = (
Book.objects.values("rating")
.annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age"))
.order_by("rating")
)
self.assertEqual(
list(books),
[
{
"rating": 3.0,
"n_authors": 1,
"mean_age": 45.0,
},
{
"rating": 4.0,
"n_authors": 6,
"mean_age": Approximate(37.16, places=1),
},
{
"rating": 4.5,
"n_authors": 2,
"mean_age": 34.5,
},
{
"rating": 5.0,
"n_authors": 1,
"mean_age": 57.0,
},
],
)
authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
self.assertQuerySetEqual(
authors,
[
("Adrian Holovaty", 32.0),
("Brad Dayley", None),
("Jacob Kaplan-Moss", 29.5),
("James Bennett", 34.0),
("Jeffrey Forcier", 27.0),
("Paul Bissex", 31.0),
("Peter Norvig", 46.0),
("Stuart Russell", 57.0),
("Wesley J. Chun", Approximate(33.66, places=1)),
],
lambda a: (a.name, a.friends__age__avg),
)
def test_count(self):
vals = Book.objects.aggregate(Count("rating"))
self.assertEqual(vals, {"rating__count": 6})
def test_count_star(self):
with self.assertNumQueries(1) as ctx:
Book.objects.aggregate(n=Count("*"))
sql = ctx.captured_queries[0]["sql"]
self.assertIn("SELECT COUNT(*) ", sql)
def test_count_distinct_expression(self):
aggs = Book.objects.aggregate(
distinct_ratings=Count(
Case(When(pages__gt=300, then="rating")), distinct=True
),
)
self.assertEqual(aggs["distinct_ratings"], 4)
def test_distinct_on_aggregate(self):
for aggregate, expected_result in (
(Avg, 4.125),
(Count, 4),
(Sum, 16.5),
):
with self.subTest(aggregate=aggregate.__name__):
books = Book.objects.aggregate(
ratings=aggregate("rating", distinct=True)
)
self.assertEqual(books["ratings"], expected_result)
def test_non_grouped_annotation_not_in_group_by(self):
"""
An annotation not included in values() before an aggregate should be
excluded from the group by clause.
"""
qs = (
Book.objects.annotate(xprice=F("price"))
.filter(rating=4.0)
.values("rating")
.annotate(count=Count("publisher_id", distinct=True))
.values("count", "rating")
.order_by("count")
)
self.assertEqual(list(qs), [{"rating": 4.0, "count": 2}])
def test_grouped_annotation_in_group_by(self):
"""
An annotation included in values() before an aggregate should be
included in the group by clause.
"""
qs = (
Book.objects.annotate(xprice=F("price"))
.filter(rating=4.0)
.values("rating", "xprice")
.annotate(count=Count("publisher_id", distinct=True))
.values("count", "rating")
.order_by("count")
)
self.assertEqual(
list(qs),
[
{"rating": 4.0, "count": 1},
{"rating": 4.0, "count": 2},
],
)
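    # Hedged illustration of the contrast above (SQL shape approximate and
    # backend-dependent):
    #
    #   values("rating").annotate(...)            -> GROUP BY rating
    #   values("rating", "xprice").annotate(...)  -> GROUP BY rating, xprice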
def test_fkey_aggregate(self):
explicit = list(Author.objects.annotate(Count("book__id")))
implicit = list(Author.objects.annotate(Count("book")))
self.assertCountEqual(explicit, implicit)
def test_annotate_ordering(self):
books = (
Book.objects.values("rating")
.annotate(oldest=Max("authors__age"))
.order_by("oldest", "rating")
)
self.assertEqual(
list(books),
[
{"rating": 4.5, "oldest": 35},
{"rating": 3.0, "oldest": 45},
{"rating": 4.0, "oldest": 57},
{"rating": 5.0, "oldest": 57},
],
)
books = (
Book.objects.values("rating")
.annotate(oldest=Max("authors__age"))
.order_by("-oldest", "-rating")
)
self.assertEqual(
list(books),
[
{"rating": 5.0, "oldest": 57},
{"rating": 4.0, "oldest": 57},
{"rating": 3.0, "oldest": 45},
{"rating": 4.5, "oldest": 35},
],
)
def test_aggregate_annotation(self):
vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(
Avg("num_authors")
)
self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)})
def test_avg_duration_field(self):
# Explicit `output_field`.
self.assertEqual(
Publisher.objects.aggregate(Avg("duration", output_field=DurationField())),
{"duration__avg": datetime.timedelta(days=1, hours=12)},
)
# Implicit `output_field`.
self.assertEqual(
Publisher.objects.aggregate(Avg("duration")),
{"duration__avg": datetime.timedelta(days=1, hours=12)},
)
def test_sum_duration_field(self):
self.assertEqual(
Publisher.objects.aggregate(Sum("duration", output_field=DurationField())),
{"duration__sum": datetime.timedelta(days=3)},
)
def test_sum_distinct_aggregate(self):
"""
Sum on a distinct() QuerySet should aggregate only the distinct items.
"""
authors = Author.objects.filter(book__in=[self.b5, self.b6])
self.assertEqual(authors.count(), 3)
distinct_authors = authors.distinct()
self.assertEqual(distinct_authors.count(), 2)
# Selected author ages are 57 and 46
age_sum = distinct_authors.aggregate(Sum("age"))
self.assertEqual(age_sum["age__sum"], 103)
def test_filtering(self):
p = Publisher.objects.create(name="Expensive Publisher", num_awards=0)
Book.objects.create(
name="ExpensiveBook1",
pages=1,
isbn="111",
rating=3.5,
price=Decimal("1000"),
publisher=p,
contact_id=self.a1.id,
pubdate=datetime.date(2008, 12, 1),
)
Book.objects.create(
name="ExpensiveBook2",
pages=1,
isbn="222",
rating=4.0,
price=Decimal("1000"),
publisher=p,
contact_id=self.a1.id,
pubdate=datetime.date(2008, 12, 2),
)
Book.objects.create(
name="ExpensiveBook3",
pages=1,
isbn="333",
rating=4.5,
price=Decimal("35"),
publisher=p,
contact_id=self.a1.id,
pubdate=datetime.date(2008, 12, 3),
)
publishers = (
Publisher.objects.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
.order_by("pk")
)
self.assertQuerySetEqual(
publishers,
["Apress", "Prentice Hall", "Expensive Publisher"],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by(
"pk"
)
self.assertQuerySetEqual(
publishers,
[
"Apress",
"Apress",
"Sams",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = (
Publisher.objects.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1, book__price__lt=Decimal("40.0"))
.order_by("pk")
)
self.assertQuerySetEqual(
publishers,
["Apress", "Prentice Hall", "Expensive Publisher"],
lambda p: p.name,
)
publishers = (
Publisher.objects.filter(book__price__lt=Decimal("40.0"))
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
.order_by("pk")
)
self.assertQuerySetEqual(publishers, ["Apress"], lambda p: p.name)
publishers = (
Publisher.objects.annotate(num_books=Count("book"))
.filter(num_books__range=[1, 3])
.order_by("pk")
)
self.assertQuerySetEqual(
publishers,
[
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = (
Publisher.objects.annotate(num_books=Count("book"))
.filter(num_books__range=[1, 2])
.order_by("pk")
)
self.assertQuerySetEqual(
publishers,
["Apress", "Sams", "Prentice Hall", "Morgan Kaufmann"],
lambda p: p.name,
)
publishers = (
Publisher.objects.annotate(num_books=Count("book"))
.filter(num_books__in=[1, 3])
.order_by("pk")
)
self.assertQuerySetEqual(
publishers,
["Sams", "Morgan Kaufmann", "Expensive Publisher"],
lambda p: p.name,
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(
num_books__isnull=True
)
self.assertEqual(len(publishers), 0)
def test_annotation(self):
vals = Author.objects.filter(pk=self.a1.pk).aggregate(Count("friends__id"))
self.assertEqual(vals, {"friends__id__count": 2})
books = (
Book.objects.annotate(num_authors=Count("authors__name"))
.filter(num_authors__exact=2)
.order_by("pk")
)
self.assertQuerySetEqual(
books,
[
"The Definitive Guide to Django: Web Development Done Right",
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name,
)
authors = (
Author.objects.annotate(num_friends=Count("friends__id", distinct=True))
.filter(num_friends=0)
.order_by("pk")
)
self.assertQuerySetEqual(authors, ["Brad Dayley"], lambda a: a.name)
publishers = (
Publisher.objects.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
.order_by("pk")
)
self.assertQuerySetEqual(
publishers, ["Apress", "Prentice Hall"], lambda p: p.name
)
publishers = (
Publisher.objects.filter(book__price__lt=Decimal("40.0"))
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
)
self.assertQuerySetEqual(publishers, ["Apress"], lambda p: p.name)
books = Book.objects.annotate(num_authors=Count("authors__id")).filter(
authors__name__contains="Norvig", num_authors__gt=1
)
self.assertQuerySetEqual(
books, ["Artificial Intelligence: A Modern Approach"], lambda b: b.name
)
def test_more_aggregation(self):
a = Author.objects.get(name__contains="Norvig")
b = Book.objects.get(name__contains="Done Right")
b.authors.add(a)
b.save()
vals = (
Book.objects.annotate(num_authors=Count("authors__id"))
.filter(authors__name__contains="Norvig", num_authors__gt=1)
.aggregate(Avg("rating"))
)
self.assertEqual(vals, {"rating__avg": 4.25})
def test_even_more_aggregate(self):
publishers = (
Publisher.objects.annotate(
earliest_book=Min("book__pubdate"),
)
.exclude(earliest_book=None)
.order_by("earliest_book")
.values(
"earliest_book",
"num_awards",
"id",
"name",
)
)
self.assertEqual(
list(publishers),
[
{
"earliest_book": datetime.date(1991, 10, 15),
"num_awards": 9,
"id": self.p4.id,
"name": "Morgan Kaufmann",
},
{
"earliest_book": datetime.date(1995, 1, 15),
"num_awards": 7,
"id": self.p3.id,
"name": "Prentice Hall",
},
{
"earliest_book": datetime.date(2007, 12, 6),
"num_awards": 3,
"id": self.p1.id,
"name": "Apress",
},
{
"earliest_book": datetime.date(2008, 3, 3),
"num_awards": 1,
"id": self.p2.id,
"name": "Sams",
},
],
)
vals = Store.objects.aggregate(
Max("friday_night_closing"), Min("original_opening")
)
self.assertEqual(
vals,
{
"friday_night_closing__max": datetime.time(23, 59, 59),
"original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
)
def test_annotate_values_list(self):
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("pk", "isbn", "mean_age")
)
self.assertEqual(list(books), [(self.b1.id, "159059725", 34.5)])
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("isbn")
)
self.assertEqual(list(books), [("159059725",)])
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("mean_age")
)
self.assertEqual(list(books), [(34.5,)])
books = (
Book.objects.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("mean_age", flat=True)
)
self.assertEqual(list(books), [34.5])
books = (
Book.objects.values_list("price")
.annotate(count=Count("price"))
.order_by("-count", "price")
)
self.assertEqual(
list(books),
[
(Decimal("29.69"), 2),
(Decimal("23.09"), 1),
(Decimal("30"), 1),
(Decimal("75"), 1),
(Decimal("82.8"), 1),
],
)
def test_dates_with_aggregation(self):
"""
.dates() returns a distinct set of dates when applied to a
QuerySet with aggregation.
Refs #18056. Previously, .dates() would return distinct (date_kind,
aggregation) sets, in this case (year, num_authors), so 2008 would be
returned twice because there are books from 2008 with a different
number of authors.
"""
dates = Book.objects.annotate(num_authors=Count("authors")).dates(
"pubdate", "year"
)
self.assertSequenceEqual(
dates,
[
datetime.date(1991, 1, 1),
datetime.date(1995, 1, 1),
datetime.date(2007, 1, 1),
datetime.date(2008, 1, 1),
],
)
def test_values_aggregation(self):
# Refs #20782
max_rating = Book.objects.values("rating").aggregate(max_rating=Max("rating"))
self.assertEqual(max_rating["max_rating"], 5)
max_books_per_rating = (
Book.objects.values("rating")
.annotate(books_per_rating=Count("id"))
.aggregate(Max("books_per_rating"))
)
self.assertEqual(max_books_per_rating, {"books_per_rating__max": 3})
def test_ticket17424(self):
"""
Doing exclude() on a foreign model after annotate() doesn't crash.
"""
all_books = list(Book.objects.values_list("pk", flat=True).order_by("pk"))
annotated_books = Book.objects.order_by("pk").annotate(one=Count("id"))
        # The value doesn't matter; we just need any negative
        # constraint on a related model that's a no-op.
excluded_books = annotated_books.exclude(publisher__name="__UNLIKELY_VALUE__")
# Try to generate query tree
str(excluded_books.query)
self.assertQuerySetEqual(excluded_books, all_books, lambda x: x.pk)
# Check internal state
self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type)
self.assertIsNone(excluded_books.query.alias_map["aggregation_book"].join_type)
def test_ticket12886(self):
"""
Aggregation over sliced queryset works correctly.
"""
qs = Book.objects.order_by("-rating")[0:3]
vals = qs.aggregate(average_top3_rating=Avg("rating"))["average_top3_rating"]
self.assertAlmostEqual(vals, 4.5, places=2)
def test_ticket11881(self):
"""
        Subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE, or
        select_related() columns.
"""
qs = (
Book.objects.select_for_update()
.order_by("pk")
.select_related("publisher")
.annotate(max_pk=Max("pk"))
)
with CaptureQueriesContext(connection) as captured_queries:
qs.aggregate(avg_pk=Avg("max_pk"))
self.assertEqual(len(captured_queries), 1)
qstr = captured_queries[0]["sql"].lower()
self.assertNotIn("for update", qstr)
forced_ordering = connection.ops.force_no_ordering()
if forced_ordering:
            # If the backend needs to force an ordering, we make sure it's
# the only "ORDER BY" clause present in the query.
self.assertEqual(
re.findall(r"order by (\w+)", qstr),
[", ".join(f[1][0] for f in forced_ordering).lower()],
)
else:
self.assertNotIn("order by", qstr)
self.assertEqual(qstr.count(" join "), 0)
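    # Hedged sketch of the wrapper checked above (names approximate): the
    # outer aggregate runs over the annotated queryset as a subquery, with
    # ORDER BY, FOR UPDATE, and select_related() joins stripped inside:
    #
    #   SELECT AVG(max_pk) FROM (
    #       SELECT MAX(book.id) AS max_pk FROM book GROUP BY book.id
    #   ) subquery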
def test_decimal_max_digits_has_no_effect(self):
Book.objects.all().delete()
a1 = Author.objects.first()
p1 = Publisher.objects.first()
thedate = timezone.now()
for i in range(10):
Book.objects.create(
isbn="abcde{}".format(i),
name="none",
pages=10,
rating=4.0,
price=9999.98,
contact=a1,
publisher=p1,
pubdate=thedate,
)
book = Book.objects.aggregate(price_sum=Sum("price"))
self.assertEqual(book["price_sum"], Decimal("99999.80"))
def test_nonaggregate_aggregation_throws(self):
with self.assertRaisesMessage(TypeError, "fail is not an aggregate expression"):
Book.objects.aggregate(fail=F("price"))
def test_nonfield_annotation(self):
book = Book.objects.annotate(val=Max(Value(2))).first()
self.assertEqual(book.val, 2)
book = Book.objects.annotate(
val=Max(Value(2), output_field=IntegerField())
).first()
self.assertEqual(book.val, 2)
book = Book.objects.annotate(val=Max(2, output_field=IntegerField())).first()
self.assertEqual(book.val, 2)
def test_annotation_expressions(self):
authors = Author.objects.annotate(
combined_ages=Sum(F("age") + F("friends__age"))
).order_by("name")
authors2 = Author.objects.annotate(
combined_ages=Sum("age") + Sum("friends__age")
).order_by("name")
for qs in (authors, authors2):
self.assertQuerySetEqual(
qs,
[
("Adrian Holovaty", 132),
("Brad Dayley", None),
("Jacob Kaplan-Moss", 129),
("James Bennett", 63),
("Jeffrey Forcier", 128),
("Paul Bissex", 120),
("Peter Norvig", 103),
("Stuart Russell", 103),
("Wesley J. Chun", 176),
],
lambda a: (a.name, a.combined_ages),
)
def test_aggregation_expressions(self):
a1 = Author.objects.aggregate(av_age=Sum("age") / Count("*"))
a2 = Author.objects.aggregate(av_age=Sum("age") / Count("age"))
a3 = Author.objects.aggregate(av_age=Avg("age"))
self.assertEqual(a1, {"av_age": 37})
self.assertEqual(a2, {"av_age": 37})
self.assertEqual(a3, {"av_age": Approximate(37.4, places=1)})
def test_avg_decimal_field(self):
v = Book.objects.filter(rating=4).aggregate(avg_price=(Avg("price")))[
"avg_price"
]
self.assertIsInstance(v, Decimal)
self.assertEqual(v, Approximate(Decimal("47.39"), places=2))
def test_order_of_precedence(self):
p1 = Book.objects.filter(rating=4).aggregate(avg_price=(Avg("price") + 2) * 3)
self.assertEqual(p1, {"avg_price": Approximate(Decimal("148.18"), places=2)})
p2 = Book.objects.filter(rating=4).aggregate(avg_price=Avg("price") + 2 * 3)
self.assertEqual(p2, {"avg_price": Approximate(Decimal("53.39"), places=2)})
def test_combine_different_types(self):
msg = (
"Cannot infer type of '+' expression involving these types: FloatField, "
"DecimalField. You must set output_field."
)
qs = Book.objects.annotate(sums=Sum("rating") + Sum("pages") + Sum("price"))
with self.assertRaisesMessage(FieldError, msg):
qs.first()
with self.assertRaisesMessage(FieldError, msg):
qs.first()
b1 = Book.objects.annotate(
sums=Sum(F("rating") + F("pages") + F("price"), output_field=IntegerField())
).get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
b2 = Book.objects.annotate(
sums=Sum(F("rating") + F("pages") + F("price"), output_field=FloatField())
).get(pk=self.b4.pk)
self.assertEqual(b2.sums, 383.69)
b3 = Book.objects.annotate(
sums=Sum(F("rating") + F("pages") + F("price"), output_field=DecimalField())
).get(pk=self.b4.pk)
self.assertEqual(b3.sums, Approximate(Decimal("383.69"), places=2))
def test_complex_aggregations_require_kwarg(self):
with self.assertRaisesMessage(
TypeError, "Complex annotations require an alias"
):
Author.objects.annotate(Sum(F("age") + F("friends__age")))
with self.assertRaisesMessage(TypeError, "Complex aggregates require an alias"):
Author.objects.aggregate(Sum("age") / Count("age"))
with self.assertRaisesMessage(TypeError, "Complex aggregates require an alias"):
Author.objects.aggregate(Sum(1))
def test_aggregate_over_complex_annotation(self):
qs = Author.objects.annotate(combined_ages=Sum(F("age") + F("friends__age")))
age = qs.aggregate(max_combined_age=Max("combined_ages"))
self.assertEqual(age["max_combined_age"], 176)
age = qs.aggregate(max_combined_age_doubled=Max("combined_ages") * 2)
self.assertEqual(age["max_combined_age_doubled"], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max("combined_ages") + Max("combined_ages")
)
self.assertEqual(age["max_combined_age_doubled"], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max("combined_ages") + Max("combined_ages"),
sum_combined_age=Sum("combined_ages"),
)
self.assertEqual(age["max_combined_age_doubled"], 176 * 2)
self.assertEqual(age["sum_combined_age"], 954)
age = qs.aggregate(
max_combined_age_doubled=Max("combined_ages") + Max("combined_ages"),
sum_combined_age_doubled=Sum("combined_ages") + Sum("combined_ages"),
)
self.assertEqual(age["max_combined_age_doubled"], 176 * 2)
self.assertEqual(age["sum_combined_age_doubled"], 954 * 2)
def test_values_annotation_with_expression(self):
# ensure the F() is promoted to the group by clause
qs = Author.objects.values("name").annotate(another_age=Sum("age") + F("age"))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a["another_age"], 68)
qs = qs.annotate(friend_count=Count("friends"))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a["friend_count"], 2)
qs = (
qs.annotate(combined_age=Sum("age") + F("friends__age"))
.filter(name="Adrian Holovaty")
.order_by("-combined_age")
)
self.assertEqual(
list(qs),
[
{
"name": "Adrian Holovaty",
"another_age": 68,
"friend_count": 1,
"combined_age": 69,
},
{
"name": "Adrian Holovaty",
"another_age": 68,
"friend_count": 1,
"combined_age": 63,
},
],
)
vals = qs.values("name", "combined_age")
self.assertEqual(
list(vals),
[
{"name": "Adrian Holovaty", "combined_age": 69},
{"name": "Adrian Holovaty", "combined_age": 63},
],
)
def test_annotate_values_aggregate(self):
alias_age = (
Author.objects.annotate(age_alias=F("age"))
.values(
"age_alias",
)
.aggregate(sum_age=Sum("age_alias"))
)
age = Author.objects.values("age").aggregate(sum_age=Sum("age"))
self.assertEqual(alias_age["sum_age"], age["sum_age"])
def test_annotate_over_annotate(self):
author = (
Author.objects.annotate(age_alias=F("age"))
.annotate(sum_age=Sum("age_alias"))
.get(name="Adrian Holovaty")
)
other_author = Author.objects.annotate(sum_age=Sum("age")).get(
name="Adrian Holovaty"
)
self.assertEqual(author.sum_age, other_author.sum_age)
def test_aggregate_over_aggregate(self):
msg = "Cannot resolve keyword 'age_agg' into field."
with self.assertRaisesMessage(FieldError, msg):
Author.objects.aggregate(
age_agg=Sum(F("age")),
avg_age=Avg(F("age_agg")),
)
def test_annotated_aggregate_over_annotated_aggregate(self):
with self.assertRaisesMessage(
FieldError, "Cannot compute Sum('id__max'): 'id__max' is an aggregate"
):
Book.objects.annotate(Max("id")).annotate(Sum("id__max"))
class MyMax(Max):
def as_sql(self, compiler, connection):
self.set_source_expressions(self.get_source_expressions()[0:1])
return super().as_sql(compiler, connection)
with self.assertRaisesMessage(
FieldError, "Cannot compute Max('id__max'): 'id__max' is an aggregate"
):
Book.objects.annotate(Max("id")).annotate(my_max=MyMax("id__max", "price"))
def test_multi_arg_aggregate(self):
class MyMax(Max):
output_field = DecimalField()
def as_sql(self, compiler, connection):
copy = self.copy()
copy.set_source_expressions(copy.get_source_expressions()[0:1])
return super(MyMax, copy).as_sql(compiler, connection)
with self.assertRaisesMessage(TypeError, "Complex aggregates require an alias"):
Book.objects.aggregate(MyMax("pages", "price"))
with self.assertRaisesMessage(
TypeError, "Complex annotations require an alias"
):
Book.objects.annotate(MyMax("pages", "price"))
Book.objects.aggregate(max_field=MyMax("pages", "price"))
def test_add_implementation(self):
class MySum(Sum):
pass
# test completely changing how the output is rendered
def lower_case_function_override(self, compiler, connection):
sql, params = compiler.compile(self.source_expressions[0])
substitutions = {
"function": self.function.lower(),
"expressions": sql,
"distinct": "",
}
substitutions.update(self.extra)
return self.template % substitutions, params
setattr(MySum, "as_" + connection.vendor, lower_case_function_override)
qs = Book.objects.annotate(
sums=MySum(
F("rating") + F("pages") + F("price"), output_field=IntegerField()
)
)
self.assertEqual(str(qs.query).count("sum("), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
# test changing the dict and delegating
def lower_case_function_super(self, compiler, connection):
self.extra["function"] = self.function.lower()
return super(MySum, self).as_sql(compiler, connection)
setattr(MySum, "as_" + connection.vendor, lower_case_function_super)
qs = Book.objects.annotate(
sums=MySum(
F("rating") + F("pages") + F("price"), output_field=IntegerField()
)
)
self.assertEqual(str(qs.query).count("sum("), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
# test overriding all parts of the template
def be_evil(self, compiler, connection):
substitutions = {"function": "MAX", "expressions": "2", "distinct": ""}
substitutions.update(self.extra)
return self.template % substitutions, ()
setattr(MySum, "as_" + connection.vendor, be_evil)
qs = Book.objects.annotate(
sums=MySum(
F("rating") + F("pages") + F("price"), output_field=IntegerField()
)
)
self.assertEqual(str(qs.query).count("MAX("), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 2)
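    # Hedged note on the hook used above: the compiler dispatches to
    # "as_%s" % connection.vendor (e.g. as_sqlite, as_postgresql) before
    # falling back to as_sql(), so an equivalent explicit override is:
    #
    #   class MySum(Sum):
    #       def as_sqlite(self, compiler, connection, **extra_context):
    #           return self.as_sql(
    #               compiler, connection, function="sum", **extra_context
    #           )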
def test_complex_values_aggregation(self):
max_rating = Book.objects.values("rating").aggregate(
double_max_rating=Max("rating") + Max("rating")
)
self.assertEqual(max_rating["double_max_rating"], 5 * 2)
max_books_per_rating = (
Book.objects.values("rating")
.annotate(books_per_rating=Count("id") + 5)
.aggregate(Max("books_per_rating"))
)
self.assertEqual(max_books_per_rating, {"books_per_rating__max": 3 + 5})
def test_expression_on_aggregation(self):
qs = (
Publisher.objects.annotate(
price_or_median=Greatest(
Avg("book__rating", output_field=DecimalField()), Avg("book__price")
)
)
.filter(price_or_median__gte=F("num_awards"))
.order_by("num_awards")
)
self.assertQuerySetEqual(qs, [1, 3, 7, 9], lambda v: v.num_awards)
qs2 = (
Publisher.objects.annotate(
rating_or_num_awards=Greatest(
Avg("book__rating"), F("num_awards"), output_field=FloatField()
)
)
.filter(rating_or_num_awards__gt=F("num_awards"))
.order_by("num_awards")
)
self.assertQuerySetEqual(qs2, [1, 3], lambda v: v.num_awards)
def test_arguments_must_be_expressions(self):
msg = "QuerySet.aggregate() received non-expression(s): %s."
with self.assertRaisesMessage(TypeError, msg % FloatField()):
Book.objects.aggregate(FloatField())
with self.assertRaisesMessage(TypeError, msg % True):
Book.objects.aggregate(is_book=True)
with self.assertRaisesMessage(
TypeError, msg % ", ".join([str(FloatField()), "True"])
):
Book.objects.aggregate(FloatField(), Avg("price"), is_book=True)
def test_aggregation_subquery_annotation(self):
"""Subquery annotations are excluded from the GROUP BY if they are
not explicitly grouped against."""
latest_book_pubdate_qs = (
Book.objects.filter(publisher=OuterRef("pk"))
.order_by("-pubdate")
.values("pubdate")[:1]
)
publisher_qs = Publisher.objects.annotate(
latest_book_pubdate=Subquery(latest_book_pubdate_qs),
).annotate(count=Count("book"))
with self.assertNumQueries(1) as ctx:
list(publisher_qs)
self.assertEqual(ctx[0]["sql"].count("SELECT"), 2)
# The GROUP BY should not be by alias either.
self.assertEqual(ctx[0]["sql"].lower().count("latest_book_pubdate"), 1)
def test_aggregation_subquery_annotation_exists(self):
latest_book_pubdate_qs = (
Book.objects.filter(publisher=OuterRef("pk"))
.order_by("-pubdate")
.values("pubdate")[:1]
)
publisher_qs = Publisher.objects.annotate(
latest_book_pubdate=Subquery(latest_book_pubdate_qs),
count=Count("book"),
)
self.assertTrue(publisher_qs.exists())
def test_aggregation_filter_exists(self):
publishers_having_more_than_one_book_qs = (
Book.objects.values("publisher")
.annotate(cnt=Count("isbn"))
.filter(cnt__gt=1)
)
query = publishers_having_more_than_one_book_qs.query.exists()
_, _, group_by = query.get_compiler(connection=connection).pre_sql_setup()
self.assertEqual(len(group_by), 1)
def test_aggregation_exists_annotation(self):
published_books = Book.objects.filter(publisher=OuterRef("pk"))
publisher_qs = Publisher.objects.annotate(
published_book=Exists(published_books),
count=Count("book"),
).values_list("name", flat=True)
self.assertCountEqual(
list(publisher_qs),
[
"Apress",
"Morgan Kaufmann",
"Jonno's House of Books",
"Prentice Hall",
"Sams",
],
)
def test_aggregation_subquery_annotation_values(self):
"""
Subquery annotations and external aliases are excluded from the GROUP
BY if they are not selected.
"""
books_qs = (
Book.objects.annotate(
first_author_the_same_age=Subquery(
Author.objects.filter(
age=OuterRef("contact__friends__age"),
)
.order_by("age")
.values("id")[:1],
)
)
.filter(
publisher=self.p1,
first_author_the_same_age__isnull=False,
)
.annotate(
min_age=Min("contact__friends__age"),
)
.values("name", "min_age")
.order_by("name")
)
self.assertEqual(
list(books_qs),
[
{"name": "Practical Django Projects", "min_age": 34},
{
"name": (
"The Definitive Guide to Django: Web Development Done Right"
),
"min_age": 29,
},
],
)
def test_aggregation_subquery_annotation_values_collision(self):
books_rating_qs = Book.objects.filter(
publisher=OuterRef("pk"),
price=Decimal("29.69"),
).values("rating")
publisher_qs = (
Publisher.objects.filter(
book__contact__age__gt=20,
name=self.p1.name,
)
.annotate(
rating=Subquery(books_rating_qs),
contacts_count=Count("book__contact"),
)
.values("rating")
.annotate(total_count=Count("rating"))
)
self.assertEqual(
list(publisher_qs),
[
{"rating": 4.0, "total_count": 2},
],
)
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_aggregation_subquery_annotation_multivalued(self):
"""
Subquery annotations must be included in the GROUP BY if they use
potentially multivalued relations (contain the LOOKUP_SEP).
"""
subquery_qs = Author.objects.filter(
pk=OuterRef("pk"),
book__name=OuterRef("book__name"),
).values("pk")
author_qs = Author.objects.annotate(
subquery_id=Subquery(subquery_qs),
).annotate(count=Count("book"))
self.assertEqual(author_qs.count(), Author.objects.count())
def test_aggregation_order_by_not_selected_annotation_values(self):
result_asc = [
self.b4.pk,
self.b3.pk,
self.b1.pk,
self.b2.pk,
self.b5.pk,
self.b6.pk,
]
result_desc = result_asc[::-1]
tests = [
("min_related_age", result_asc),
("-min_related_age", result_desc),
(F("min_related_age"), result_asc),
(F("min_related_age").asc(), result_asc),
(F("min_related_age").desc(), result_desc),
]
for ordering, expected_result in tests:
with self.subTest(ordering=ordering):
books_qs = (
Book.objects.annotate(
min_age=Min("authors__age"),
)
.annotate(
min_related_age=Coalesce("min_age", "contact__age"),
)
.order_by(ordering)
.values_list("pk", flat=True)
)
self.assertEqual(list(books_qs), expected_result)
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_group_by_subquery_annotation(self):
"""
Subquery annotations are included in the GROUP BY if they are
grouped against.
"""
long_books_count_qs = (
Book.objects.filter(
publisher=OuterRef("pk"),
pages__gt=400,
)
.values("publisher")
.annotate(count=Count("pk"))
.values("count")
)
groups = [
Subquery(long_books_count_qs),
long_books_count_qs,
long_books_count_qs.query,
]
for group in groups:
with self.subTest(group=group.__class__.__name__):
long_books_count_breakdown = Publisher.objects.values_list(
group,
).annotate(total=Count("*"))
self.assertEqual(dict(long_books_count_breakdown), {None: 1, 1: 4})
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_group_by_exists_annotation(self):
"""
Exists annotations are included in the GROUP BY if they are
grouped against.
"""
long_books_qs = Book.objects.filter(
publisher=OuterRef("pk"),
pages__gt=800,
)
has_long_books_breakdown = Publisher.objects.values_list(
Exists(long_books_qs),
).annotate(total=Count("*"))
self.assertEqual(dict(has_long_books_breakdown), {True: 2, False: 3})
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_aggregation_subquery_annotation_related_field(self):
publisher = Publisher.objects.create(name=self.a9.name, num_awards=2)
book = Book.objects.create(
isbn="159059999",
name="Test book.",
pages=819,
rating=2.5,
price=Decimal("14.44"),
contact=self.a9,
publisher=publisher,
pubdate=datetime.date(2019, 12, 6),
)
book.authors.add(self.a5, self.a6, self.a7)
books_qs = (
Book.objects.annotate(
contact_publisher=Subquery(
Publisher.objects.filter(
pk=OuterRef("publisher"),
name=OuterRef("contact__name"),
).values("name")[:1],
)
)
.filter(
contact_publisher__isnull=False,
)
.annotate(count=Count("authors"))
)
with self.assertNumQueries(1) as ctx:
self.assertSequenceEqual(books_qs, [book])
        # Three SELECTs are expected: the outer query, the annotation
        # subquery, and the WHERE subquery. The GROUP BY references the
        # selected alias instead of repeating the subquery, if allowed.
if connection.features.allows_group_by_refs:
self.assertEqual(ctx[0]["sql"].count("SELECT"), 3)
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_aggregation_nested_subquery_outerref(self):
publisher_with_same_name = Publisher.objects.filter(
id__in=Subquery(
Publisher.objects.filter(
name=OuterRef(OuterRef("publisher__name")),
).values("id"),
),
).values(publisher_count=Count("id"))[:1]
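        # OuterRef(OuterRef("publisher__name")) defers resolution by two
        # subquery levels, so the innermost filter ultimately binds to
        # publisher__name on the annotated Book queryset below.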
books_breakdown = Book.objects.annotate(
publisher_count=Subquery(publisher_with_same_name),
authors_count=Count("authors"),
).values_list("publisher_count", flat=True)
self.assertSequenceEqual(books_breakdown, [1] * 6)
def test_aggregation_exists_multivalued_outeref(self):
self.assertCountEqual(
Publisher.objects.annotate(
books_exists=Exists(
Book.objects.filter(publisher=OuterRef("book__publisher"))
),
books_count=Count("book"),
),
Publisher.objects.all(),
)
def test_filter_in_subquery_or_aggregation(self):
"""
        Filtering against an aggregate requires the use of the HAVING clause.
        If such a filter is OR'ed with a non-aggregate filter, the latter must
        also be moved to the HAVING clause and have its grouping columns
        added to the GROUP BY.
        When this is done with a subquery, the specialized logic that groups
        by the subquery's outer reference columns should be used instead of
        the subquery itself, as the subquery might return multiple rows.
"""
authors = Author.objects.annotate(
Count("book"),
).filter(Q(book__count__gt=0) | Q(pk__in=Book.objects.values("authors")))
self.assertCountEqual(authors, Author.objects.all())
def test_aggregation_random_ordering(self):
"""Random() is not included in the GROUP BY when used for ordering."""
authors = Author.objects.annotate(contact_count=Count("book")).order_by("?")
self.assertQuerySetEqual(
authors,
[
("Adrian Holovaty", 1),
("Jacob Kaplan-Moss", 1),
("Brad Dayley", 1),
("James Bennett", 1),
("Jeffrey Forcier", 1),
("Paul Bissex", 1),
("Wesley J. Chun", 1),
("Stuart Russell", 1),
("Peter Norvig", 2),
],
lambda a: (a.name, a.contact_count),
ordered=False,
)
def test_empty_result_optimization(self):
with self.assertNumQueries(0):
self.assertEqual(
Publisher.objects.none().aggregate(
sum_awards=Sum("num_awards"),
books_count=Count("book"),
),
{
"sum_awards": None,
"books_count": 0,
},
)
# Expression without empty_result_set_value forces queries to be
# executed even if they would return an empty result set.
raw_books_count = Func("book", function="COUNT")
raw_books_count.contains_aggregate = True
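        # Built-in aggregates define empty_result_set_value, letting the ORM
        # compute the result without hitting the database; this raw Func does
        # not, so one query is executed below.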
with self.assertNumQueries(1):
self.assertEqual(
Publisher.objects.none().aggregate(
sum_awards=Sum("num_awards"),
books_count=raw_books_count,
),
{
"sum_awards": None,
"books_count": 0,
},
)
def test_coalesced_empty_result_set(self):
with self.assertNumQueries(0):
self.assertEqual(
Publisher.objects.none().aggregate(
sum_awards=Coalesce(Sum("num_awards"), 0),
)["sum_awards"],
0,
)
# Multiple expressions.
with self.assertNumQueries(0):
self.assertEqual(
Publisher.objects.none().aggregate(
sum_awards=Coalesce(Sum("num_awards"), None, 0),
)["sum_awards"],
0,
)
# Nested coalesce.
with self.assertNumQueries(0):
self.assertEqual(
Publisher.objects.none().aggregate(
sum_awards=Coalesce(Coalesce(Sum("num_awards"), None), 0),
)["sum_awards"],
0,
)
# Expression coalesce.
with self.assertNumQueries(1):
self.assertIsInstance(
Store.objects.none().aggregate(
latest_opening=Coalesce(
Max("original_opening"),
RawSQL("CURRENT_TIMESTAMP", []),
),
)["latest_opening"],
datetime.datetime,
)
def test_aggregation_default_unsupported_by_count(self):
msg = "Count does not allow default."
with self.assertRaisesMessage(TypeError, msg):
Count("age", default=0)
def test_aggregation_default_unset(self):
for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]:
with self.subTest(Aggregate):
result = Author.objects.filter(age__gt=100).aggregate(
value=Aggregate("age"),
)
self.assertIsNone(result["value"])
def test_aggregation_default_zero(self):
for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]:
with self.subTest(Aggregate):
result = Author.objects.filter(age__gt=100).aggregate(
value=Aggregate("age", default=0),
)
self.assertEqual(result["value"], 0)
def test_aggregation_default_integer(self):
for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]:
with self.subTest(Aggregate):
result = Author.objects.filter(age__gt=100).aggregate(
value=Aggregate("age", default=21),
)
self.assertEqual(result["value"], 21)
def test_aggregation_default_expression(self):
for Aggregate in [Avg, Max, Min, StdDev, Sum, Variance]:
with self.subTest(Aggregate):
result = Author.objects.filter(age__gt=100).aggregate(
value=Aggregate("age", default=Value(5) * Value(7)),
)
self.assertEqual(result["value"], 35)
def test_aggregation_default_group_by(self):
qs = (
Publisher.objects.values("name")
.annotate(
books=Count("book"),
pages=Sum("book__pages", default=0),
)
.filter(books=0)
)
self.assertSequenceEqual(
qs,
[{"name": "Jonno's House of Books", "books": 0, "pages": 0}],
)
def test_aggregation_default_compound_expression(self):
# Scale rating to a percentage; default to 50% if no books published.
formula = Avg("book__rating", default=2.5) * 20.0
queryset = Publisher.objects.annotate(rating=formula).order_by("name")
self.assertSequenceEqual(
queryset.values("name", "rating"),
[
{"name": "Apress", "rating": 85.0},
{"name": "Jonno's House of Books", "rating": 50.0},
{"name": "Morgan Kaufmann", "rating": 100.0},
{"name": "Prentice Hall", "rating": 80.0},
{"name": "Sams", "rating": 60.0},
],
)
def test_aggregation_default_using_time_from_python(self):
expr = Min(
"store__friday_night_closing",
filter=~Q(store__name="Amazon.com"),
default=datetime.time(17),
)
if connection.vendor == "mysql":
# Workaround for #30224 for MySQL & MariaDB.
expr.default = Cast(expr.default, TimeField())
queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn")
self.assertSequenceEqual(
queryset.values("isbn", "oldest_store_opening"),
[
{"isbn": "013235613", "oldest_store_opening": datetime.time(21, 30)},
{
"isbn": "013790395",
"oldest_store_opening": datetime.time(23, 59, 59),
},
{"isbn": "067232959", "oldest_store_opening": datetime.time(17)},
{"isbn": "155860191", "oldest_store_opening": datetime.time(21, 30)},
{
"isbn": "159059725",
"oldest_store_opening": datetime.time(23, 59, 59),
},
{"isbn": "159059996", "oldest_store_opening": datetime.time(21, 30)},
],
)
def test_aggregation_default_using_time_from_database(self):
now = timezone.now().astimezone(datetime.timezone.utc)
expr = Min(
"store__friday_night_closing",
filter=~Q(store__name="Amazon.com"),
default=TruncHour(NowUTC(), output_field=TimeField()),
)
queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn")
self.assertSequenceEqual(
queryset.values("isbn", "oldest_store_opening"),
[
{"isbn": "013235613", "oldest_store_opening": datetime.time(21, 30)},
{
"isbn": "013790395",
"oldest_store_opening": datetime.time(23, 59, 59),
},
{"isbn": "067232959", "oldest_store_opening": datetime.time(now.hour)},
{"isbn": "155860191", "oldest_store_opening": datetime.time(21, 30)},
{
"isbn": "159059725",
"oldest_store_opening": datetime.time(23, 59, 59),
},
{"isbn": "159059996", "oldest_store_opening": datetime.time(21, 30)},
],
)
def test_aggregation_default_using_date_from_python(self):
expr = Min("book__pubdate", default=datetime.date(1970, 1, 1))
if connection.vendor == "mysql":
# Workaround for #30224 for MySQL & MariaDB.
expr.default = Cast(expr.default, DateField())
queryset = Publisher.objects.annotate(earliest_pubdate=expr).order_by("name")
self.assertSequenceEqual(
queryset.values("name", "earliest_pubdate"),
[
{"name": "Apress", "earliest_pubdate": datetime.date(2007, 12, 6)},
{
"name": "Jonno's House of Books",
"earliest_pubdate": datetime.date(1970, 1, 1),
},
{
"name": "Morgan Kaufmann",
"earliest_pubdate": datetime.date(1991, 10, 15),
},
{
"name": "Prentice Hall",
"earliest_pubdate": datetime.date(1995, 1, 15),
},
{"name": "Sams", "earliest_pubdate": datetime.date(2008, 3, 3)},
],
)
def test_aggregation_default_using_date_from_database(self):
now = timezone.now().astimezone(datetime.timezone.utc)
expr = Min("book__pubdate", default=TruncDate(NowUTC()))
queryset = Publisher.objects.annotate(earliest_pubdate=expr).order_by("name")
self.assertSequenceEqual(
queryset.values("name", "earliest_pubdate"),
[
{"name": "Apress", "earliest_pubdate": datetime.date(2007, 12, 6)},
{"name": "Jonno's House of Books", "earliest_pubdate": now.date()},
{
"name": "Morgan Kaufmann",
"earliest_pubdate": datetime.date(1991, 10, 15),
},
{
"name": "Prentice Hall",
"earliest_pubdate": datetime.date(1995, 1, 15),
},
{"name": "Sams", "earliest_pubdate": datetime.date(2008, 3, 3)},
],
)
def test_aggregation_default_using_datetime_from_python(self):
expr = Min(
"store__original_opening",
filter=~Q(store__name="Amazon.com"),
default=datetime.datetime(1970, 1, 1),
)
if connection.vendor == "mysql":
# Workaround for #30224 for MySQL & MariaDB.
expr.default = Cast(expr.default, DateTimeField())
queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn")
self.assertSequenceEqual(
queryset.values("isbn", "oldest_store_opening"),
[
{
"isbn": "013235613",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
{
"isbn": "013790395",
"oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37),
},
{
"isbn": "067232959",
"oldest_store_opening": datetime.datetime(1970, 1, 1),
},
{
"isbn": "155860191",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
{
"isbn": "159059725",
"oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37),
},
{
"isbn": "159059996",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
],
)
def test_aggregation_default_using_datetime_from_database(self):
now = timezone.now().astimezone(datetime.timezone.utc)
expr = Min(
"store__original_opening",
filter=~Q(store__name="Amazon.com"),
default=TruncHour(NowUTC(), output_field=DateTimeField()),
)
queryset = Book.objects.annotate(oldest_store_opening=expr).order_by("isbn")
self.assertSequenceEqual(
queryset.values("isbn", "oldest_store_opening"),
[
{
"isbn": "013235613",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
{
"isbn": "013790395",
"oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37),
},
{
"isbn": "067232959",
"oldest_store_opening": now.replace(
minute=0, second=0, microsecond=0, tzinfo=None
),
},
{
"isbn": "155860191",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
{
"isbn": "159059725",
"oldest_store_opening": datetime.datetime(2001, 3, 15, 11, 23, 37),
},
{
"isbn": "159059996",
"oldest_store_opening": datetime.datetime(1945, 4, 25, 16, 24, 14),
},
],
)
def test_aggregation_default_using_duration_from_python(self):
result = Publisher.objects.filter(num_awards__gt=3).aggregate(
value=Sum("duration", default=datetime.timedelta(0)),
)
self.assertEqual(result["value"], datetime.timedelta(0))
def test_aggregation_default_using_duration_from_database(self):
result = Publisher.objects.filter(num_awards__gt=3).aggregate(
value=Sum("duration", default=Now() - Now()),
)
self.assertEqual(result["value"], datetime.timedelta(0))
def test_aggregation_default_using_decimal_from_python(self):
result = Book.objects.filter(rating__lt=3.0).aggregate(
value=Sum("price", default=Decimal("0.00")),
)
self.assertEqual(result["value"], Decimal("0.00"))
def test_aggregation_default_using_decimal_from_database(self):
result = Book.objects.filter(rating__lt=3.0).aggregate(
value=Sum("price", default=Pi()),
)
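        # Pi() is evaluated by the database, so allow for float-to-Decimal
        # rounding differences across backends.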
self.assertAlmostEqual(result["value"], Decimal.from_float(math.pi), places=6)
def test_aggregation_default_passed_another_aggregate(self):
result = Book.objects.aggregate(
value=Sum("price", filter=Q(rating__lt=3.0), default=Avg("pages") / 10.0),
)
self.assertAlmostEqual(result["value"], Decimal("61.72"), places=2)
def test_aggregation_default_after_annotation(self):
result = Publisher.objects.annotate(
double_num_awards=F("num_awards") * 2,
).aggregate(value=Sum("double_num_awards", default=0))
self.assertEqual(result["value"], 40)
def test_aggregation_default_not_in_aggregate(self):
result = Publisher.objects.annotate(
avg_rating=Avg("book__rating", default=2.5),
).aggregate(Sum("num_awards"))
self.assertEqual(result["num_awards__sum"], 20)
def test_exists_none_with_aggregate(self):
qs = Book.objects.annotate(
count=Count("id"),
exists=Exists(Author.objects.none()),
)
self.assertEqual(len(qs), 6)
def test_alias_sql_injection(self):
crafted_alias = """injected_name" from "aggregation_author"; --"""
msg = (
"Column aliases cannot contain whitespace characters, quotation marks, "
"semicolons, or SQL comments."
)
with self.assertRaisesMessage(ValueError, msg):
Author.objects.aggregate(**{crafted_alias: Avg("age")})
def test_exists_extra_where_with_aggregate(self):
qs = Book.objects.annotate(
count=Count("id"),
exists=Exists(Author.objects.extra(where=["1=0"])),
)
self.assertEqual(len(qs), 6)
def test_aggregation_over_annotation_shared_alias(self):
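        # In effect, the aggregate runs over a subquery of the annotated
        # queryset, so the inner and outer "agg" aliases don't collide.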
self.assertEqual(
Publisher.objects.annotate(agg=Count("book__authors"),).aggregate(
agg=Count("agg"),
),
{"agg": 5},
)
class AggregateAnnotationPruningTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(age=1)
cls.a2 = Author.objects.create(age=2)
cls.p1 = Publisher.objects.create(num_awards=1)
cls.p2 = Publisher.objects.create(num_awards=0)
cls.b1 = Book.objects.create(
name="b1",
publisher=cls.p1,
pages=100,
rating=4.5,
price=10,
contact=cls.a1,
pubdate=datetime.date.today(),
)
cls.b1.authors.add(cls.a1)
cls.b2 = Book.objects.create(
name="b2",
publisher=cls.p2,
pages=1000,
rating=3.2,
price=50,
contact=cls.a2,
pubdate=datetime.date.today(),
)
cls.b2.authors.add(cls.a1, cls.a2)
def test_unused_aliased_aggregate_pruned(self):
with CaptureQueriesContext(connection) as ctx:
cnt = Book.objects.alias(
authors_count=Count("authors"),
).count()
self.assertEqual(cnt, 2)
sql = ctx.captured_queries[0]["sql"].lower()
self.assertEqual(sql.count("select"), 2, "Subquery wrapping required")
self.assertNotIn("authors_count", sql)
def test_non_aggregate_annotation_pruned(self):
with CaptureQueriesContext(connection) as ctx:
Book.objects.annotate(
name_lower=Lower("name"),
).count()
sql = ctx.captured_queries[0]["sql"].lower()
self.assertEqual(sql.count("select"), 1, "No subquery wrapping required")
self.assertNotIn("name_lower", sql)
def test_unreferenced_aggregate_annotation_pruned(self):
with CaptureQueriesContext(connection) as ctx:
cnt = Book.objects.annotate(
authors_count=Count("authors"),
).count()
self.assertEqual(cnt, 2)
sql = ctx.captured_queries[0]["sql"].lower()
self.assertEqual(sql.count("select"), 2, "Subquery wrapping required")
self.assertNotIn("authors_count", sql)
def test_referenced_aggregate_annotation_kept(self):
with CaptureQueriesContext(connection) as ctx:
Book.objects.annotate(
authors_count=Count("authors"),
).aggregate(Avg("authors_count"))
sql = ctx.captured_queries[0]["sql"].lower()
self.assertEqual(sql.count("select"), 2, "Subquery wrapping required")
self.assertEqual(sql.count("authors_count"), 2)
|
c5061f8145cc5a672adfd9e9d72c51ba7923559fa3770f37de8e53af77e06895 | import logging
import os
import unittest
import warnings
from io import StringIO
from unittest import mock
from django.conf import settings
from django.contrib.staticfiles.finders import get_finder, get_finders
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import default_storage
from django.db import (
IntegrityError,
connection,
connections,
models,
router,
transaction,
)
from django.forms import (
CharField,
EmailField,
Form,
IntegerField,
ValidationError,
formset_factory,
)
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.test import (
SimpleTestCase,
TestCase,
TransactionTestCase,
skipIfDBFeature,
skipUnlessDBFeature,
)
from django.test.html import HTMLParseError, parse_html
from django.test.testcases import DatabaseOperationForbidden
from django.test.utils import (
CaptureQueriesContext,
TestContextDecorator,
ignore_warnings,
isolate_apps,
override_settings,
setup_test_environment,
)
from django.urls import NoReverseMatch, path, reverse, reverse_lazy
from django.utils.deprecation import RemovedInDjango50Warning, RemovedInDjango51Warning
from django.utils.log import DEFAULT_LOGGING
from django.utils.version import PY311
from .models import Car, Person, PossessedCar
from .views import empty_response
class SkippingTestCase(SimpleTestCase):
def _assert_skipping(self, func, expected_exc, msg=None):
try:
if msg is not None:
with self.assertRaisesMessage(expected_exc, msg):
func()
else:
with self.assertRaises(expected_exc):
func()
except unittest.SkipTest:
self.fail("%s should not result in a skipped test." % func.__name__)
def test_skip_unless_db_feature(self):
"""
Testing the django.test.skipUnlessDBFeature decorator.
"""
# Total hack, but it works, just want an attribute that's always true.
@skipUnlessDBFeature("__class__")
def test_func():
raise ValueError
@skipUnlessDBFeature("notprovided")
def test_func2():
raise ValueError
@skipUnlessDBFeature("__class__", "__class__")
def test_func3():
raise ValueError
@skipUnlessDBFeature("__class__", "notprovided")
def test_func4():
raise ValueError
self._assert_skipping(test_func, ValueError)
self._assert_skipping(test_func2, unittest.SkipTest)
self._assert_skipping(test_func3, ValueError)
self._assert_skipping(test_func4, unittest.SkipTest)
class SkipTestCase(SimpleTestCase):
@skipUnlessDBFeature("missing")
def test_foo(self):
pass
self._assert_skipping(
SkipTestCase("test_foo").test_foo,
ValueError,
"skipUnlessDBFeature cannot be used on test_foo (test_utils.tests."
"SkippingTestCase.test_skip_unless_db_feature.<locals>.SkipTestCase%s) "
"as SkippingTestCase.test_skip_unless_db_feature.<locals>.SkipTestCase "
"doesn't allow queries against the 'default' database."
# Python 3.11 uses fully qualified test name in the output.
% (".test_foo" if PY311 else ""),
)
def test_skip_if_db_feature(self):
"""
Testing the django.test.skipIfDBFeature decorator.
"""
@skipIfDBFeature("__class__")
def test_func():
raise ValueError
@skipIfDBFeature("notprovided")
def test_func2():
raise ValueError
@skipIfDBFeature("__class__", "__class__")
def test_func3():
raise ValueError
@skipIfDBFeature("__class__", "notprovided")
def test_func4():
raise ValueError
@skipIfDBFeature("notprovided", "notprovided")
def test_func5():
raise ValueError
self._assert_skipping(test_func, unittest.SkipTest)
self._assert_skipping(test_func2, ValueError)
self._assert_skipping(test_func3, unittest.SkipTest)
self._assert_skipping(test_func4, unittest.SkipTest)
self._assert_skipping(test_func5, ValueError)
class SkipTestCase(SimpleTestCase):
@skipIfDBFeature("missing")
def test_foo(self):
pass
self._assert_skipping(
SkipTestCase("test_foo").test_foo,
ValueError,
"skipIfDBFeature cannot be used on test_foo (test_utils.tests."
"SkippingTestCase.test_skip_if_db_feature.<locals>.SkipTestCase%s) "
"as SkippingTestCase.test_skip_if_db_feature.<locals>.SkipTestCase "
"doesn't allow queries against the 'default' database."
# Python 3.11 uses fully qualified test name in the output.
% (".test_foo" if PY311 else ""),
)
class SkippingClassTestCase(TestCase):
def test_skip_class_unless_db_feature(self):
@skipUnlessDBFeature("__class__")
class NotSkippedTests(TestCase):
def test_dummy(self):
return
@skipUnlessDBFeature("missing")
@skipIfDBFeature("__class__")
class SkippedTests(TestCase):
def test_will_be_skipped(self):
self.fail("We should never arrive here.")
@skipIfDBFeature("__dict__")
class SkippedTestsSubclass(SkippedTests):
pass
test_suite = unittest.TestSuite()
test_suite.addTest(NotSkippedTests("test_dummy"))
try:
test_suite.addTest(SkippedTests("test_will_be_skipped"))
test_suite.addTest(SkippedTestsSubclass("test_will_be_skipped"))
except unittest.SkipTest:
self.fail("SkipTest should not be raised here.")
result = unittest.TextTestRunner(stream=StringIO()).run(test_suite)
self.assertEqual(result.testsRun, 3)
self.assertEqual(len(result.skipped), 2)
self.assertEqual(result.skipped[0][1], "Database has feature(s) __class__")
self.assertEqual(result.skipped[1][1], "Database has feature(s) __class__")
def test_missing_default_databases(self):
@skipIfDBFeature("missing")
class MissingDatabases(SimpleTestCase):
def test_assertion_error(self):
pass
suite = unittest.TestSuite()
try:
suite.addTest(MissingDatabases("test_assertion_error"))
except unittest.SkipTest:
self.fail("SkipTest should not be raised at this stage")
runner = unittest.TextTestRunner(stream=StringIO())
msg = (
"skipIfDBFeature cannot be used on <class 'test_utils.tests."
"SkippingClassTestCase.test_missing_default_databases.<locals>."
"MissingDatabases'> as it doesn't allow queries against the "
"'default' database."
)
with self.assertRaisesMessage(ValueError, msg):
runner.run(suite)
@override_settings(ROOT_URLCONF="test_utils.urls")
class AssertNumQueriesTests(TestCase):
def test_assert_num_queries(self):
def test_func():
raise ValueError
with self.assertRaises(ValueError):
self.assertNumQueries(2, test_func)
def test_assert_num_queries_with_client(self):
person = Person.objects.create(name="test")
self.assertNumQueries(
1, self.client.get, "/test_utils/get_person/%s/" % person.pk
)
self.assertNumQueries(
1, self.client.get, "/test_utils/get_person/%s/" % person.pk
)
def test_func():
self.client.get("/test_utils/get_person/%s/" % person.pk)
self.client.get("/test_utils/get_person/%s/" % person.pk)
self.assertNumQueries(2, test_func)
class AssertNumQueriesUponConnectionTests(TransactionTestCase):
available_apps = []
def test_ignores_connection_configuration_queries(self):
real_ensure_connection = connection.ensure_connection
connection.close()
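        # Closing the connection forces the next ORM query to reconnect,
        # which exercises ensure_connection()'s configuration-query path.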
def make_configuration_query():
is_opening_connection = connection.connection is None
real_ensure_connection()
if is_opening_connection:
# Avoid infinite recursion. Creating a cursor calls
# ensure_connection() which is currently mocked by this method.
with connection.cursor() as cursor:
cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
ensure_connection = (
"django.db.backends.base.base.BaseDatabaseWrapper.ensure_connection"
)
with mock.patch(ensure_connection, side_effect=make_configuration_query):
with self.assertNumQueries(1):
list(Car.objects.all())
class AssertQuerySetEqualTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.p1 = Person.objects.create(name="p1")
cls.p2 = Person.objects.create(name="p2")
def test_rename_assertquerysetequal_deprecation_warning(self):
msg = "assertQuerysetEqual() is deprecated in favor of assertQuerySetEqual()."
with self.assertRaisesMessage(RemovedInDjango51Warning, msg):
self.assertQuerysetEqual()
@ignore_warnings(category=RemovedInDjango51Warning)
def test_deprecated_assertquerysetequal(self):
self.assertQuerysetEqual(Person.objects.filter(name="p3"), [])
def test_empty(self):
self.assertQuerySetEqual(Person.objects.filter(name="p3"), [])
def test_ordered(self):
self.assertQuerySetEqual(
Person.objects.order_by("name"),
[self.p1, self.p2],
)
def test_unordered(self):
self.assertQuerySetEqual(
Person.objects.order_by("name"), [self.p2, self.p1], ordered=False
)
def test_queryset(self):
self.assertQuerySetEqual(
Person.objects.order_by("name"),
Person.objects.order_by("name"),
)
def test_flat_values_list(self):
self.assertQuerySetEqual(
Person.objects.order_by("name").values_list("name", flat=True),
["p1", "p2"],
)
def test_transform(self):
self.assertQuerySetEqual(
Person.objects.order_by("name"),
[self.p1.pk, self.p2.pk],
transform=lambda x: x.pk,
)
def test_repr_transform(self):
self.assertQuerySetEqual(
Person.objects.order_by("name"),
[repr(self.p1), repr(self.p2)],
transform=repr,
)
def test_undefined_order(self):
# Using an unordered queryset with more than one ordered value
# is an error.
msg = (
"Trying to compare non-ordered queryset against more than one "
"ordered value."
)
with self.assertRaisesMessage(ValueError, msg):
self.assertQuerySetEqual(
Person.objects.all(),
[self.p1, self.p2],
)
# No error for one value.
self.assertQuerySetEqual(Person.objects.filter(name="p1"), [self.p1])
def test_repeated_values(self):
"""
        assertQuerySetEqual checks the number of appearances of each item
        when used with ordered=False.
"""
batmobile = Car.objects.create(name="Batmobile")
k2000 = Car.objects.create(name="K 2000")
PossessedCar.objects.bulk_create(
[
PossessedCar(car=batmobile, belongs_to=self.p1),
PossessedCar(car=batmobile, belongs_to=self.p1),
PossessedCar(car=k2000, belongs_to=self.p1),
PossessedCar(car=k2000, belongs_to=self.p1),
PossessedCar(car=k2000, belongs_to=self.p1),
PossessedCar(car=k2000, belongs_to=self.p1),
]
)
with self.assertRaises(AssertionError):
self.assertQuerySetEqual(
self.p1.cars.all(), [batmobile, k2000], ordered=False
)
self.assertQuerySetEqual(
self.p1.cars.all(), [batmobile] * 2 + [k2000] * 4, ordered=False
)
def test_maxdiff(self):
names = ["Joe Smith %s" % i for i in range(20)]
Person.objects.bulk_create([Person(name=name) for name in names])
names.append("Extra Person")
with self.assertRaises(AssertionError) as ctx:
self.assertQuerySetEqual(
Person.objects.filter(name__startswith="Joe"),
names,
ordered=False,
transform=lambda p: p.name,
)
self.assertIn("Set self.maxDiff to None to see it.", str(ctx.exception))
original = self.maxDiff
self.maxDiff = None
try:
with self.assertRaises(AssertionError) as ctx:
self.assertQuerySetEqual(
Person.objects.filter(name__startswith="Joe"),
names,
ordered=False,
transform=lambda p: p.name,
)
finally:
self.maxDiff = original
exception_msg = str(ctx.exception)
self.assertNotIn("Set self.maxDiff to None to see it.", exception_msg)
for name in names:
self.assertIn(name, exception_msg)
@override_settings(ROOT_URLCONF="test_utils.urls")
class CaptureQueriesContextManagerTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.person_pk = str(Person.objects.create(name="test").pk)
def test_simple(self):
with CaptureQueriesContext(connection) as captured_queries:
Person.objects.get(pk=self.person_pk)
self.assertEqual(len(captured_queries), 1)
self.assertIn(self.person_pk, captured_queries[0]["sql"])
with CaptureQueriesContext(connection) as captured_queries:
pass
self.assertEqual(0, len(captured_queries))
def test_within(self):
with CaptureQueriesContext(connection) as captured_queries:
Person.objects.get(pk=self.person_pk)
self.assertEqual(len(captured_queries), 1)
self.assertIn(self.person_pk, captured_queries[0]["sql"])
def test_nested(self):
with CaptureQueriesContext(connection) as captured_queries:
Person.objects.count()
with CaptureQueriesContext(connection) as nested_captured_queries:
Person.objects.count()
self.assertEqual(1, len(nested_captured_queries))
self.assertEqual(2, len(captured_queries))
def test_failure(self):
with self.assertRaises(TypeError):
with CaptureQueriesContext(connection):
raise TypeError
def test_with_client(self):
with CaptureQueriesContext(connection) as captured_queries:
self.client.get("/test_utils/get_person/%s/" % self.person_pk)
self.assertEqual(len(captured_queries), 1)
self.assertIn(self.person_pk, captured_queries[0]["sql"])
with CaptureQueriesContext(connection) as captured_queries:
self.client.get("/test_utils/get_person/%s/" % self.person_pk)
self.assertEqual(len(captured_queries), 1)
self.assertIn(self.person_pk, captured_queries[0]["sql"])
with CaptureQueriesContext(connection) as captured_queries:
self.client.get("/test_utils/get_person/%s/" % self.person_pk)
self.client.get("/test_utils/get_person/%s/" % self.person_pk)
self.assertEqual(len(captured_queries), 2)
self.assertIn(self.person_pk, captured_queries[0]["sql"])
self.assertIn(self.person_pk, captured_queries[1]["sql"])
@override_settings(ROOT_URLCONF="test_utils.urls")
class AssertNumQueriesContextManagerTests(TestCase):
def test_simple(self):
with self.assertNumQueries(0):
pass
with self.assertNumQueries(1):
Person.objects.count()
with self.assertNumQueries(2):
Person.objects.count()
Person.objects.count()
def test_failure(self):
msg = "1 != 2 : 1 queries executed, 2 expected\nCaptured queries were:\n1."
with self.assertRaisesMessage(AssertionError, msg):
with self.assertNumQueries(2):
Person.objects.count()
with self.assertRaises(TypeError):
with self.assertNumQueries(4000):
raise TypeError
def test_with_client(self):
person = Person.objects.create(name="test")
with self.assertNumQueries(1):
self.client.get("/test_utils/get_person/%s/" % person.pk)
with self.assertNumQueries(1):
self.client.get("/test_utils/get_person/%s/" % person.pk)
with self.assertNumQueries(2):
self.client.get("/test_utils/get_person/%s/" % person.pk)
self.client.get("/test_utils/get_person/%s/" % person.pk)
@override_settings(ROOT_URLCONF="test_utils.urls")
class AssertTemplateUsedContextManagerTests(SimpleTestCase):
def test_usage(self):
with self.assertTemplateUsed("template_used/base.html"):
render_to_string("template_used/base.html")
with self.assertTemplateUsed(template_name="template_used/base.html"):
render_to_string("template_used/base.html")
with self.assertTemplateUsed("template_used/base.html"):
render_to_string("template_used/include.html")
with self.assertTemplateUsed("template_used/base.html"):
render_to_string("template_used/extends.html")
with self.assertTemplateUsed("template_used/base.html"):
render_to_string("template_used/base.html")
render_to_string("template_used/base.html")
def test_nested_usage(self):
with self.assertTemplateUsed("template_used/base.html"):
with self.assertTemplateUsed("template_used/include.html"):
render_to_string("template_used/include.html")
with self.assertTemplateUsed("template_used/extends.html"):
with self.assertTemplateUsed("template_used/base.html"):
render_to_string("template_used/extends.html")
with self.assertTemplateUsed("template_used/base.html"):
with self.assertTemplateUsed("template_used/alternative.html"):
render_to_string("template_used/alternative.html")
render_to_string("template_used/base.html")
with self.assertTemplateUsed("template_used/base.html"):
render_to_string("template_used/extends.html")
with self.assertTemplateNotUsed("template_used/base.html"):
render_to_string("template_used/alternative.html")
render_to_string("template_used/base.html")
def test_not_used(self):
with self.assertTemplateNotUsed("template_used/base.html"):
pass
with self.assertTemplateNotUsed("template_used/alternative.html"):
pass
def test_error_message(self):
msg = "No templates used to render the response"
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed("template_used/base.html"):
pass
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(template_name="template_used/base.html"):
pass
msg2 = (
"Template 'template_used/base.html' was not a template used to render "
"the response. Actual template(s) used: template_used/alternative.html"
)
with self.assertRaisesMessage(AssertionError, msg2):
with self.assertTemplateUsed("template_used/base.html"):
render_to_string("template_used/alternative.html")
with self.assertRaisesMessage(
AssertionError, "No templates used to render the response"
):
response = self.client.get("/test_utils/no_template_used/")
self.assertTemplateUsed(response, "template_used/base.html")
def test_msg_prefix(self):
msg_prefix = "Prefix"
msg = f"{msg_prefix}: No templates used to render the response"
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(
"template_used/base.html", msg_prefix=msg_prefix
):
pass
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(
template_name="template_used/base.html",
msg_prefix=msg_prefix,
):
pass
msg = (
f"{msg_prefix}: Template 'template_used/base.html' was not a "
f"template used to render the response. Actual template(s) used: "
f"template_used/alternative.html"
)
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(
"template_used/base.html", msg_prefix=msg_prefix
):
render_to_string("template_used/alternative.html")
def test_count(self):
with self.assertTemplateUsed("template_used/base.html", count=2):
render_to_string("template_used/base.html")
render_to_string("template_used/base.html")
msg = (
"Template 'template_used/base.html' was expected to be rendered "
"3 time(s) but was actually rendered 2 time(s)."
)
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed("template_used/base.html", count=3):
render_to_string("template_used/base.html")
render_to_string("template_used/base.html")
def test_failure(self):
msg = "response and/or template_name argument must be provided"
with self.assertRaisesMessage(TypeError, msg):
with self.assertTemplateUsed():
pass
msg = "No templates used to render the response"
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(""):
pass
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(""):
render_to_string("template_used/base.html")
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed(template_name=""):
pass
msg = (
"Template 'template_used/base.html' was not a template used to "
"render the response. Actual template(s) used: "
"template_used/alternative.html"
)
with self.assertRaisesMessage(AssertionError, msg):
with self.assertTemplateUsed("template_used/base.html"):
render_to_string("template_used/alternative.html")
def test_assert_used_on_http_response(self):
response = HttpResponse()
msg = "%s() is only usable on responses fetched using the Django test Client."
with self.assertRaisesMessage(ValueError, msg % "assertTemplateUsed"):
self.assertTemplateUsed(response, "template.html")
with self.assertRaisesMessage(ValueError, msg % "assertTemplateNotUsed"):
self.assertTemplateNotUsed(response, "template.html")
class HTMLEqualTests(SimpleTestCase):
def test_html_parser(self):
element = parse_html("<div><p>Hello</p></div>")
self.assertEqual(len(element.children), 1)
self.assertEqual(element.children[0].name, "p")
self.assertEqual(element.children[0].children[0], "Hello")
parse_html("<p>")
parse_html("<p attr>")
dom = parse_html("<p>foo")
self.assertEqual(len(dom.children), 1)
self.assertEqual(dom.name, "p")
self.assertEqual(dom[0], "foo")
def test_parse_html_in_script(self):
parse_html('<script>var a = "<p" + ">";</script>')
parse_html(
"""
<script>
var js_sha_link='<p>***</p>';
</script>
"""
)
        # Script content is parsed as text.
dom = parse_html(
"""
<script><p>foo</p> '</scr'+'ipt>' <span>bar</span></script>
"""
)
self.assertEqual(len(dom.children), 1)
self.assertEqual(dom.children[0], "<p>foo</p> '</scr'+'ipt>' <span>bar</span>")
def test_self_closing_tags(self):
self_closing_tags = [
"area",
"base",
"br",
"col",
"embed",
"hr",
"img",
"input",
"link",
"meta",
"param",
"source",
"track",
"wbr",
# Deprecated tags
"frame",
"spacer",
]
for tag in self_closing_tags:
with self.subTest(tag):
dom = parse_html("<p>Hello <%s> world</p>" % tag)
self.assertEqual(len(dom.children), 3)
self.assertEqual(dom[0], "Hello")
self.assertEqual(dom[1].name, tag)
self.assertEqual(dom[2], "world")
dom = parse_html("<p>Hello <%s /> world</p>" % tag)
self.assertEqual(len(dom.children), 3)
self.assertEqual(dom[0], "Hello")
self.assertEqual(dom[1].name, tag)
self.assertEqual(dom[2], "world")
def test_simple_equal_html(self):
self.assertHTMLEqual("", "")
self.assertHTMLEqual("<p></p>", "<p></p>")
self.assertHTMLEqual("<p></p>", " <p> </p> ")
self.assertHTMLEqual("<div><p>Hello</p></div>", "<div><p>Hello</p></div>")
self.assertHTMLEqual("<div><p>Hello</p></div>", "<div> <p>Hello</p> </div>")
self.assertHTMLEqual("<div>\n<p>Hello</p></div>", "<div><p>Hello</p></div>\n")
self.assertHTMLEqual(
"<div><p>Hello\nWorld !</p></div>", "<div><p>Hello World\n!</p></div>"
)
self.assertHTMLEqual(
"<div><p>Hello\nWorld !</p></div>", "<div><p>Hello World\n!</p></div>"
)
self.assertHTMLEqual("<p>Hello World !</p>", "<p>Hello World\n\n!</p>")
self.assertHTMLEqual("<p> </p>", "<p></p>")
self.assertHTMLEqual("<p/>", "<p></p>")
self.assertHTMLEqual("<p />", "<p></p>")
self.assertHTMLEqual("<input checked>", '<input checked="checked">')
self.assertHTMLEqual("<p>Hello", "<p> Hello")
self.assertHTMLEqual("<p>Hello</p>World", "<p>Hello</p> World")
def test_ignore_comments(self):
self.assertHTMLEqual(
"<div>Hello<!-- this is a comment --> World!</div>",
"<div>Hello World!</div>",
)
def test_unequal_html(self):
self.assertHTMLNotEqual("<p>Hello</p>", "<p>Hello!</p>")
self.assertHTMLNotEqual("<p>foobar</p>", "<p>foo bar</p>")
self.assertHTMLNotEqual("<p>foo bar</p>", "<p>foo bar</p>")
self.assertHTMLNotEqual("<p>foo nbsp</p>", "<p>foo </p>")
self.assertHTMLNotEqual("<p>foo #20</p>", "<p>foo </p>")
self.assertHTMLNotEqual(
"<p><span>Hello</span><span>World</span></p>",
"<p><span>Hello</span>World</p>",
)
self.assertHTMLNotEqual(
"<p><span>Hello</span>World</p>",
"<p><span>Hello</span><span>World</span></p>",
)
def test_attributes(self):
self.assertHTMLEqual(
'<input type="text" id="id_name" />', '<input id="id_name" type="text" />'
)
self.assertHTMLEqual(
"""<input type='text' id="id_name" />""",
'<input id="id_name" type="text" />',
)
self.assertHTMLNotEqual(
'<input type="text" id="id_name" />',
'<input type="password" id="id_name" />',
)
def test_class_attribute(self):
pairs = [
('<p class="foo bar"></p>', '<p class="bar foo"></p>'),
('<p class=" foo bar "></p>', '<p class="bar foo"></p>'),
('<p class=" foo bar "></p>', '<p class="bar foo"></p>'),
('<p class="foo\tbar"></p>', '<p class="bar foo"></p>'),
('<p class="\tfoo\tbar\t"></p>', '<p class="bar foo"></p>'),
('<p class="\t\t\tfoo\t\t\tbar\t\t\t"></p>', '<p class="bar foo"></p>'),
('<p class="\t \nfoo \t\nbar\n\t "></p>', '<p class="bar foo"></p>'),
]
for html1, html2 in pairs:
with self.subTest(html1):
self.assertHTMLEqual(html1, html2)
def test_boolean_attribute(self):
html1 = "<input checked>"
html2 = '<input checked="">'
html3 = '<input checked="checked">'
self.assertHTMLEqual(html1, html2)
self.assertHTMLEqual(html1, html3)
self.assertHTMLEqual(html2, html3)
self.assertHTMLNotEqual(html1, '<input checked="invalid">')
self.assertEqual(str(parse_html(html1)), "<input checked>")
self.assertEqual(str(parse_html(html2)), "<input checked>")
self.assertEqual(str(parse_html(html3)), "<input checked>")
    def test_non_boolean_attributes(self):
html1 = "<input value>"
html2 = '<input value="">'
html3 = '<input value="value">'
self.assertHTMLEqual(html1, html2)
self.assertHTMLNotEqual(html1, html3)
self.assertEqual(str(parse_html(html1)), '<input value="">')
self.assertEqual(str(parse_html(html2)), '<input value="">')
def test_normalize_refs(self):
pairs = [
("'", "'"),
("'", "'"),
("'", "'"),
("'", "'"),
("'", "'"),
("'", "'"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
("&", "&"),
]
for pair in pairs:
with self.subTest(repr(pair)):
self.assertHTMLEqual(*pair)
def test_complex_examples(self):
self.assertHTMLEqual(
"""<tr><th><label for="id_first_name">First name:</label></th>
<td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>
<tr><th><label for="id_last_name">Last name:</label></th>
<td><input type="text" id="id_last_name" name="last_name" value="Lennon" /></td></tr>
<tr><th><label for="id_birthday">Birthday:</label></th>
<td><input type="text" value="1940-10-9" name="birthday" id="id_birthday" /></td></tr>""", # NOQA
"""
<tr><th>
<label for="id_first_name">First name:</label></th><td>
<input type="text" name="first_name" value="John" id="id_first_name" />
</td></tr>
<tr><th>
<label for="id_last_name">Last name:</label></th><td>
<input type="text" name="last_name" value="Lennon" id="id_last_name" />
</td></tr>
<tr><th>
<label for="id_birthday">Birthday:</label></th><td>
<input type="text" name="birthday" value="1940-10-9" id="id_birthday" />
</td></tr>
""",
)
self.assertHTMLEqual(
"""<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet">
<title>Document</title>
<meta attribute="value">
</head>
<body>
<p>
This is a valid paragraph
<div> this is a div AFTER the p</div>
</body>
</html>""",
"""
<html>
<head>
<link rel="stylesheet">
<title>Document</title>
<meta attribute="value">
</head>
<body>
<p> This is a valid paragraph
<!-- browsers would close the p tag here -->
<div> this is a div AFTER the p</div>
</p> <!-- this is invalid HTML parsing, but it should make no
difference in most cases -->
</body>
</html>""",
)
def test_html_contain(self):
        # Equal HTML fragments contain each other.
dom1 = parse_html("<p>foo")
dom2 = parse_html("<p>foo</p>")
self.assertIn(dom1, dom2)
self.assertIn(dom2, dom1)
dom2 = parse_html("<div><p>foo</p></div>")
self.assertIn(dom1, dom2)
self.assertNotIn(dom2, dom1)
self.assertNotIn("<p>foo</p>", dom2)
self.assertIn("foo", dom2)
        # Containment also works when fragments have multiple root elements.
dom1 = parse_html("<p>foo</p><p>bar</p>")
dom2 = parse_html("<p>foo</p><p>bar</p>")
self.assertIn(dom1, dom2)
dom1 = parse_html("<p>foo</p>")
self.assertIn(dom1, dom2)
dom1 = parse_html("<p>bar</p>")
self.assertIn(dom1, dom2)
dom1 = parse_html("<div><p>foo</p><p>bar</p></div>")
self.assertIn(dom2, dom1)
def test_count(self):
        # Equal HTML fragments contain each other exactly once.
dom1 = parse_html("<p>foo")
dom2 = parse_html("<p>foo</p>")
self.assertEqual(dom1.count(dom2), 1)
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html("<p>foo</p><p>bar</p>")
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html("<p>foo foo</p><p>foo</p>")
self.assertEqual(dom2.count("foo"), 3)
dom2 = parse_html('<p class="bar">foo</p>')
self.assertEqual(dom2.count("bar"), 0)
self.assertEqual(dom2.count("class"), 0)
self.assertEqual(dom2.count("p"), 0)
self.assertEqual(dom2.count("o"), 2)
dom2 = parse_html("<p>foo</p><p>foo</p>")
self.assertEqual(dom2.count(dom1), 2)
dom2 = parse_html('<div><p>foo<input type=""></p><p>foo</p></div>')
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html("<div><div><p>foo</p></div></div>")
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html("<p>foo<p>foo</p></p>")
self.assertEqual(dom2.count(dom1), 1)
dom2 = parse_html("<p>foo<p>bar</p></p>")
self.assertEqual(dom2.count(dom1), 0)
# HTML with a root element contains the same HTML with no root element.
dom1 = parse_html("<p>foo</p><p>bar</p>")
dom2 = parse_html("<div><p>foo</p><p>bar</p></div>")
self.assertEqual(dom2.count(dom1), 1)
# Target of search is a sequence of child elements and appears more
# than once.
dom2 = parse_html("<div><p>foo</p><p>bar</p><p>foo</p><p>bar</p></div>")
self.assertEqual(dom2.count(dom1), 2)
# Searched HTML has additional children.
dom1 = parse_html("<a/><b/>")
dom2 = parse_html("<a/><b/><c/>")
self.assertEqual(dom2.count(dom1), 1)
# No match found in children.
dom1 = parse_html("<b/><a/>")
self.assertEqual(dom2.count(dom1), 0)
# Target of search found among children and grandchildren.
dom1 = parse_html("<b/><b/>")
dom2 = parse_html("<a><b/><b/></a><b/><b/>")
self.assertEqual(dom2.count(dom1), 2)
def test_root_element_escaped_html(self):
html = "<br>"
parsed = parse_html(html)
self.assertEqual(str(parsed), html)
def test_parsing_errors(self):
with self.assertRaises(AssertionError):
self.assertHTMLEqual("<p>", "")
with self.assertRaises(AssertionError):
self.assertHTMLEqual("", "<p>")
error_msg = (
"First argument is not valid HTML:\n"
"('Unexpected end tag `div` (Line 1, Column 6)', (1, 6))"
)
with self.assertRaisesMessage(AssertionError, error_msg):
self.assertHTMLEqual("< div></ div>", "<div></div>")
with self.assertRaises(HTMLParseError):
parse_html("</p>")
def test_escaped_html_errors(self):
msg = "<p>\n<foo>\n</p> != <p>\n<foo>\n</p>\n"
with self.assertRaisesMessage(AssertionError, msg):
self.assertHTMLEqual("<p><foo></p>", "<p><foo></p>")
with self.assertRaisesMessage(AssertionError, msg):
self.assertHTMLEqual("<p><foo></p>", "<p><foo></p>")
def test_contains_html(self):
response = HttpResponse(
"""<body>
This is a form: <form method="get">
<input type="text" name="Hello" />
</form></body>"""
)
self.assertNotContains(response, "<input name='Hello' type='text'>")
self.assertContains(response, '<form method="get">')
self.assertContains(response, "<input name='Hello' type='text'>", html=True)
self.assertNotContains(response, '<form method="get">', html=True)
invalid_response = HttpResponse("""<body <bad>>""")
with self.assertRaises(AssertionError):
self.assertContains(invalid_response, "<p></p>")
with self.assertRaises(AssertionError):
self.assertContains(response, '<p "whats" that>')
def test_unicode_handling(self):
response = HttpResponse(
'<p class="help">Some help text for the title (with Unicode ŠĐĆŽćžšđ)</p>'
)
self.assertContains(
response,
'<p class="help">Some help text for the title (with Unicode ŠĐĆŽćžšđ)</p>',
html=True,
)
class JSONEqualTests(SimpleTestCase):
def test_simple_equal(self):
json1 = '{"attr1": "foo", "attr2":"baz"}'
json2 = '{"attr1": "foo", "attr2":"baz"}'
self.assertJSONEqual(json1, json2)
def test_simple_equal_unordered(self):
json1 = '{"attr1": "foo", "attr2":"baz"}'
json2 = '{"attr2":"baz", "attr1": "foo"}'
self.assertJSONEqual(json1, json2)
def test_simple_equal_raise(self):
json1 = '{"attr1": "foo", "attr2":"baz"}'
json2 = '{"attr2":"baz"}'
with self.assertRaises(AssertionError):
self.assertJSONEqual(json1, json2)
def test_equal_parsing_errors(self):
invalid_json = '{"attr1": "foo, "attr2":"baz"}'
valid_json = '{"attr1": "foo", "attr2":"baz"}'
with self.assertRaises(AssertionError):
self.assertJSONEqual(invalid_json, valid_json)
with self.assertRaises(AssertionError):
self.assertJSONEqual(valid_json, invalid_json)
def test_simple_not_equal(self):
json1 = '{"attr1": "foo", "attr2":"baz"}'
json2 = '{"attr2":"baz"}'
self.assertJSONNotEqual(json1, json2)
def test_simple_not_equal_raise(self):
json1 = '{"attr1": "foo", "attr2":"baz"}'
json2 = '{"attr1": "foo", "attr2":"baz"}'
with self.assertRaises(AssertionError):
self.assertJSONNotEqual(json1, json2)
def test_not_equal_parsing_errors(self):
invalid_json = '{"attr1": "foo, "attr2":"baz"}'
valid_json = '{"attr1": "foo", "attr2":"baz"}'
with self.assertRaises(AssertionError):
self.assertJSONNotEqual(invalid_json, valid_json)
with self.assertRaises(AssertionError):
self.assertJSONNotEqual(valid_json, invalid_json)
class XMLEqualTests(SimpleTestCase):
def test_simple_equal(self):
xml1 = "<elem attr1='a' attr2='b' />"
xml2 = "<elem attr1='a' attr2='b' />"
self.assertXMLEqual(xml1, xml2)
def test_simple_equal_unordered(self):
xml1 = "<elem attr1='a' attr2='b' />"
xml2 = "<elem attr2='b' attr1='a' />"
self.assertXMLEqual(xml1, xml2)
def test_simple_equal_raise(self):
xml1 = "<elem attr1='a' />"
xml2 = "<elem attr2='b' attr1='a' />"
with self.assertRaises(AssertionError):
self.assertXMLEqual(xml1, xml2)
def test_simple_equal_raises_message(self):
xml1 = "<elem attr1='a' />"
xml2 = "<elem attr2='b' attr1='a' />"
msg = """{xml1} != {xml2}
- <elem attr1='a' />
+ <elem attr2='b' attr1='a' />
? ++++++++++
""".format(
xml1=repr(xml1), xml2=repr(xml2)
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertXMLEqual(xml1, xml2)
def test_simple_not_equal(self):
xml1 = "<elem attr1='a' attr2='c' />"
xml2 = "<elem attr1='a' attr2='b' />"
self.assertXMLNotEqual(xml1, xml2)
def test_simple_not_equal_raise(self):
xml1 = "<elem attr1='a' attr2='b' />"
xml2 = "<elem attr2='b' attr1='a' />"
with self.assertRaises(AssertionError):
self.assertXMLNotEqual(xml1, xml2)
def test_parsing_errors(self):
xml_unvalid = "<elem attr1='a attr2='b' />"
xml2 = "<elem attr2='b' attr1='a' />"
with self.assertRaises(AssertionError):
self.assertXMLNotEqual(xml_unvalid, xml2)
def test_comment_root(self):
xml1 = "<?xml version='1.0'?><!-- comment1 --><elem attr1='a' attr2='b' />"
xml2 = "<?xml version='1.0'?><!-- comment2 --><elem attr2='b' attr1='a' />"
self.assertXMLEqual(xml1, xml2)
def test_simple_equal_with_leading_or_trailing_whitespace(self):
xml1 = "<elem>foo</elem> \t\n"
xml2 = " \t\n<elem>foo</elem>"
self.assertXMLEqual(xml1, xml2)
def test_simple_not_equal_with_whitespace_in_the_middle(self):
xml1 = "<elem>foo</elem><elem>bar</elem>"
xml2 = "<elem>foo</elem> <elem>bar</elem>"
self.assertXMLNotEqual(xml1, xml2)
def test_doctype_root(self):
xml1 = '<?xml version="1.0"?><!DOCTYPE root SYSTEM "example1.dtd"><root />'
xml2 = '<?xml version="1.0"?><!DOCTYPE root SYSTEM "example2.dtd"><root />'
self.assertXMLEqual(xml1, xml2)
def test_processing_instruction(self):
xml1 = (
'<?xml version="1.0"?>'
'<?xml-model href="http://www.example1.com"?><root />'
)
xml2 = (
'<?xml version="1.0"?>'
'<?xml-model href="http://www.example2.com"?><root />'
)
self.assertXMLEqual(xml1, xml2)
self.assertXMLEqual(
'<?xml-stylesheet href="style1.xslt" type="text/xsl"?><root />',
'<?xml-stylesheet href="style2.xslt" type="text/xsl"?><root />',
)
class SkippingExtraTests(TestCase):
fixtures = ["should_not_be_loaded.json"]
# HACK: This depends on internals of our TestCase subclasses
def __call__(self, result=None):
        # Detect fixture loading by counting SQL queries; it should be zero.
with self.assertNumQueries(0):
super().__call__(result)
@unittest.skip("Fixture loading should not be performed for skipped tests.")
def test_fixtures_are_skipped(self):
pass
class AssertRaisesMsgTest(SimpleTestCase):
def test_assert_raises_message(self):
msg = "'Expected message' not found in 'Unexpected message'"
# context manager form of assertRaisesMessage()
with self.assertRaisesMessage(AssertionError, msg):
with self.assertRaisesMessage(ValueError, "Expected message"):
raise ValueError("Unexpected message")
# callable form
def func():
raise ValueError("Unexpected message")
with self.assertRaisesMessage(AssertionError, msg):
self.assertRaisesMessage(ValueError, "Expected message", func)
def test_special_re_chars(self):
"""assertRaisesMessage shouldn't interpret RE special chars."""
def func1():
raise ValueError("[.*x+]y?")
with self.assertRaisesMessage(ValueError, "[.*x+]y?"):
func1()
class AssertWarnsMessageTests(SimpleTestCase):
def test_context_manager(self):
with self.assertWarnsMessage(UserWarning, "Expected message"):
warnings.warn("Expected message", UserWarning)
def test_context_manager_failure(self):
msg = "Expected message' not found in 'Unexpected message'"
with self.assertRaisesMessage(AssertionError, msg):
with self.assertWarnsMessage(UserWarning, "Expected message"):
warnings.warn("Unexpected message", UserWarning)
def test_callable(self):
def func():
warnings.warn("Expected message", UserWarning)
self.assertWarnsMessage(UserWarning, "Expected message", func)
def test_special_re_chars(self):
def func1():
warnings.warn("[.*x+]y?", UserWarning)
with self.assertWarnsMessage(UserWarning, "[.*x+]y?"):
func1()
# TODO: Remove when dropping support for PY39.
class AssertNoLogsTest(SimpleTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
logging.config.dictConfig(DEFAULT_LOGGING)
cls.addClassCleanup(logging.config.dictConfig, settings.LOGGING)
def setUp(self):
self.logger = logging.getLogger("django")
@override_settings(DEBUG=True)
def test_fails_when_log_emitted(self):
msg = "Unexpected logs found: ['INFO:django:FAIL!']"
with self.assertRaisesMessage(AssertionError, msg):
with self.assertNoLogs("django", "INFO"):
self.logger.info("FAIL!")
@override_settings(DEBUG=True)
def test_text_level(self):
with self.assertNoLogs("django", "INFO"):
self.logger.debug("DEBUG logs are ignored.")
@override_settings(DEBUG=True)
def test_int_level(self):
with self.assertNoLogs("django", logging.INFO):
self.logger.debug("DEBUG logs are ignored.")
@override_settings(DEBUG=True)
def test_default_level(self):
with self.assertNoLogs("django"):
self.logger.debug("DEBUG logs are ignored.")
@override_settings(DEBUG=True)
def test_does_not_hide_other_failures(self):
msg = "1 != 2"
with self.assertRaisesMessage(AssertionError, msg):
with self.assertNoLogs("django"):
self.assertEqual(1, 2)
class AssertFieldOutputTests(SimpleTestCase):
def test_assert_field_output(self):
error_invalid = ["Enter a valid email address."]
self.assertFieldOutput(
EmailField, {"[email protected]": "[email protected]"}, {"aaa": error_invalid}
)
with self.assertRaises(AssertionError):
self.assertFieldOutput(
EmailField,
{"[email protected]": "[email protected]"},
{"aaa": error_invalid + ["Another error"]},
)
with self.assertRaises(AssertionError):
self.assertFieldOutput(
EmailField, {"[email protected]": "Wrong output"}, {"aaa": error_invalid}
)
with self.assertRaises(AssertionError):
self.assertFieldOutput(
EmailField,
{"[email protected]": "[email protected]"},
{"aaa": ["Come on, gimme some well formatted data, dude."]},
)
def test_custom_required_message(self):
class MyCustomField(IntegerField):
default_error_messages = {
"required": "This is really required.",
}
self.assertFieldOutput(MyCustomField, {}, {}, empty_value=None)
@override_settings(ROOT_URLCONF="test_utils.urls")
class AssertURLEqualTests(SimpleTestCase):
def test_equal(self):
valid_tests = (
("http://example.com/?", "http://example.com/"),
("http://example.com/?x=1&", "http://example.com/?x=1"),
("http://example.com/?x=1&y=2", "http://example.com/?y=2&x=1"),
("http://example.com/?x=1&y=2", "http://example.com/?y=2&x=1"),
(
"http://example.com/?x=1&y=2&a=1&a=2",
"http://example.com/?a=1&a=2&y=2&x=1",
),
("/path/to/?x=1&y=2&z=3", "/path/to/?z=3&y=2&x=1"),
("?x=1&y=2&z=3", "?z=3&y=2&x=1"),
("/test_utils/no_template_used/", reverse_lazy("no_template_used")),
)
for url1, url2 in valid_tests:
with self.subTest(url=url1):
self.assertURLEqual(url1, url2)
def test_not_equal(self):
invalid_tests = (
# Protocol must be the same.
("http://example.com/", "https://example.com/"),
("http://example.com/?x=1&x=2", "https://example.com/?x=2&x=1"),
("http://example.com/?x=1&y=bar&x=2", "https://example.com/?y=bar&x=2&x=1"),
# Parameters of the same name must be in the same order.
("/path/to?a=1&a=2", "/path/to/?a=2&a=1"),
)
for url1, url2 in invalid_tests:
with self.subTest(url=url1), self.assertRaises(AssertionError):
self.assertURLEqual(url1, url2)
def test_message(self):
msg = (
"Expected 'http://example.com/?x=1&x=2' to equal "
"'https://example.com/?x=2&x=1'"
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertURLEqual(
"http://example.com/?x=1&x=2", "https://example.com/?x=2&x=1"
)
def test_msg_prefix(self):
msg = (
"Prefix: Expected 'http://example.com/?x=1&x=2' to equal "
"'https://example.com/?x=2&x=1'"
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertURLEqual(
"http://example.com/?x=1&x=2",
"https://example.com/?x=2&x=1",
msg_prefix="Prefix: ",
)
class TestForm(Form):
field = CharField()
def clean_field(self):
value = self.cleaned_data.get("field", "")
if value == "invalid":
raise ValidationError("invalid value")
return value
def clean(self):
if self.cleaned_data.get("field") == "invalid_non_field":
raise ValidationError("non-field error")
return self.cleaned_data
@classmethod
def _get_cleaned_form(cls, field_value):
form = cls({"field": field_value})
form.full_clean()
return form
@classmethod
def valid(cls):
return cls._get_cleaned_form("valid")
@classmethod
def invalid(cls, nonfield=False):
return cls._get_cleaned_form("invalid_non_field" if nonfield else "invalid")
class TestFormset(formset_factory(TestForm)):
@classmethod
def _get_cleaned_formset(cls, field_value):
formset = cls(
{
"form-TOTAL_FORMS": "1",
"form-INITIAL_FORMS": "0",
"form-0-field": field_value,
}
)
formset.full_clean()
return formset
@classmethod
def valid(cls):
return cls._get_cleaned_formset("valid")
@classmethod
def invalid(cls, nonfield=False, nonform=False):
if nonform:
formset = cls({}, error_messages={"missing_management_form": "error"})
formset.full_clean()
return formset
return cls._get_cleaned_formset("invalid_non_field" if nonfield else "invalid")
class AssertFormErrorTests(SimpleTestCase):
@ignore_warnings(category=RemovedInDjango50Warning)
def test_non_client_response(self):
msg = (
"assertFormError() is only usable on responses fetched using the "
"Django test Client."
)
response = HttpResponse()
with self.assertRaisesMessage(ValueError, msg):
self.assertFormError(response, "form", "field", "invalid value")
@ignore_warnings(category=RemovedInDjango50Warning)
def test_response_with_no_context(self):
msg = "Response did not use any contexts to render the response"
response = mock.Mock(context=[])
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormError(response, "form", "field", "invalid value")
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormError(
response,
"form",
"field",
"invalid value",
msg_prefix=msg_prefix,
)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_form_not_in_context(self):
msg = "The form 'form' was not used to render the response"
response = mock.Mock(context=[{}])
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormError(response, "form", "field", "invalid value")
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormError(
response, "form", "field", "invalid value", msg_prefix=msg_prefix
)
def test_single_error(self):
self.assertFormError(TestForm.invalid(), "field", "invalid value")
def test_error_list(self):
self.assertFormError(TestForm.invalid(), "field", ["invalid value"])
def test_empty_errors_valid_form(self):
self.assertFormError(TestForm.valid(), "field", [])
def test_empty_errors_valid_form_non_field_errors(self):
self.assertFormError(TestForm.valid(), None, [])
def test_field_not_in_form(self):
msg = (
"The form <TestForm bound=True, valid=False, fields=(field)> does not "
"contain the field 'other_field'."
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormError(TestForm.invalid(), "other_field", "invalid value")
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormError(
TestForm.invalid(),
"other_field",
"invalid value",
msg_prefix=msg_prefix,
)
def test_field_with_no_errors(self):
msg = (
"The errors of field 'field' on form <TestForm bound=True, valid=True, "
"fields=(field)> don't match."
)
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormError(TestForm.valid(), "field", "invalid value")
self.assertIn("[] != ['invalid value']", str(ctx.exception))
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormError(
TestForm.valid(), "field", "invalid value", msg_prefix=msg_prefix
)
def test_field_with_different_error(self):
msg = (
"The errors of field 'field' on form <TestForm bound=True, valid=False, "
"fields=(field)> don't match."
)
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormError(TestForm.invalid(), "field", "other error")
self.assertIn("['invalid value'] != ['other error']", str(ctx.exception))
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormError(
TestForm.invalid(), "field", "other error", msg_prefix=msg_prefix
)
def test_unbound_form(self):
msg = (
"The form <TestForm bound=False, valid=Unknown, fields=(field)> is not "
"bound, it will never have any errors."
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormError(TestForm(), "field", [])
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormError(TestForm(), "field", [], msg_prefix=msg_prefix)
def test_empty_errors_invalid_form(self):
msg = (
"The errors of field 'field' on form <TestForm bound=True, valid=False, "
"fields=(field)> don't match."
)
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormError(TestForm.invalid(), "field", [])
self.assertIn("['invalid value'] != []", str(ctx.exception))
def test_non_field_errors(self):
self.assertFormError(TestForm.invalid(nonfield=True), None, "non-field error")
def test_different_non_field_errors(self):
msg = (
"The non-field errors of form <TestForm bound=True, valid=False, "
"fields=(field)> don't match."
)
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormError(
TestForm.invalid(nonfield=True), None, "other non-field error"
)
self.assertIn(
"['non-field error'] != ['other non-field error']", str(ctx.exception)
)
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormError(
TestForm.invalid(nonfield=True),
None,
"other non-field error",
msg_prefix=msg_prefix,
)
class AssertFormSetErrorTests(SimpleTestCase):
@ignore_warnings(category=RemovedInDjango50Warning)
def test_non_client_response(self):
msg = (
"assertFormSetError() is only usable on responses fetched using "
"the Django test Client."
)
response = HttpResponse()
with self.assertRaisesMessage(ValueError, msg):
self.assertFormSetError(response, "formset", 0, "field", "invalid value")
@ignore_warnings(category=RemovedInDjango50Warning)
def test_response_with_no_context(self):
msg = "Response did not use any contexts to render the response"
response = mock.Mock(context=[])
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormSetError(response, "formset", 0, "field", "invalid value")
@ignore_warnings(category=RemovedInDjango50Warning)
def test_formset_not_in_context(self):
msg = "The formset 'formset' was not used to render the response"
response = mock.Mock(context=[{}])
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormSetError(response, "formset", 0, "field", "invalid value")
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormSetError(
response, "formset", 0, "field", "invalid value", msg_prefix=msg_prefix
)
def test_rename_assertformseterror_deprecation_warning(self):
msg = "assertFormsetError() is deprecated in favor of assertFormSetError()."
with self.assertRaisesMessage(RemovedInDjango51Warning, msg):
self.assertFormsetError()
@ignore_warnings(category=RemovedInDjango51Warning)
def test_deprecated_assertformseterror(self):
self.assertFormsetError(TestFormset.invalid(), 0, "field", "invalid value")
def test_single_error(self):
self.assertFormSetError(TestFormset.invalid(), 0, "field", "invalid value")
def test_error_list(self):
self.assertFormSetError(TestFormset.invalid(), 0, "field", ["invalid value"])
def test_empty_errors_valid_formset(self):
self.assertFormSetError(TestFormset.valid(), 0, "field", [])
def test_multiple_forms(self):
formset = TestFormset(
{
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "0",
"form-0-field": "valid",
"form-1-field": "invalid",
}
)
formset.full_clean()
self.assertFormSetError(formset, 0, "field", [])
self.assertFormSetError(formset, 1, "field", ["invalid value"])
def test_field_not_in_form(self):
msg = (
"The form 0 of formset <TestFormset: bound=True valid=False total_forms=1> "
"does not contain the field 'other_field'."
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormSetError(
TestFormset.invalid(), 0, "other_field", "invalid value"
)
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormSetError(
TestFormset.invalid(),
0,
"other_field",
"invalid value",
msg_prefix=msg_prefix,
)
def test_field_with_no_errors(self):
msg = (
"The errors of field 'field' on form 0 of formset <TestFormset: bound=True "
"valid=True total_forms=1> don't match."
)
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormSetError(TestFormset.valid(), 0, "field", "invalid value")
self.assertIn("[] != ['invalid value']", str(ctx.exception))
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormSetError(
TestFormset.valid(), 0, "field", "invalid value", msg_prefix=msg_prefix
)
def test_field_with_different_error(self):
msg = (
"The errors of field 'field' on form 0 of formset <TestFormset: bound=True "
"valid=False total_forms=1> don't match."
)
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormSetError(TestFormset.invalid(), 0, "field", "other error")
self.assertIn("['invalid value'] != ['other error']", str(ctx.exception))
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormSetError(
TestFormset.invalid(), 0, "field", "other error", msg_prefix=msg_prefix
)
def test_unbound_formset(self):
msg = (
"The formset <TestFormset: bound=False valid=Unknown total_forms=1> is not "
"bound, it will never have any errors."
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormSetError(TestFormset(), 0, "field", [])
def test_empty_errors_invalid_formset(self):
msg = (
"The errors of field 'field' on form 0 of formset <TestFormset: bound=True "
"valid=False total_forms=1> don't match."
)
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormSetError(TestFormset.invalid(), 0, "field", [])
self.assertIn("['invalid value'] != []", str(ctx.exception))
def test_non_field_errors(self):
self.assertFormSetError(
TestFormset.invalid(nonfield=True), 0, None, "non-field error"
)
def test_different_non_field_errors(self):
msg = (
"The non-field errors of form 0 of formset <TestFormset: bound=True "
"valid=False total_forms=1> don't match."
)
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormSetError(
TestFormset.invalid(nonfield=True), 0, None, "other non-field error"
)
self.assertIn(
"['non-field error'] != ['other non-field error']", str(ctx.exception)
)
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormSetError(
TestFormset.invalid(nonfield=True),
0,
None,
"other non-field error",
msg_prefix=msg_prefix,
)
def test_no_non_field_errors(self):
msg = (
"The non-field errors of form 0 of formset <TestFormset: bound=True "
"valid=False total_forms=1> don't match."
)
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormSetError(TestFormset.invalid(), 0, None, "non-field error")
self.assertIn("[] != ['non-field error']", str(ctx.exception))
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormSetError(
TestFormset.invalid(), 0, None, "non-field error", msg_prefix=msg_prefix
)
def test_non_form_errors(self):
self.assertFormSetError(TestFormset.invalid(nonform=True), None, None, "error")
def test_different_non_form_errors(self):
msg = (
"The non-form errors of formset <TestFormset: bound=True valid=False "
"total_forms=0> don't match."
)
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormSetError(
TestFormset.invalid(nonform=True), None, None, "other error"
)
self.assertIn("['error'] != ['other error']", str(ctx.exception))
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormSetError(
TestFormset.invalid(nonform=True),
None,
None,
"other error",
msg_prefix=msg_prefix,
)
def test_no_non_form_errors(self):
msg = (
"The non-form errors of formset <TestFormset: bound=True valid=False "
"total_forms=1> don't match."
)
with self.assertRaisesMessage(AssertionError, msg) as ctx:
self.assertFormSetError(TestFormset.invalid(), None, None, "error")
self.assertIn("[] != ['error']", str(ctx.exception))
msg_prefix = "Custom prefix"
with self.assertRaisesMessage(AssertionError, f"{msg_prefix}: {msg}"):
self.assertFormSetError(
TestFormset.invalid(),
None,
None,
"error",
msg_prefix=msg_prefix,
)
def test_non_form_errors_with_field(self):
msg = "You must use field=None with form_index=None."
with self.assertRaisesMessage(ValueError, msg):
self.assertFormSetError(
TestFormset.invalid(nonform=True), None, "field", "error"
)
def test_form_index_too_big(self):
msg = (
"The formset <TestFormset: bound=True valid=False total_forms=1> only has "
"1 form."
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormSetError(TestFormset.invalid(), 2, "field", "error")
def test_form_index_too_big_plural(self):
formset = TestFormset(
{
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "0",
"form-0-field": "valid",
"form-1-field": "valid",
}
)
formset.full_clean()
msg = (
"The formset <TestFormset: bound=True valid=True total_forms=2> only has 2 "
"forms."
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormSetError(formset, 2, "field", "error")
# RemovedInDjango50Warning
class AssertFormErrorDeprecationTests(SimpleTestCase):
"""
Exhaustively test all possible combinations of args/kwargs for the old
signature.
"""
@ignore_warnings(category=RemovedInDjango50Warning)
def test_assert_form_error_errors_none(self):
msg = (
"The errors of field 'field' on form <TestForm bound=True, valid=False, "
"fields=(field)> don't match."
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormError(TestForm.invalid(), "field", None)
def test_assert_form_error_errors_none_warning(self):
msg = (
"Passing errors=None to assertFormError() is deprecated, use "
"errors=[] instead."
)
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
self.assertFormError(TestForm.valid(), "field", None)
def _assert_form_error_old_api_cases(self, form, field, errors, msg_prefix):
response = mock.Mock(context=[{"form": TestForm.invalid()}])
return (
((response, form, field, errors), {}),
((response, form, field, errors, msg_prefix), {}),
((response, form, field, errors), {"msg_prefix": msg_prefix}),
((response, form, field), {"errors": errors}),
((response, form, field), {"errors": errors, "msg_prefix": msg_prefix}),
((response, form), {"field": field, "errors": errors}),
(
(response, form),
{"field": field, "errors": errors, "msg_prefix": msg_prefix},
),
((response,), {"form": form, "field": field, "errors": errors}),
(
(response,),
{
"form": form,
"field": field,
"errors": errors,
"msg_prefix": msg_prefix,
},
),
(
(),
{"response": response, "form": form, "field": field, "errors": errors},
),
(
(),
{
"response": response,
"form": form,
"field": field,
"errors": errors,
"msg_prefix": msg_prefix,
},
),
)
def test_assert_form_error_old_api(self):
deprecation_msg = (
"Passing response to assertFormError() is deprecated. Use the form object "
"directly: assertFormError(response.context['form'], 'field', ...)"
)
for args, kwargs in self._assert_form_error_old_api_cases(
form="form",
field="field",
errors=["invalid value"],
msg_prefix="Custom prefix",
):
with self.subTest(args=args, kwargs=kwargs):
with self.assertWarnsMessage(RemovedInDjango50Warning, deprecation_msg):
self.assertFormError(*args, **kwargs)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_assert_form_error_old_api_assertion_error(self):
for args, kwargs in self._assert_form_error_old_api_cases(
form="form",
field="field",
errors=["other error"],
msg_prefix="Custom prefix",
):
with self.subTest(args=args, kwargs=kwargs):
with self.assertRaises(AssertionError):
self.assertFormError(*args, **kwargs)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_assert_formset_error_errors_none(self):
msg = (
"The errors of field 'field' on form 0 of formset <TestFormset: bound=True "
"valid=False total_forms=1> don't match."
)
with self.assertRaisesMessage(AssertionError, msg):
self.assertFormSetError(TestFormset.invalid(), 0, "field", None)
def test_assert_formset_error_errors_none_warning(self):
msg = (
"Passing errors=None to assertFormSetError() is deprecated, use "
"errors=[] instead."
)
with self.assertWarnsMessage(RemovedInDjango50Warning, msg):
self.assertFormSetError(TestFormset.valid(), 0, "field", None)
def _assert_formset_error_old_api_cases(
self, formset, form_index, field, errors, msg_prefix
):
response = mock.Mock(context=[{"formset": TestFormset.invalid()}])
return (
((response, formset, form_index, field, errors), {}),
((response, formset, form_index, field, errors, msg_prefix), {}),
(
(response, formset, form_index, field, errors),
{"msg_prefix": msg_prefix},
),
((response, formset, form_index, field), {"errors": errors}),
(
(response, formset, form_index, field),
{"errors": errors, "msg_prefix": msg_prefix},
),
((response, formset, form_index), {"field": field, "errors": errors}),
(
(response, formset, form_index),
{"field": field, "errors": errors, "msg_prefix": msg_prefix},
),
(
(response, formset),
{"form_index": form_index, "field": field, "errors": errors},
),
(
(response, formset),
{
"form_index": form_index,
"field": field,
"errors": errors,
"msg_prefix": msg_prefix,
},
),
(
(response,),
{
"formset": formset,
"form_index": form_index,
"field": field,
"errors": errors,
},
),
(
(response,),
{
"formset": formset,
"form_index": form_index,
"field": field,
"errors": errors,
"msg_prefix": msg_prefix,
},
),
(
(),
{
"response": response,
"formset": formset,
"form_index": form_index,
"field": field,
"errors": errors,
},
),
(
(),
{
"response": response,
"formset": formset,
"form_index": form_index,
"field": field,
"errors": errors,
"msg_prefix": msg_prefix,
},
),
)
def test_assert_formset_error_old_api(self):
deprecation_msg = (
"Passing response to assertFormSetError() is deprecated. Use the formset "
"object directly: assertFormSetError(response.context['formset'], 0, ...)"
)
for args, kwargs in self._assert_formset_error_old_api_cases(
formset="formset",
form_index=0,
field="field",
errors=["invalid value"],
msg_prefix="Custom prefix",
):
with self.subTest(args=args, kwargs=kwargs):
with self.assertWarnsMessage(RemovedInDjango50Warning, deprecation_msg):
self.assertFormSetError(*args, **kwargs)
@ignore_warnings(category=RemovedInDjango50Warning)
def test_assert_formset_error_old_api_assertion_error(self):
for args, kwargs in self._assert_formset_error_old_api_cases(
formset="formset",
form_index=0,
field="field",
errors=["other error"],
msg_prefix="Custom prefix",
):
with self.subTest(args=args, kwargs=kwargs):
with self.assertRaises(AssertionError):
self.assertFormSetError(*args, **kwargs)
class FirstUrls:
urlpatterns = [path("first/", empty_response, name="first")]
class SecondUrls:
urlpatterns = [path("second/", empty_response, name="second")]
class SetupTestEnvironmentTests(SimpleTestCase):
def test_setup_test_environment_calling_more_than_once(self):
with self.assertRaisesMessage(
RuntimeError, "setup_test_environment() was already called"
):
setup_test_environment()
def test_allowed_hosts(self):
for type_ in (list, tuple):
with self.subTest(type_=type_):
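                # "*" is a single character, so type_("*") builds the
                # one-element container ["*"] or ("*",).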
allowed_hosts = type_("*")
with mock.patch("django.test.utils._TestState") as x:
del x.saved_data
with self.settings(ALLOWED_HOSTS=allowed_hosts):
setup_test_environment()
self.assertEqual(settings.ALLOWED_HOSTS, ["*", "testserver"])
class OverrideSettingsTests(SimpleTestCase):
# #21518 -- If neither override_settings nor a setting_changed receiver
    # clears the URL cache between tests, then one of test_urlconf_first or
    # test_urlconf_second will fail.
@override_settings(ROOT_URLCONF=FirstUrls)
def test_urlconf_first(self):
reverse("first")
@override_settings(ROOT_URLCONF=SecondUrls)
def test_urlconf_second(self):
reverse("second")
def test_urlconf_cache(self):
with self.assertRaises(NoReverseMatch):
reverse("first")
with self.assertRaises(NoReverseMatch):
reverse("second")
with override_settings(ROOT_URLCONF=FirstUrls):
self.client.get(reverse("first"))
with self.assertRaises(NoReverseMatch):
reverse("second")
with override_settings(ROOT_URLCONF=SecondUrls):
with self.assertRaises(NoReverseMatch):
reverse("first")
self.client.get(reverse("second"))
self.client.get(reverse("first"))
with self.assertRaises(NoReverseMatch):
reverse("second")
with self.assertRaises(NoReverseMatch):
reverse("first")
with self.assertRaises(NoReverseMatch):
reverse("second")
def test_override_media_root(self):
"""
Overriding the MEDIA_ROOT setting should be reflected in the
base_location attribute of django.core.files.storage.default_storage.
"""
self.assertEqual(default_storage.base_location, "")
with self.settings(MEDIA_ROOT="test_value"):
self.assertEqual(default_storage.base_location, "test_value")
def test_override_media_url(self):
"""
Overriding the MEDIA_URL setting should be reflected in the
base_url attribute of django.core.files.storage.default_storage.
"""
self.assertEqual(default_storage.base_location, "")
with self.settings(MEDIA_URL="/test_value/"):
self.assertEqual(default_storage.base_url, "/test_value/")
def test_override_file_upload_permissions(self):
"""
Overriding the FILE_UPLOAD_PERMISSIONS setting should be reflected in
the file_permissions_mode attribute of
django.core.files.storage.default_storage.
"""
self.assertEqual(default_storage.file_permissions_mode, 0o644)
with self.settings(FILE_UPLOAD_PERMISSIONS=0o777):
self.assertEqual(default_storage.file_permissions_mode, 0o777)
def test_override_file_upload_directory_permissions(self):
"""
Overriding the FILE_UPLOAD_DIRECTORY_PERMISSIONS setting should be
reflected in the directory_permissions_mode attribute of
django.core.files.storage.default_storage.
"""
self.assertIsNone(default_storage.directory_permissions_mode)
with self.settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o777):
self.assertEqual(default_storage.directory_permissions_mode, 0o777)
def test_override_database_routers(self):
"""
Overriding DATABASE_ROUTERS should update the base router.
"""
test_routers = [object()]
with self.settings(DATABASE_ROUTERS=test_routers):
self.assertEqual(router.routers, test_routers)
def test_override_static_url(self):
"""
Overriding the STATIC_URL setting should be reflected in the
base_url attribute of
django.contrib.staticfiles.storage.staticfiles_storage.
"""
with self.settings(STATIC_URL="/test/"):
self.assertEqual(staticfiles_storage.base_url, "/test/")
def test_override_static_root(self):
"""
Overriding the STATIC_ROOT setting should be reflected in the
location attribute of
django.contrib.staticfiles.storage.staticfiles_storage.
"""
with self.settings(STATIC_ROOT="/tmp/test"):
self.assertEqual(staticfiles_storage.location, os.path.abspath("/tmp/test"))
def test_override_staticfiles_storage(self):
"""
Overriding the STATICFILES_STORAGE setting should be reflected in
the value of django.contrib.staticfiles.storage.staticfiles_storage.
"""
new_class = "ManifestStaticFilesStorage"
new_storage = "django.contrib.staticfiles.storage." + new_class
with self.settings(STATICFILES_STORAGE=new_storage):
self.assertEqual(staticfiles_storage.__class__.__name__, new_class)
def test_override_staticfiles_finders(self):
"""
Overriding the STATICFILES_FINDERS setting should be reflected in
the return value of django.contrib.staticfiles.finders.get_finders.
"""
current = get_finders()
self.assertGreater(len(list(current)), 1)
finders = ["django.contrib.staticfiles.finders.FileSystemFinder"]
with self.settings(STATICFILES_FINDERS=finders):
self.assertEqual(len(list(get_finders())), len(finders))
def test_override_staticfiles_dirs(self):
"""
Overriding the STATICFILES_DIRS setting should be reflected in
the locations attribute of the
django.contrib.staticfiles.finders.FileSystemFinder instance.
"""
finder = get_finder("django.contrib.staticfiles.finders.FileSystemFinder")
test_path = "/tmp/test"
expected_location = ("", test_path)
self.assertNotIn(expected_location, finder.locations)
with self.settings(STATICFILES_DIRS=[test_path]):
finder = get_finder("django.contrib.staticfiles.finders.FileSystemFinder")
self.assertIn(expected_location, finder.locations)
@skipUnlessDBFeature("supports_transactions")
class TestBadSetUpTestData(TestCase):
"""
An exception in setUpTestData() shouldn't leak a transaction which would
cascade across the rest of the test suite.
"""
class MyException(Exception):
pass
@classmethod
def setUpClass(cls):
try:
super().setUpClass()
except cls.MyException:
cls._in_atomic_block = connection.in_atomic_block
@classmethod
    def tearDownClass(cls):
        # Override to avoid a second cls._rollback_atomics() call, which would
        # fail. Normal setUpClass() methods won't have exception handling, so
        # this method wouldn't typically be run.
pass
@classmethod
def setUpTestData(cls):
# Simulate a broken setUpTestData() method.
raise cls.MyException()
def test_failure_in_setUpTestData_should_rollback_transaction(self):
# setUpTestData() should call _rollback_atomics() so that the
# transaction doesn't leak.
self.assertFalse(self._in_atomic_block)
@skipUnlessDBFeature("supports_transactions")
class CaptureOnCommitCallbacksTests(TestCase):
databases = {"default", "other"}
callback_called = False
def enqueue_callback(self, using="default"):
def hook():
self.callback_called = True
transaction.on_commit(hook, using=using)
def test_no_arguments(self):
with self.captureOnCommitCallbacks() as callbacks:
self.enqueue_callback()
self.assertEqual(len(callbacks), 1)
self.assertIs(self.callback_called, False)
callbacks[0]()
self.assertIs(self.callback_called, True)
def test_using(self):
with self.captureOnCommitCallbacks(using="other") as callbacks:
self.enqueue_callback(using="other")
self.assertEqual(len(callbacks), 1)
self.assertIs(self.callback_called, False)
callbacks[0]()
self.assertIs(self.callback_called, True)
def test_different_using(self):
with self.captureOnCommitCallbacks(using="default") as callbacks:
self.enqueue_callback(using="other")
self.assertEqual(callbacks, [])
def test_execute(self):
with self.captureOnCommitCallbacks(execute=True) as callbacks:
self.enqueue_callback()
self.assertEqual(len(callbacks), 1)
self.assertIs(self.callback_called, True)
def test_pre_callback(self):
def pre_hook():
pass
transaction.on_commit(pre_hook, using="default")
with self.captureOnCommitCallbacks() as callbacks:
self.enqueue_callback()
self.assertEqual(len(callbacks), 1)
self.assertNotEqual(callbacks[0], pre_hook)
def test_with_rolled_back_savepoint(self):
with self.captureOnCommitCallbacks() as callbacks:
try:
with transaction.atomic():
self.enqueue_callback()
raise IntegrityError
except IntegrityError:
# Inner transaction.atomic() has been rolled back.
pass
self.assertEqual(callbacks, [])
def test_execute_recursive(self):
with self.captureOnCommitCallbacks(execute=True) as callbacks:
transaction.on_commit(self.enqueue_callback)
self.assertEqual(len(callbacks), 2)
self.assertIs(self.callback_called, True)
def test_execute_tree(self):
"""
A visualisation of the callback tree tested. Each node is expected to
be visited only once:
└─branch_1
├─branch_2
│ ├─leaf_1
│ └─leaf_2
└─leaf_3
"""
branch_1_call_counter = 0
branch_2_call_counter = 0
leaf_1_call_counter = 0
leaf_2_call_counter = 0
leaf_3_call_counter = 0
def leaf_1():
nonlocal leaf_1_call_counter
leaf_1_call_counter += 1
def leaf_2():
nonlocal leaf_2_call_counter
leaf_2_call_counter += 1
def leaf_3():
nonlocal leaf_3_call_counter
leaf_3_call_counter += 1
def branch_1():
nonlocal branch_1_call_counter
branch_1_call_counter += 1
transaction.on_commit(branch_2)
transaction.on_commit(leaf_3)
def branch_2():
nonlocal branch_2_call_counter
branch_2_call_counter += 1
transaction.on_commit(leaf_1)
transaction.on_commit(leaf_2)
with self.captureOnCommitCallbacks(execute=True) as callbacks:
transaction.on_commit(branch_1)
self.assertEqual(branch_1_call_counter, 1)
self.assertEqual(branch_2_call_counter, 1)
self.assertEqual(leaf_1_call_counter, 1)
self.assertEqual(leaf_2_call_counter, 1)
self.assertEqual(leaf_3_call_counter, 1)
self.assertEqual(callbacks, [branch_1, branch_2, leaf_3, leaf_1, leaf_2])
def test_execute_robust(self):
class MyException(Exception):
pass
def hook():
self.callback_called = True
raise MyException("robust callback")
with self.assertLogs("django.test", "ERROR") as cm:
with self.captureOnCommitCallbacks(execute=True) as callbacks:
transaction.on_commit(hook, robust=True)
self.assertEqual(len(callbacks), 1)
self.assertIs(self.callback_called, True)
log_record = cm.records[0]
self.assertEqual(
log_record.getMessage(),
"Error calling CaptureOnCommitCallbacksTests.test_execute_robust.<locals>."
"hook in on_commit() (robust callback).",
)
self.assertIsNotNone(log_record.exc_info)
raised_exception = log_record.exc_info[1]
self.assertIsInstance(raised_exception, MyException)
self.assertEqual(str(raised_exception), "robust callback")
class DisallowedDatabaseQueriesTests(SimpleTestCase):
def test_disallowed_database_connections(self):
expected_message = (
"Database connections to 'default' are not allowed in SimpleTestCase "
"subclasses. Either subclass TestCase or TransactionTestCase to "
"ensure proper test isolation or add 'default' to "
"test_utils.tests.DisallowedDatabaseQueriesTests.databases to "
"silence this failure."
)
with self.assertRaisesMessage(DatabaseOperationForbidden, expected_message):
connection.connect()
with self.assertRaisesMessage(DatabaseOperationForbidden, expected_message):
connection.temporary_connection()
def test_disallowed_database_queries(self):
expected_message = (
"Database queries to 'default' are not allowed in SimpleTestCase "
"subclasses. Either subclass TestCase or TransactionTestCase to "
"ensure proper test isolation or add 'default' to "
"test_utils.tests.DisallowedDatabaseQueriesTests.databases to "
"silence this failure."
)
with self.assertRaisesMessage(DatabaseOperationForbidden, expected_message):
Car.objects.first()
def test_disallowed_database_chunked_cursor_queries(self):
expected_message = (
"Database queries to 'default' are not allowed in SimpleTestCase "
"subclasses. Either subclass TestCase or TransactionTestCase to "
"ensure proper test isolation or add 'default' to "
"test_utils.tests.DisallowedDatabaseQueriesTests.databases to "
"silence this failure."
)
with self.assertRaisesMessage(DatabaseOperationForbidden, expected_message):
next(Car.objects.iterator())
class AllowedDatabaseQueriesTests(SimpleTestCase):
databases = {"default"}
def test_allowed_database_queries(self):
Car.objects.first()
def test_allowed_database_chunked_cursor_queries(self):
next(Car.objects.iterator(), None)
class DatabaseAliasTests(SimpleTestCase):
def setUp(self):
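        # The tests below mutate the class-level "databases" attribute, so
        # restore the original value once each test finishes.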
self.addCleanup(setattr, self.__class__, "databases", self.databases)
def test_no_close_match(self):
self.__class__.databases = {"void"}
message = (
"test_utils.tests.DatabaseAliasTests.databases refers to 'void' which is "
"not defined in settings.DATABASES."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
self._validate_databases()
def test_close_match(self):
self.__class__.databases = {"defualt"}
message = (
"test_utils.tests.DatabaseAliasTests.databases refers to 'defualt' which "
"is not defined in settings.DATABASES. Did you mean 'default'?"
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
self._validate_databases()
def test_match(self):
self.__class__.databases = {"default", "other"}
self.assertEqual(self._validate_databases(), frozenset({"default", "other"}))
def test_all(self):
self.__class__.databases = "__all__"
self.assertEqual(self._validate_databases(), frozenset(connections))
@isolate_apps("test_utils", attr_name="class_apps")
class IsolatedAppsTests(SimpleTestCase):
def test_installed_apps(self):
self.assertEqual(
[app_config.label for app_config in self.class_apps.get_app_configs()],
["test_utils"],
)
def test_class_decoration(self):
class ClassDecoration(models.Model):
pass
self.assertEqual(ClassDecoration._meta.apps, self.class_apps)
@isolate_apps("test_utils", kwarg_name="method_apps")
def test_method_decoration(self, method_apps):
class MethodDecoration(models.Model):
pass
self.assertEqual(MethodDecoration._meta.apps, method_apps)
def test_context_manager(self):
with isolate_apps("test_utils") as context_apps:
class ContextManager(models.Model):
pass
self.assertEqual(ContextManager._meta.apps, context_apps)
@isolate_apps("test_utils", kwarg_name="method_apps")
def test_nested(self, method_apps):
class MethodDecoration(models.Model):
pass
with isolate_apps("test_utils") as context_apps:
class ContextManager(models.Model):
pass
with isolate_apps("test_utils") as nested_context_apps:
class NestedContextManager(models.Model):
pass
self.assertEqual(MethodDecoration._meta.apps, method_apps)
self.assertEqual(ContextManager._meta.apps, context_apps)
self.assertEqual(NestedContextManager._meta.apps, nested_context_apps)
class DoNothingDecorator(TestContextDecorator):
def enable(self):
pass
def disable(self):
pass
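# A rough sketch of how a TestContextDecorator subclass is meant to be used:
# applied to a test class, enable() runs before each test's setUp() and
# disable() is registered as a cleanup, so it runs after tearDown():
#
#   @DoNothingDecorator()
#   class DecoratedTests(SimpleTestCase):
#       ...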
class TestContextDecoratorTests(SimpleTestCase):
@mock.patch.object(DoNothingDecorator, "disable")
def test_exception_in_setup(self, mock_disable):
"""An exception is setUp() is reraised after disable() is called."""
class ExceptionInSetUp(unittest.TestCase):
def setUp(self):
raise NotImplementedError("reraised")
decorator = DoNothingDecorator()
decorated_test_class = decorator.__call__(ExceptionInSetUp)()
self.assertFalse(mock_disable.called)
with self.assertRaisesMessage(NotImplementedError, "reraised"):
decorated_test_class.setUp()
decorated_test_class.doCleanups()
self.assertTrue(mock_disable.called)
def test_cleanups_run_after_tearDown(self):
calls = []
class SaveCallsDecorator(TestContextDecorator):
def enable(self):
calls.append("enable")
def disable(self):
calls.append("disable")
class AddCleanupInSetUp(unittest.TestCase):
def setUp(self):
calls.append("setUp")
self.addCleanup(lambda: calls.append("cleanup"))
decorator = SaveCallsDecorator()
decorated_test_class = decorator.__call__(AddCleanupInSetUp)()
decorated_test_class.setUp()
decorated_test_class.tearDown()
decorated_test_class.doCleanups()
self.assertEqual(calls, ["enable", "setUp", "cleanup", "disable"])
|
50fe8b63d0f66751ccf5f378cd14a74917650f02f0ac07bde585b710c699b6e6 | import operator
from django.db import DatabaseError, NotSupportedError, connection
from django.db.models import Exists, F, IntegerField, OuterRef, Subquery, Value
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.test.utils import CaptureQueriesContext
from .models import Author, Celebrity, ExtraInfo, Number, ReservedName
@skipUnlessDBFeature("supports_select_union")
class QuerySetSetOperationTests(TestCase):
@classmethod
def setUpTestData(cls):
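        # Ten rows where num runs 0..9 while other_num runs 10..1, so the two
        # columns order the rows in opposite directions.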
Number.objects.bulk_create(Number(num=i, other_num=10 - i) for i in range(10))
def assertNumbersEqual(self, queryset, expected_numbers, ordered=True):
self.assertQuerySetEqual(
queryset, expected_numbers, operator.attrgetter("num"), ordered
)
def test_simple_union(self):
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=8)
qs3 = Number.objects.filter(num=5)
self.assertNumbersEqual(qs1.union(qs2, qs3), [0, 1, 5, 8, 9], ordered=False)
@skipUnlessDBFeature("supports_select_intersection")
def test_simple_intersection(self):
qs1 = Number.objects.filter(num__lte=5)
qs2 = Number.objects.filter(num__gte=5)
qs3 = Number.objects.filter(num__gte=4, num__lte=6)
self.assertNumbersEqual(qs1.intersection(qs2, qs3), [5], ordered=False)
@skipUnlessDBFeature("supports_select_intersection")
def test_intersection_with_values(self):
ReservedName.objects.create(name="a", order=2)
qs1 = ReservedName.objects.all()
reserved_name = qs1.intersection(qs1).values("name", "order", "id").get()
self.assertEqual(reserved_name["name"], "a")
self.assertEqual(reserved_name["order"], 2)
reserved_name = qs1.intersection(qs1).values_list("name", "order", "id").get()
self.assertEqual(reserved_name[:2], ("a", 2))
@skipUnlessDBFeature("supports_select_difference")
def test_simple_difference(self):
qs1 = Number.objects.filter(num__lte=5)
qs2 = Number.objects.filter(num__lte=4)
self.assertNumbersEqual(qs1.difference(qs2), [5], ordered=False)
def test_union_distinct(self):
qs1 = Number.objects.all()
qs2 = Number.objects.all()
self.assertEqual(len(list(qs1.union(qs2, all=True))), 20)
self.assertEqual(len(list(qs1.union(qs2))), 10)
def test_union_none(self):
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=8)
qs3 = qs1.union(qs2)
self.assertSequenceEqual(qs3.none(), [])
self.assertNumbersEqual(qs3, [0, 1, 8, 9], ordered=False)
def test_union_none_slice(self):
qs1 = Number.objects.filter(num__lte=0)
qs2 = Number.objects.none()
qs3 = qs1.union(qs2)
self.assertNumbersEqual(qs3[:1], [0])
def test_union_empty_filter_slice(self):
qs1 = Number.objects.filter(num__lte=0)
qs2 = Number.objects.filter(pk__in=[])
qs3 = qs1.union(qs2)
self.assertNumbersEqual(qs3[:1], [0])
@skipUnlessDBFeature("supports_slicing_ordering_in_compound")
def test_union_slice_compound_empty(self):
qs1 = Number.objects.filter(num__lte=0)[:1]
qs2 = Number.objects.none()
qs3 = qs1.union(qs2)
self.assertNumbersEqual(qs3[:1], [0])
@skipUnlessDBFeature("supports_slicing_ordering_in_compound")
def test_union_combined_slice_compound_empty(self):
qs1 = Number.objects.filter(num__lte=2)[:3]
qs2 = Number.objects.none()
qs3 = qs1.union(qs2)
self.assertNumbersEqual(qs3.order_by("num")[2:3], [2])
def test_union_order_with_null_first_last(self):
Number.objects.filter(other_num=5).update(other_num=None)
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=2)
qs3 = qs1.union(qs2)
self.assertSequenceEqual(
qs3.order_by(
F("other_num").asc(nulls_first=True),
).values_list("other_num", flat=True),
[None, 1, 2, 3, 4, 6, 7, 8, 9, 10],
)
self.assertSequenceEqual(
qs3.order_by(
F("other_num").asc(nulls_last=True),
).values_list("other_num", flat=True),
[1, 2, 3, 4, 6, 7, 8, 9, 10, None],
)
@skipUnlessDBFeature("supports_select_intersection")
def test_intersection_with_empty_qs(self):
qs1 = Number.objects.all()
qs2 = Number.objects.none()
qs3 = Number.objects.filter(pk__in=[])
self.assertEqual(len(qs1.intersection(qs2)), 0)
self.assertEqual(len(qs1.intersection(qs3)), 0)
self.assertEqual(len(qs2.intersection(qs1)), 0)
self.assertEqual(len(qs3.intersection(qs1)), 0)
self.assertEqual(len(qs2.intersection(qs2)), 0)
self.assertEqual(len(qs3.intersection(qs3)), 0)
@skipUnlessDBFeature("supports_select_difference")
def test_difference_with_empty_qs(self):
qs1 = Number.objects.all()
qs2 = Number.objects.none()
qs3 = Number.objects.filter(pk__in=[])
self.assertEqual(len(qs1.difference(qs2)), 10)
self.assertEqual(len(qs1.difference(qs3)), 10)
self.assertEqual(len(qs2.difference(qs1)), 0)
self.assertEqual(len(qs3.difference(qs1)), 0)
self.assertEqual(len(qs2.difference(qs2)), 0)
self.assertEqual(len(qs3.difference(qs3)), 0)
@skipUnlessDBFeature("supports_select_difference")
def test_difference_with_values(self):
ReservedName.objects.create(name="a", order=2)
qs1 = ReservedName.objects.all()
qs2 = ReservedName.objects.none()
reserved_name = qs1.difference(qs2).values("name", "order", "id").get()
self.assertEqual(reserved_name["name"], "a")
self.assertEqual(reserved_name["order"], 2)
reserved_name = qs1.difference(qs2).values_list("name", "order", "id").get()
self.assertEqual(reserved_name[:2], ("a", 2))
def test_union_with_empty_qs(self):
qs1 = Number.objects.all()
qs2 = Number.objects.none()
qs3 = Number.objects.filter(pk__in=[])
self.assertEqual(len(qs1.union(qs2)), 10)
self.assertEqual(len(qs2.union(qs1)), 10)
self.assertEqual(len(qs1.union(qs3)), 10)
self.assertEqual(len(qs3.union(qs1)), 10)
self.assertEqual(len(qs2.union(qs1, qs1, qs1)), 10)
self.assertEqual(len(qs2.union(qs1, qs1, all=True)), 20)
self.assertEqual(len(qs2.union(qs2)), 0)
self.assertEqual(len(qs3.union(qs3)), 0)
def test_empty_qs_union_with_ordered_qs(self):
qs1 = Number.objects.order_by("num")
qs2 = Number.objects.none().union(qs1).order_by("num")
self.assertEqual(list(qs1), list(qs2))
def test_limits(self):
qs1 = Number.objects.all()
qs2 = Number.objects.all()
self.assertEqual(len(list(qs1.union(qs2)[:2])), 2)
def test_ordering(self):
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=2, num__lte=3)
self.assertNumbersEqual(qs1.union(qs2).order_by("-num"), [3, 2, 1, 0])
def test_ordering_by_alias(self):
qs1 = Number.objects.filter(num__lte=1).values(alias=F("num"))
qs2 = Number.objects.filter(num__gte=2, num__lte=3).values(alias=F("num"))
self.assertQuerySetEqual(
qs1.union(qs2).order_by("-alias"),
[3, 2, 1, 0],
operator.itemgetter("alias"),
)
def test_ordering_by_f_expression(self):
qs1 = Number.objects.filter(num__lte=1)
qs2 = Number.objects.filter(num__gte=2, num__lte=3)
self.assertNumbersEqual(qs1.union(qs2).order_by(F("num").desc()), [3, 2, 1, 0])
def test_ordering_by_f_expression_and_alias(self):
qs1 = Number.objects.filter(num__lte=1).values(alias=F("other_num"))
qs2 = Number.objects.filter(num__gte=2, num__lte=3).values(alias=F("other_num"))
self.assertQuerySetEqual(
qs1.union(qs2).order_by(F("alias").desc()),
[10, 9, 8, 7],
operator.itemgetter("alias"),
)
Number.objects.create(num=-1)
self.assertQuerySetEqual(
qs1.union(qs2).order_by(F("alias").desc(nulls_last=True)),
[10, 9, 8, 7, None],
operator.itemgetter("alias"),
)
def test_union_with_values(self):
ReservedName.objects.create(name="a", order=2)
qs1 = ReservedName.objects.all()
reserved_name = qs1.union(qs1).values("name", "order", "id").get()
self.assertEqual(reserved_name["name"], "a")
self.assertEqual(reserved_name["order"], 2)
reserved_name = qs1.union(qs1).values_list("name", "order", "id").get()
self.assertEqual(reserved_name[:2], ("a", 2))
# List of columns can be changed.
reserved_name = qs1.union(qs1).values_list("order").get()
self.assertEqual(reserved_name, (2,))
def test_union_with_two_annotated_values_list(self):
qs1 = (
Number.objects.filter(num=1)
.annotate(
count=Value(0, IntegerField()),
)
.values_list("num", "count")
)
qs2 = (
Number.objects.filter(num=2)
.values("pk")
.annotate(
count=F("num"),
)
.annotate(
num=Value(1, IntegerField()),
)
.values_list("num", "count")
)
self.assertCountEqual(qs1.union(qs2), [(1, 0), (2, 1)])
def test_union_with_extra_and_values_list(self):
qs1 = (
Number.objects.filter(num=1)
.extra(
select={"count": 0},
)
.values_list("num", "count")
)
qs2 = Number.objects.filter(num=2).extra(select={"count": 1})
self.assertCountEqual(qs1.union(qs2), [(1, 0), (2, 1)])
def test_union_with_values_list_on_annotated_and_unannotated(self):
ReservedName.objects.create(name="rn1", order=1)
qs1 = Number.objects.annotate(
has_reserved_name=Exists(ReservedName.objects.filter(order=OuterRef("num")))
).filter(has_reserved_name=True)
qs2 = Number.objects.filter(num=9)
self.assertCountEqual(qs1.union(qs2).values_list("num", flat=True), [1, 9])
def test_union_with_values_list_and_order(self):
ReservedName.objects.bulk_create(
[
ReservedName(name="rn1", order=7),
ReservedName(name="rn2", order=5),
ReservedName(name="rn0", order=6),
ReservedName(name="rn9", order=-1),
]
)
qs1 = ReservedName.objects.filter(order__gte=6)
qs2 = ReservedName.objects.filter(order__lte=5)
union_qs = qs1.union(qs2)
for qs, expected_result in (
# Order by a single column.
(union_qs.order_by("-pk").values_list("order", flat=True), [-1, 6, 5, 7]),
(union_qs.order_by("pk").values_list("order", flat=True), [7, 5, 6, -1]),
(union_qs.values_list("order", flat=True).order_by("-pk"), [-1, 6, 5, 7]),
(union_qs.values_list("order", flat=True).order_by("pk"), [7, 5, 6, -1]),
# Order by multiple columns.
(
union_qs.order_by("-name", "pk").values_list("order", flat=True),
[-1, 5, 7, 6],
),
(
union_qs.values_list("order", flat=True).order_by("-name", "pk"),
[-1, 5, 7, 6],
),
):
with self.subTest(qs=qs):
self.assertEqual(list(qs), expected_result)
def test_union_with_values_list_and_order_on_annotation(self):
qs1 = Number.objects.annotate(
annotation=Value(-1),
multiplier=F("annotation"),
).filter(num__gte=6)
qs2 = Number.objects.annotate(
annotation=Value(2),
multiplier=F("annotation"),
).filter(num__lte=5)
self.assertSequenceEqual(
qs1.union(qs2).order_by("annotation", "num").values_list("num", flat=True),
[6, 7, 8, 9, 0, 1, 2, 3, 4, 5],
)
self.assertQuerySetEqual(
qs1.union(qs2)
.order_by(
F("annotation") * F("multiplier"),
"num",
)
.values("num"),
[6, 7, 8, 9, 0, 1, 2, 3, 4, 5],
operator.itemgetter("num"),
)
def test_union_with_select_related_and_order(self):
e1 = ExtraInfo.objects.create(value=7, info="e1")
a1 = Author.objects.create(name="a1", num=1, extra=e1)
a2 = Author.objects.create(name="a2", num=3, extra=e1)
Author.objects.create(name="a3", num=2, extra=e1)
base_qs = Author.objects.select_related("extra").order_by()
qs1 = base_qs.filter(name="a1")
qs2 = base_qs.filter(name="a2")
self.assertSequenceEqual(qs1.union(qs2).order_by("pk"), [a1, a2])
@skipUnlessDBFeature("supports_slicing_ordering_in_compound")
def test_union_with_select_related_and_first(self):
e1 = ExtraInfo.objects.create(value=7, info="e1")
a1 = Author.objects.create(name="a1", num=1, extra=e1)
Author.objects.create(name="a2", num=3, extra=e1)
base_qs = Author.objects.select_related("extra")
qs1 = base_qs.filter(name="a1")
qs2 = base_qs.filter(name="a2")
self.assertEqual(qs1.union(qs2).first(), a1)
def test_union_with_first(self):
e1 = ExtraInfo.objects.create(value=7, info="e1")
a1 = Author.objects.create(name="a1", num=1, extra=e1)
base_qs = Author.objects.order_by()
qs1 = base_qs.filter(name="a1")
qs2 = base_qs.filter(name="a2")
self.assertEqual(qs1.union(qs2).first(), a1)
def test_union_multiple_models_with_values_list_and_order(self):
reserved_name = ReservedName.objects.create(name="rn1", order=0)
qs1 = Celebrity.objects.all()
qs2 = ReservedName.objects.all()
self.assertSequenceEqual(
qs1.union(qs2).order_by("name").values_list("pk", flat=True),
[reserved_name.pk],
)
def test_union_multiple_models_with_values_list_and_order_by_extra_select(self):
reserved_name = ReservedName.objects.create(name="rn1", order=0)
qs1 = Celebrity.objects.extra(select={"extra_name": "name"})
qs2 = ReservedName.objects.extra(select={"extra_name": "name"})
self.assertSequenceEqual(
qs1.union(qs2).order_by("extra_name").values_list("pk", flat=True),
[reserved_name.pk],
)
def test_union_in_subquery(self):
ReservedName.objects.bulk_create(
[
ReservedName(name="rn1", order=8),
ReservedName(name="rn2", order=1),
ReservedName(name="rn3", order=5),
]
)
qs1 = Number.objects.filter(num__gt=7, num=OuterRef("order"))
qs2 = Number.objects.filter(num__lt=2, num=OuterRef("order"))
self.assertCountEqual(
ReservedName.objects.annotate(
number=Subquery(qs1.union(qs2).values("num")),
)
.filter(number__isnull=False)
.values_list("order", flat=True),
[8, 1],
)
def test_union_in_subquery_related_outerref(self):
e1 = ExtraInfo.objects.create(value=7, info="e3")
e2 = ExtraInfo.objects.create(value=5, info="e2")
e3 = ExtraInfo.objects.create(value=1, info="e1")
Author.objects.bulk_create(
[
Author(name="a1", num=1, extra=e1),
Author(name="a2", num=3, extra=e2),
Author(name="a3", num=2, extra=e3),
]
)
qs1 = ExtraInfo.objects.order_by().filter(value=OuterRef("num"))
qs2 = ExtraInfo.objects.order_by().filter(value__lt=OuterRef("extra__value"))
qs = (
Author.objects.annotate(
info=Subquery(qs1.union(qs2).values("info")[:1]),
)
.filter(info__isnull=False)
.values_list("name", flat=True)
)
self.assertCountEqual(qs, ["a1", "a2"])
# Combined queries don't mutate.
self.assertCountEqual(qs, ["a1", "a2"])
@skipUnlessDBFeature("supports_slicing_ordering_in_compound")
def test_union_in_with_ordering(self):
qs1 = Number.objects.filter(num__gt=7).order_by("num")
qs2 = Number.objects.filter(num__lt=2).order_by("num")
self.assertNumbersEqual(
Number.objects.exclude(id__in=qs1.union(qs2).values("id")),
[2, 3, 4, 5, 6, 7],
ordered=False,
)
@skipUnlessDBFeature(
"supports_slicing_ordering_in_compound", "allow_sliced_subqueries_with_in"
)
def test_union_in_with_ordering_and_slice(self):
qs1 = Number.objects.filter(num__gt=7).order_by("num")[:1]
qs2 = Number.objects.filter(num__lt=2).order_by("-num")[:1]
self.assertNumbersEqual(
Number.objects.exclude(id__in=qs1.union(qs2).values("id")),
[0, 2, 3, 4, 5, 6, 7, 9],
ordered=False,
)
def test_count_union(self):
qs1 = Number.objects.filter(num__lte=1).values("num")
qs2 = Number.objects.filter(num__gte=2, num__lte=3).values("num")
self.assertEqual(qs1.union(qs2).count(), 4)
def test_count_union_empty_result(self):
qs = Number.objects.filter(pk__in=[])
self.assertEqual(qs.union(qs).count(), 0)
def test_count_union_with_select_related(self):
e1 = ExtraInfo.objects.create(value=1, info="e1")
Author.objects.create(name="a1", num=1, extra=e1)
qs = Author.objects.select_related("extra").order_by()
self.assertEqual(qs.union(qs).count(), 1)
@skipUnlessDBFeature("supports_select_difference")
def test_count_difference(self):
qs1 = Number.objects.filter(num__lt=10)
qs2 = Number.objects.filter(num__lt=9)
self.assertEqual(qs1.difference(qs2).count(), 1)
@skipUnlessDBFeature("supports_select_intersection")
def test_count_intersection(self):
qs1 = Number.objects.filter(num__gte=5)
qs2 = Number.objects.filter(num__lte=5)
self.assertEqual(qs1.intersection(qs2).count(), 1)
def test_exists_union(self):
qs1 = Number.objects.filter(num__gte=5)
qs2 = Number.objects.filter(num__lte=5)
with CaptureQueriesContext(connection) as context:
self.assertIs(qs1.union(qs2).exists(), True)
captured_queries = context.captured_queries
self.assertEqual(len(captured_queries), 1)
captured_sql = captured_queries[0]["sql"]
self.assertNotIn(
connection.ops.quote_name(Number._meta.pk.column),
captured_sql,
)
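        # exists() replaces the selected columns with a constant and applies
        # LIMIT 1; backends that support slicing in compound statements also
        # push the limit into each union branch, hence 3 occurrences of it.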
self.assertEqual(
captured_sql.count(connection.ops.limit_offset_sql(None, 1)),
3 if connection.features.supports_slicing_ordering_in_compound else 1,
)
def test_exists_union_empty_result(self):
qs = Number.objects.filter(pk__in=[])
self.assertIs(qs.union(qs).exists(), False)
@skipUnlessDBFeature("supports_select_intersection")
def test_exists_intersection(self):
qs1 = Number.objects.filter(num__gt=5)
qs2 = Number.objects.filter(num__lt=5)
self.assertIs(qs1.intersection(qs1).exists(), True)
self.assertIs(qs1.intersection(qs2).exists(), False)
@skipUnlessDBFeature("supports_select_difference")
def test_exists_difference(self):
qs1 = Number.objects.filter(num__gte=5)
qs2 = Number.objects.filter(num__gte=3)
self.assertIs(qs1.difference(qs2).exists(), False)
self.assertIs(qs2.difference(qs1).exists(), True)
def test_get_union(self):
qs = Number.objects.filter(num=2)
self.assertEqual(qs.union(qs).get().num, 2)
@skipUnlessDBFeature("supports_select_difference")
def test_get_difference(self):
qs1 = Number.objects.all()
qs2 = Number.objects.exclude(num=2)
self.assertEqual(qs1.difference(qs2).get().num, 2)
@skipUnlessDBFeature("supports_select_intersection")
def test_get_intersection(self):
qs1 = Number.objects.all()
qs2 = Number.objects.filter(num=2)
self.assertEqual(qs1.intersection(qs2).get().num, 2)
@skipUnlessDBFeature("supports_slicing_ordering_in_compound")
def test_ordering_subqueries(self):
qs1 = Number.objects.order_by("num")[:2]
qs2 = Number.objects.order_by("-num")[:2]
self.assertNumbersEqual(qs1.union(qs2).order_by("-num")[:4], [9, 8, 1, 0])
@skipIfDBFeature("supports_slicing_ordering_in_compound")
def test_unsupported_ordering_slicing_raises_db_error(self):
qs1 = Number.objects.all()
qs2 = Number.objects.all()
qs3 = Number.objects.all()
msg = "LIMIT/OFFSET not allowed in subqueries of compound statements"
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2[:10]))
msg = "ORDER BY not allowed in subqueries of compound statements"
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.order_by("id").union(qs2))
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by("id").union(qs3))
@skipIfDBFeature("supports_select_intersection")
def test_unsupported_intersection_raises_db_error(self):
qs1 = Number.objects.all()
qs2 = Number.objects.all()
msg = "intersection is not supported on this database backend"
with self.assertRaisesMessage(NotSupportedError, msg):
list(qs1.intersection(qs2))
def test_combining_multiple_models(self):
ReservedName.objects.create(name="99 little bugs", order=99)
qs1 = Number.objects.filter(num=1).values_list("num", flat=True)
qs2 = ReservedName.objects.values_list("order")
self.assertEqual(list(qs1.union(qs2).order_by("num")), [1, 99])
def test_order_raises_on_non_selected_column(self):
qs1 = (
Number.objects.filter()
.annotate(
annotation=Value(1, IntegerField()),
)
.values("annotation", num2=F("num"))
)
qs2 = Number.objects.filter().values("id", "num")
# Should not raise
list(qs1.union(qs2).order_by("annotation"))
list(qs1.union(qs2).order_by("num2"))
msg = "ORDER BY term does not match any column in the result set"
# 'id' is not part of the select
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by("id"))
        # 'num' was re-aliased to 'num2' in qs1's select list.
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by("num"))
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by(F("num")))
with self.assertRaisesMessage(DatabaseError, msg):
list(qs1.union(qs2).order_by(F("num").desc()))
        # With the querysets switched, 'num' is a selected column again:
list(qs2.union(qs1).order_by("num"))
@skipUnlessDBFeature("supports_select_difference", "supports_select_intersection")
def test_qs_with_subcompound_qs(self):
qs1 = Number.objects.all()
qs2 = Number.objects.intersection(Number.objects.filter(num__gt=1))
self.assertEqual(qs1.difference(qs2).count(), 2)
def test_order_by_same_type(self):
qs = Number.objects.all()
union = qs.union(qs)
numbers = list(range(10))
self.assertNumbersEqual(union.order_by("num"), numbers)
self.assertNumbersEqual(union.order_by("other_num"), reversed(numbers))
def test_unsupported_operations_on_combined_qs(self):
qs = Number.objects.all()
msg = "Calling QuerySet.%s() after %s() is not supported."
combinators = ["union"]
if connection.features.supports_select_difference:
combinators.append("difference")
if connection.features.supports_select_intersection:
combinators.append("intersection")
for combinator in combinators:
for operation in (
"alias",
"annotate",
"defer",
"delete",
"distinct",
"exclude",
"extra",
"filter",
"only",
"prefetch_related",
"select_related",
"update",
):
with self.subTest(combinator=combinator, operation=operation):
with self.assertRaisesMessage(
NotSupportedError,
msg % (operation, combinator),
):
getattr(getattr(qs, combinator)(qs), operation)()
with self.assertRaisesMessage(
NotSupportedError,
msg % ("contains", combinator),
):
obj = Number.objects.first()
getattr(qs, combinator)(qs).contains(obj)
def test_get_with_filters_unsupported_on_combined_qs(self):
qs = Number.objects.all()
msg = "Calling QuerySet.get(...) with filters after %s() is not supported."
combinators = ["union"]
if connection.features.supports_select_difference:
combinators.append("difference")
if connection.features.supports_select_intersection:
combinators.append("intersection")
for combinator in combinators:
with self.subTest(combinator=combinator):
with self.assertRaisesMessage(NotSupportedError, msg % combinator):
getattr(qs, combinator)(qs).get(num=2)
def test_operator_on_combined_qs_error(self):
qs = Number.objects.all()
msg = "Cannot use %s operator with combined queryset."
combinators = ["union"]
if connection.features.supports_select_difference:
combinators.append("difference")
if connection.features.supports_select_intersection:
combinators.append("intersection")
operators = [
("|", operator.or_),
("&", operator.and_),
("^", operator.xor),
]
for combinator in combinators:
combined_qs = getattr(qs, combinator)(qs)
for operator_, operator_func in operators:
with self.subTest(combinator=combinator):
with self.assertRaisesMessage(TypeError, msg % operator_):
operator_func(qs, combined_qs)
with self.assertRaisesMessage(TypeError, msg % operator_):
operator_func(combined_qs, qs)
|
c4300b49a55da82a52f12a8617d397ae8a24597537d96b1d7d02324fc88fd16c | import base64
import os
import shutil
import string
import tempfile
import unittest
from datetime import timedelta
from http import cookies
from pathlib import Path
from unittest import mock
from django.conf import settings
from django.contrib.sessions.backends.base import UpdateError
from django.contrib.sessions.backends.cache import SessionStore as CacheSession
from django.contrib.sessions.backends.cached_db import SessionStore as CacheDBSession
from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
from django.contrib.sessions.backends.file import SessionStore as FileSession
from django.contrib.sessions.backends.signed_cookies import (
SessionStore as CookieSession,
)
from django.contrib.sessions.exceptions import InvalidSessionKey, SessionInterrupted
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sessions.models import Session
from django.contrib.sessions.serializers import JSONSerializer
from django.core import management
from django.core.cache import caches
from django.core.cache.backends.base import InvalidCacheBackendError
from django.core.exceptions import ImproperlyConfigured
from django.core.signing import TimestampSigner
from django.http import HttpResponse
from django.test import (
RequestFactory,
SimpleTestCase,
TestCase,
ignore_warnings,
override_settings,
)
from django.utils import timezone
from .models import SessionStore as CustomDatabaseSession
class SessionTestsMixin:
# This does not inherit from TestCase to avoid any tests being run with this
# class, which wouldn't work, and to allow different TestCase subclasses to
# be used.
backend = None # subclasses must specify
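    # A concrete test case (hypothetical sketch) pairs this mixin with a
    # TestCase subclass and one of the imported backends, e.g.:
    #
    #   class DatabaseSessionTests(SessionTestsMixin, TestCase):
    #       backend = DatabaseSession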
def setUp(self):
self.session = self.backend()
def tearDown(self):
        # NB: be careful to delete any sessions created; stale sessions fill
        # up /tmp (with some backends) and eventually overwhelm it after lots
        # of runs (think buildbots).
self.session.delete()
def test_new_session(self):
self.assertIs(self.session.modified, False)
self.assertIs(self.session.accessed, False)
def test_get_empty(self):
self.assertIsNone(self.session.get("cat"))
def test_store(self):
self.session["cat"] = "dog"
self.assertIs(self.session.modified, True)
self.assertEqual(self.session.pop("cat"), "dog")
def test_pop(self):
self.session["some key"] = "exists"
        # Reset the flags on the session itself (not on the test case) to
        # pretend it hasn't been accessed yet:
        self.session.accessed = False
        self.session.modified = False
self.assertEqual(self.session.pop("some key"), "exists")
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, True)
self.assertIsNone(self.session.get("some key"))
def test_pop_default(self):
self.assertEqual(
self.session.pop("some key", "does not exist"), "does not exist"
)
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_pop_default_named_argument(self):
self.assertEqual(
self.session.pop("some key", default="does not exist"), "does not exist"
)
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_pop_no_default_keyerror_raised(self):
with self.assertRaises(KeyError):
self.session.pop("some key")
def test_setdefault(self):
self.assertEqual(self.session.setdefault("foo", "bar"), "bar")
self.assertEqual(self.session.setdefault("foo", "baz"), "bar")
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, True)
def test_update(self):
self.session.update({"update key": 1})
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, True)
self.assertEqual(self.session.get("update key", None), 1)
def test_has_key(self):
self.session["some key"] = 1
self.session.modified = False
self.session.accessed = False
self.assertIn("some key", self.session)
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_values(self):
self.assertEqual(list(self.session.values()), [])
self.assertIs(self.session.accessed, True)
self.session["some key"] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.values()), [1])
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_keys(self):
self.session["x"] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.keys()), ["x"])
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_items(self):
self.session["x"] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.items()), [("x", 1)])
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, False)
def test_clear(self):
self.session["x"] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.items()), [("x", 1)])
self.session.clear()
self.assertEqual(list(self.session.items()), [])
self.assertIs(self.session.accessed, True)
self.assertIs(self.session.modified, True)
def test_save(self):
self.session.save()
self.assertIs(self.session.exists(self.session.session_key), True)
def test_delete(self):
self.session.save()
self.session.delete(self.session.session_key)
self.assertIs(self.session.exists(self.session.session_key), False)
def test_flush(self):
self.session["foo"] = "bar"
self.session.save()
prev_key = self.session.session_key
self.session.flush()
self.assertIs(self.session.exists(prev_key), False)
self.assertNotEqual(self.session.session_key, prev_key)
self.assertIsNone(self.session.session_key)
self.assertIs(self.session.modified, True)
self.assertIs(self.session.accessed, True)
def test_cycle(self):
self.session["a"], self.session["b"] = "c", "d"
self.session.save()
prev_key = self.session.session_key
prev_data = list(self.session.items())
self.session.cycle_key()
self.assertIs(self.session.exists(prev_key), False)
self.assertNotEqual(self.session.session_key, prev_key)
self.assertEqual(list(self.session.items()), prev_data)
def test_cycle_with_no_session_cache(self):
self.session["a"], self.session["b"] = "c", "d"
self.session.save()
prev_data = self.session.items()
self.session = self.backend(self.session.session_key)
self.assertIs(hasattr(self.session, "_session_cache"), False)
self.session.cycle_key()
self.assertCountEqual(self.session.items(), prev_data)
def test_save_doesnt_clear_data(self):
self.session["a"] = "b"
self.session.save()
self.assertEqual(self.session["a"], "b")
def test_invalid_key(self):
# Submitting an invalid session key (either by guessing, or if the db has
# removed the key) results in a new key being generated.
try:
session = self.backend("1")
session.save()
self.assertNotEqual(session.session_key, "1")
self.assertIsNone(session.get("cat"))
session.delete()
finally:
# Some backends leave a stale cache entry for the invalid
# session key; make sure that entry is manually deleted
session.delete("1")
def test_session_key_empty_string_invalid(self):
"""Falsey values (Such as an empty string) are rejected."""
self.session._session_key = ""
self.assertIsNone(self.session.session_key)
def test_session_key_too_short_invalid(self):
"""Strings shorter than 8 characters are rejected."""
self.session._session_key = "1234567"
self.assertIsNone(self.session.session_key)
def test_session_key_valid_string_saved(self):
"""Strings of length 8 and up are accepted and stored."""
self.session._session_key = "12345678"
self.assertEqual(self.session.session_key, "12345678")
def test_session_key_is_read_only(self):
def set_session_key(session):
session.session_key = session._get_new_session_key()
with self.assertRaises(AttributeError):
set_session_key(self.session)
# Custom session expiry
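    # set_expiry() accepts several argument types (per Django's documented
    # API), all exercised by the tests below:
    #   session.set_expiry(300)                # int: expire in 300 seconds
    #   session.set_expiry(timedelta(days=1))  # timedelta offset
    #   session.set_expiry(some_datetime)      # absolute expiry moment
    #   session.set_expiry(0)                  # expire at browser close
    #   session.set_expiry(None)               # fall back to SESSION_COOKIE_AGE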
def test_default_expiry(self):
# A normal session has a max age equal to settings
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
# So does a custom session with an idle expiration time of 0 (but it'll
# expire at browser close)
self.session.set_expiry(0)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_custom_expiry_seconds(self):
modification = timezone.now()
self.session.set_expiry(10)
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_timedelta(self):
modification = timezone.now()
# Mock timezone.now, because set_expiry calls it on this code path.
original_now = timezone.now
try:
timezone.now = lambda: modification
self.session.set_expiry(timedelta(seconds=10))
finally:
timezone.now = original_now
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_datetime(self):
modification = timezone.now()
self.session.set_expiry(modification + timedelta(seconds=10))
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_reset(self):
self.session.set_expiry(None)
self.session.set_expiry(10)
self.session.set_expiry(None)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_get_expire_at_browser_close(self):
# Tests get_expire_at_browser_close with different settings and different
# set_expiry calls
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False):
self.session.set_expiry(10)
self.assertIs(self.session.get_expire_at_browser_close(), False)
self.session.set_expiry(0)
self.assertIs(self.session.get_expire_at_browser_close(), True)
self.session.set_expiry(None)
self.assertIs(self.session.get_expire_at_browser_close(), False)
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=True):
self.session.set_expiry(10)
self.assertIs(self.session.get_expire_at_browser_close(), False)
self.session.set_expiry(0)
self.assertIs(self.session.get_expire_at_browser_close(), True)
self.session.set_expiry(None)
self.assertIs(self.session.get_expire_at_browser_close(), True)
def test_decode(self):
# Ensure we can decode what we encode
data = {"a test key": "a test value"}
encoded = self.session.encode(data)
self.assertEqual(self.session.decode(encoded), data)
def test_decode_failure_logged_to_security(self):
tests = [
base64.b64encode(b"flaskdj:alkdjf").decode("ascii"),
"bad:encoded:value",
]
for encoded in tests:
with self.subTest(encoded=encoded):
with self.assertLogs(
"django.security.SuspiciousSession", "WARNING"
) as cm:
self.assertEqual(self.session.decode(encoded), {})
# The failed decode is logged.
self.assertIn("Session data corrupted", cm.output[0])
def test_decode_serializer_exception(self):
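        # A payload that passes signature verification but cannot be
        # deserialized should also yield an empty session rather than raising.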
signer = TimestampSigner(salt=self.session.key_salt)
encoded = signer.sign(b"invalid data")
self.assertEqual(self.session.decode(encoded), {})
def test_actual_expiry(self):
old_session_key = None
new_session_key = None
try:
self.session["foo"] = "bar"
self.session.set_expiry(-timedelta(seconds=10))
self.session.save()
old_session_key = self.session.session_key
# With an expiry date in the past, the session expires instantly.
new_session = self.backend(self.session.session_key)
new_session_key = new_session.session_key
self.assertNotIn("foo", new_session)
finally:
self.session.delete(old_session_key)
self.session.delete(new_session_key)
def test_session_load_does_not_create_record(self):
"""
Loading an unknown session key does not create a session record.
Creating session records on load is a DOS vulnerability.
"""
session = self.backend("someunknownkey")
session.load()
self.assertIsNone(session.session_key)
self.assertIs(session.exists(session.session_key), False)
        # The provided unknown key was cycled, not reused.
self.assertNotEqual(session.session_key, "someunknownkey")
def test_session_save_does_not_resurrect_session_logged_out_in_other_context(self):
"""
Sessions shouldn't be resurrected by a concurrent request.
"""
# Create new session.
s1 = self.backend()
s1["test_data"] = "value1"
s1.save(must_create=True)
# Logout in another context.
s2 = self.backend(s1.session_key)
s2.delete()
# Modify session in first context.
s1["test_data"] = "value2"
with self.assertRaises(UpdateError):
# This should throw an exception as the session is deleted, not
# resurrect the session.
s1.save()
self.assertEqual(s1.load(), {})
class DatabaseSessionTests(SessionTestsMixin, TestCase):
backend = DatabaseSession
session_engine = "django.contrib.sessions.backends.db"
@property
def model(self):
return self.backend.get_model_class()
def test_session_str(self):
"Session repr should be the session key."
self.session["x"] = 1
self.session.save()
session_key = self.session.session_key
s = self.model.objects.get(session_key=session_key)
self.assertEqual(str(s), session_key)
def test_session_get_decoded(self):
"""
        Session.get_decoded() retrieves data that was stored in the normal
        way.
"""
self.session["x"] = 1
self.session.save()
s = self.model.objects.get(session_key=self.session.session_key)
self.assertEqual(s.get_decoded(), {"x": 1})
def test_sessionmanager_save(self):
"""
        SessionManager.save() updates an existing session's data.
"""
# Create a session
self.session["y"] = 1
self.session.save()
s = self.model.objects.get(session_key=self.session.session_key)
# Change it
self.model.objects.save(s.session_key, {"y": 2}, s.expire_date)
# Clear cache, so that it will be retrieved from DB
del self.session._session_cache
self.assertEqual(self.session["y"], 2)
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
self.assertEqual(0, self.model.objects.count())
# One object in the future
self.session["foo"] = "bar"
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session["foo"] = "bar"
other_session.set_expiry(-3600)
other_session.save()
# Two sessions are in the database before clearsessions...
self.assertEqual(2, self.model.objects.count())
with override_settings(SESSION_ENGINE=self.session_engine):
management.call_command("clearsessions")
# ... and one is deleted.
self.assertEqual(1, self.model.objects.count())
@override_settings(USE_TZ=True)
class DatabaseSessionWithTimeZoneTests(DatabaseSessionTests):
pass
class CustomDatabaseSessionTests(DatabaseSessionTests):
backend = CustomDatabaseSession
session_engine = "sessions_tests.models"
custom_session_cookie_age = 60 * 60 * 24 # One day.
def test_extra_session_field(self):
# Set the account ID to be picked up by a custom session storage
# and saved to a custom session model database column.
self.session["_auth_user_id"] = 42
self.session.save()
# Make sure that the customized create_model_instance() was called.
s = self.model.objects.get(session_key=self.session.session_key)
self.assertEqual(s.account_id, 42)
# Make the session "anonymous".
self.session.pop("_auth_user_id")
self.session.save()
# Make sure that save() on an existing session did the right job.
s = self.model.objects.get(session_key=self.session.session_key)
self.assertIsNone(s.account_id)
def test_custom_expiry_reset(self):
self.session.set_expiry(None)
self.session.set_expiry(10)
self.session.set_expiry(None)
self.assertEqual(self.session.get_expiry_age(), self.custom_session_cookie_age)
def test_default_expiry(self):
self.assertEqual(self.session.get_expiry_age(), self.custom_session_cookie_age)
self.session.set_expiry(0)
self.assertEqual(self.session.get_expiry_age(), self.custom_session_cookie_age)
class CacheDBSessionTests(SessionTestsMixin, TestCase):
backend = CacheDBSession
def test_exists_searches_cache_first(self):
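        # The cached_db backend consults the cache before the database, so a
        # fresh save should make exists() answerable without any SQL queries.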
self.session.save()
with self.assertNumQueries(0):
self.assertIs(self.session.exists(self.session.session_key), True)
# Some backends might issue a warning
@ignore_warnings(module="django.core.cache.backends.base")
def test_load_overlong_key(self):
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
@override_settings(SESSION_CACHE_ALIAS="sessions")
def test_non_default_cache(self):
        # Refs #21000 -- the cached_db backend should respect
        # SESSION_CACHE_ALIAS.
with self.assertRaises(InvalidCacheBackendError):
self.backend()
@override_settings(USE_TZ=True)
class CacheDBSessionWithTimeZoneTests(CacheDBSessionTests):
pass
class FileSessionTests(SessionTestsMixin, SimpleTestCase):
backend = FileSession
def setUp(self):
# Do file session tests in an isolated directory, and kill it after we're done.
self.original_session_file_path = settings.SESSION_FILE_PATH
self.temp_session_store = settings.SESSION_FILE_PATH = self.mkdtemp()
# Reset the file session backend's internal caches
if hasattr(self.backend, "_storage_path"):
del self.backend._storage_path
super().setUp()
def tearDown(self):
super().tearDown()
settings.SESSION_FILE_PATH = self.original_session_file_path
shutil.rmtree(self.temp_session_store)
def mkdtemp(self):
return tempfile.mkdtemp()
@override_settings(
SESSION_FILE_PATH="/if/this/directory/exists/you/have/a/weird/computer",
)
def test_configuration_check(self):
del self.backend._storage_path
# Make sure the file backend checks for a good storage dir
with self.assertRaises(ImproperlyConfigured):
self.backend()
def test_invalid_key_backslash(self):
# Ensure we don't allow directory-traversal.
# This is tested directly on _key_to_file, as load() will swallow
# a SuspiciousOperation in the same way as an OSError - by creating
# a new session, making it unclear whether the slashes were detected.
with self.assertRaises(InvalidSessionKey):
self.backend()._key_to_file("a\\b\\c")
def test_invalid_key_forwardslash(self):
# Ensure we don't allow directory-traversal
with self.assertRaises(InvalidSessionKey):
self.backend()._key_to_file("a/b/c")
@override_settings(
SESSION_ENGINE="django.contrib.sessions.backends.file",
SESSION_COOKIE_AGE=0,
)
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
storage_path = self.backend._get_storage_path()
file_prefix = settings.SESSION_COOKIE_NAME
def count_sessions():
return len(
[
session_file
for session_file in os.listdir(storage_path)
if session_file.startswith(file_prefix)
]
)
self.assertEqual(0, count_sessions())
# One object in the future
self.session["foo"] = "bar"
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session["foo"] = "bar"
other_session.set_expiry(-3600)
other_session.save()
# One object in the present without an expiry (should be deleted since
# its modification time + SESSION_COOKIE_AGE will be in the past when
# clearsessions runs).
other_session2 = self.backend()
other_session2["foo"] = "bar"
other_session2.save()
# Three sessions are in the filesystem before clearsessions...
self.assertEqual(3, count_sessions())
management.call_command("clearsessions")
# ... and two are deleted.
self.assertEqual(1, count_sessions())
class FileSessionPathLibTests(FileSessionTests):
def mkdtemp(self):
tmp_dir = super().mkdtemp()
return Path(tmp_dir)
class CacheSessionTests(SessionTestsMixin, SimpleTestCase):
backend = CacheSession
# Some backends might issue a warning
@ignore_warnings(module="django.core.cache.backends.base")
def test_load_overlong_key(self):
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
def test_default_cache(self):
self.session.save()
self.assertIsNotNone(caches["default"].get(self.session.cache_key))
@override_settings(
CACHES={
"default": {
"BACKEND": "django.core.cache.backends.dummy.DummyCache",
},
"sessions": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "session",
},
},
SESSION_CACHE_ALIAS="sessions",
)
def test_non_default_cache(self):
# Re-initialize the session backend to make use of overridden settings.
self.session = self.backend()
self.session.save()
self.assertIsNone(caches["default"].get(self.session.cache_key))
self.assertIsNotNone(caches["sessions"].get(self.session.cache_key))
def test_create_and_save(self):
self.session = self.backend()
self.session.create()
self.session.save()
self.assertIsNotNone(caches["default"].get(self.session.cache_key))
class SessionMiddlewareTests(TestCase):
request_factory = RequestFactory()
@staticmethod
def get_response_touching_session(request):
request.session["hello"] = "world"
return HttpResponse("Session test")
@override_settings(SESSION_COOKIE_SECURE=True)
def test_secure_session_cookie(self):
request = self.request_factory.get("/")
middleware = SessionMiddleware(self.get_response_touching_session)
# Handle the response through the middleware
response = middleware(request)
self.assertIs(response.cookies[settings.SESSION_COOKIE_NAME]["secure"], True)
@override_settings(SESSION_COOKIE_HTTPONLY=True)
def test_httponly_session_cookie(self):
request = self.request_factory.get("/")
middleware = SessionMiddleware(self.get_response_touching_session)
# Handle the response through the middleware
response = middleware(request)
self.assertIs(response.cookies[settings.SESSION_COOKIE_NAME]["httponly"], True)
self.assertIn(
cookies.Morsel._reserved["httponly"],
str(response.cookies[settings.SESSION_COOKIE_NAME]),
)
@override_settings(SESSION_COOKIE_SAMESITE="Strict")
def test_samesite_session_cookie(self):
request = self.request_factory.get("/")
middleware = SessionMiddleware(self.get_response_touching_session)
response = middleware(request)
self.assertEqual(
response.cookies[settings.SESSION_COOKIE_NAME]["samesite"], "Strict"
)
@override_settings(SESSION_COOKIE_HTTPONLY=False)
def test_no_httponly_session_cookie(self):
request = self.request_factory.get("/")
middleware = SessionMiddleware(self.get_response_touching_session)
response = middleware(request)
self.assertEqual(response.cookies[settings.SESSION_COOKIE_NAME]["httponly"], "")
self.assertNotIn(
cookies.Morsel._reserved["httponly"],
str(response.cookies[settings.SESSION_COOKIE_NAME]),
)
def test_session_save_on_500(self):
def response_500(request):
response = HttpResponse("Horrible error")
response.status_code = 500
request.session["hello"] = "world"
return response
request = self.request_factory.get("/")
SessionMiddleware(response_500)(request)
# The value wasn't saved above.
self.assertNotIn("hello", request.session.load())
def test_session_save_on_5xx(self):
def response_503(request):
response = HttpResponse("Service Unavailable")
response.status_code = 503
request.session["hello"] = "world"
return response
request = self.request_factory.get("/")
SessionMiddleware(response_503)(request)
# The value wasn't saved above.
self.assertNotIn("hello", request.session.load())
def test_session_update_error_redirect(self):
def response_delete_session(request):
request.session = DatabaseSession()
request.session.save(must_create=True)
request.session.delete()
return HttpResponse()
request = self.request_factory.get("/foo/")
middleware = SessionMiddleware(response_delete_session)
msg = (
"The request's session was deleted before the request completed. "
"The user may have logged out in a concurrent request, for example."
)
with self.assertRaisesMessage(SessionInterrupted, msg):
# Handle the response through the middleware. It will try to save
# the deleted session which will cause an UpdateError that's caught
# and raised as a SessionInterrupted.
middleware(request)
def test_session_delete_on_end(self):
def response_ending_session(request):
request.session.flush()
return HttpResponse("Session test")
request = self.request_factory.get("/")
middleware = SessionMiddleware(response_ending_session)
# Before deleting, there has to be an existing cookie
request.COOKIES[settings.SESSION_COOKIE_NAME] = "abc"
# Handle the response through the middleware
response = middleware(request)
# The cookie was deleted, not recreated.
# A deleted cookie header looks like:
# "Set-Cookie: sessionid=; expires=Thu, 01 Jan 1970 00:00:00 GMT; "
# "Max-Age=0; Path=/"
self.assertEqual(
'Set-Cookie: {}=""; expires=Thu, 01 Jan 1970 00:00:00 GMT; '
"Max-Age=0; Path=/; SameSite={}".format(
settings.SESSION_COOKIE_NAME,
settings.SESSION_COOKIE_SAMESITE,
),
str(response.cookies[settings.SESSION_COOKIE_NAME]),
)
# SessionMiddleware sets 'Vary: Cookie' to prevent the 'Set-Cookie'
# from being cached.
self.assertEqual(response.headers["Vary"], "Cookie")
@override_settings(
SESSION_COOKIE_DOMAIN=".example.local", SESSION_COOKIE_PATH="/example/"
)
def test_session_delete_on_end_with_custom_domain_and_path(self):
def response_ending_session(request):
request.session.flush()
return HttpResponse("Session test")
request = self.request_factory.get("/")
middleware = SessionMiddleware(response_ending_session)
# Before deleting, there has to be an existing cookie
request.COOKIES[settings.SESSION_COOKIE_NAME] = "abc"
# Handle the response through the middleware
response = middleware(request)
# The cookie was deleted, not recreated.
# A deleted cookie header with a custom domain and path looks like:
# Set-Cookie: sessionid=; Domain=.example.local;
# expires=Thu, 01 Jan 1970 00:00:00 GMT; Max-Age=0;
# Path=/example/
self.assertEqual(
'Set-Cookie: {}=""; Domain=.example.local; expires=Thu, '
"01 Jan 1970 00:00:00 GMT; Max-Age=0; Path=/example/; SameSite={}".format(
settings.SESSION_COOKIE_NAME,
settings.SESSION_COOKIE_SAMESITE,
),
str(response.cookies[settings.SESSION_COOKIE_NAME]),
)
def test_flush_empty_without_session_cookie_doesnt_set_cookie(self):
def response_ending_session(request):
request.session.flush()
return HttpResponse("Session test")
request = self.request_factory.get("/")
middleware = SessionMiddleware(response_ending_session)
# Handle the response through the middleware
response = middleware(request)
# A cookie should not be set.
self.assertEqual(response.cookies, {})
# The session is accessed so "Vary: Cookie" should be set.
self.assertEqual(response.headers["Vary"], "Cookie")
def test_empty_session_saved(self):
"""
If a session is emptied of data but still has a key, it should still
be updated.
"""
def response_set_session(request):
# Set a session key and some data.
request.session["foo"] = "bar"
return HttpResponse("Session test")
request = self.request_factory.get("/")
middleware = SessionMiddleware(response_set_session)
# Handle the response through the middleware.
response = middleware(request)
self.assertEqual(tuple(request.session.items()), (("foo", "bar"),))
# A cookie should be set, along with Vary: Cookie.
self.assertIn(
"Set-Cookie: sessionid=%s" % request.session.session_key,
str(response.cookies),
)
self.assertEqual(response.headers["Vary"], "Cookie")
# Empty the session data.
del request.session["foo"]
# Handle the response through the middleware.
response = HttpResponse("Session test")
response = middleware.process_response(request, response)
self.assertEqual(dict(request.session.values()), {})
session = Session.objects.get(session_key=request.session.session_key)
self.assertEqual(session.get_decoded(), {})
# While the session is empty, it hasn't been flushed so a cookie should
# still be set, along with Vary: Cookie.
self.assertGreater(len(request.session.session_key), 8)
self.assertIn(
"Set-Cookie: sessionid=%s" % request.session.session_key,
str(response.cookies),
)
self.assertEqual(response.headers["Vary"], "Cookie")
class CookieSessionTests(SessionTestsMixin, SimpleTestCase):
backend = CookieSession
def test_save(self):
"""
This test tested exists() in the other session backends, but that
doesn't make sense for us.
"""
pass
def test_cycle(self):
"""
This test tested cycle_key() which would create a new session
key for the same session data. But we can't invalidate previously
signed cookies (other than letting them expire naturally) so
testing for this behavior is meaningless.
"""
pass
@unittest.expectedFailure
def test_actual_expiry(self):
# The cookie backend doesn't handle non-default expiry dates, see #19201
super().test_actual_expiry()
def test_unpickling_exception(self):
# signed_cookies backend should handle unpickle exceptions gracefully
# by creating a new session
self.assertEqual(self.session.serializer, JSONSerializer)
self.session.save()
with mock.patch("django.core.signing.loads", side_effect=ValueError):
self.session.load()
@unittest.skip(
"Cookie backend doesn't have an external store to create records in."
)
def test_session_load_does_not_create_record(self):
pass
@unittest.skip(
"CookieSession is stored in the client and there is no way to query it."
)
def test_session_save_does_not_resurrect_session_logged_out_in_other_context(self):
pass
class ClearSessionsCommandTests(SimpleTestCase):
def test_clearsessions_unsupported(self):
msg = (
"Session engine 'sessions_tests.no_clear_expired' doesn't "
"support clearing expired sessions."
)
with self.settings(SESSION_ENGINE="sessions_tests.no_clear_expired"):
with self.assertRaisesMessage(management.CommandError, msg):
management.call_command("clearsessions")
|
687f0049e59da657717d1be6cafcc7b46d0adb5348ef0c15662d605a583dbcc8 | """Tests related to django.db.backends that haven't been organized."""
import datetime
import threading
import unittest
import warnings
from unittest import mock
from django.core.management.color import no_style
from django.db import (
DEFAULT_DB_ALIAS,
DatabaseError,
IntegrityError,
connection,
connections,
reset_queries,
transaction,
)
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.signals import connection_created
from django.db.backends.utils import CursorWrapper
from django.db.models.sql.constants import CURSOR
from django.test import (
TestCase,
TransactionTestCase,
override_settings,
skipIfDBFeature,
skipUnlessDBFeature,
)
from .models import (
Article,
Object,
ObjectReference,
Person,
Post,
RawData,
Reporter,
ReporterProxy,
SchoolClass,
SQLKeywordsModel,
Square,
VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ,
)
class DateQuotingTest(TestCase):
def test_django_date_trunc(self):
"""
        Test the custom ``django_date_trunc`` method, in particular against
        fields whose names clash with strings passed to it (e.g. 'year')
        (#12818).
"""
updated = datetime.datetime(2010, 2, 20)
SchoolClass.objects.create(year=2009, last_updated=updated)
years = SchoolClass.objects.dates("last_updated", "year")
self.assertEqual(list(years), [datetime.date(2010, 1, 1)])
def test_django_date_extract(self):
"""
        Test the custom ``django_date_extract`` method, in particular against
        fields whose names clash with strings passed to it (e.g. 'day')
        (#12818).
"""
updated = datetime.datetime(2010, 2, 20)
SchoolClass.objects.create(year=2009, last_updated=updated)
classes = SchoolClass.objects.filter(last_updated__day=20)
self.assertEqual(len(classes), 1)
@override_settings(DEBUG=True)
class LastExecutedQueryTest(TestCase):
def test_last_executed_query_without_previous_query(self):
"""
last_executed_query should not raise an exception even if no previous
query has been run.
"""
with connection.cursor() as cursor:
connection.ops.last_executed_query(cursor, "", ())
def test_debug_sql(self):
list(Reporter.objects.filter(first_name="test"))
sql = connection.queries[-1]["sql"].lower()
self.assertIn("select", sql)
self.assertIn(Reporter._meta.db_table, sql)
def test_query_encoding(self):
"""last_executed_query() returns a string."""
data = RawData.objects.filter(raw_data=b"\x00\x46 \xFE").extra(
select={"föö": 1}
)
sql, params = data.query.sql_with_params()
with data.query.get_compiler("default").execute_sql(CURSOR) as cursor:
last_sql = cursor.db.ops.last_executed_query(cursor, sql, params)
self.assertIsInstance(last_sql, str)
def test_last_executed_query(self):
        # last_executed_query() interpolates all parameters, so in most cases
        # it is not equal to str(QuerySet.query).
for qs in (
Article.objects.filter(pk=1),
Article.objects.filter(pk__in=(1, 2), reporter__pk=3),
Article.objects.filter(
pk=1,
reporter__pk=9,
).exclude(reporter__pk__in=[2, 1]),
Article.objects.filter(pk__in=list(range(20, 31))),
):
sql, params = qs.query.sql_with_params()
with qs.query.get_compiler(DEFAULT_DB_ALIAS).execute_sql(CURSOR) as cursor:
self.assertEqual(
cursor.db.ops.last_executed_query(cursor, sql, params),
str(qs.query),
)
@skipUnlessDBFeature("supports_paramstyle_pyformat")
def test_last_executed_query_dict(self):
square_opts = Square._meta
sql = "INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(square)s)" % (
connection.introspection.identifier_converter(square_opts.db_table),
connection.ops.quote_name(square_opts.get_field("root").column),
connection.ops.quote_name(square_opts.get_field("square").column),
)
with connection.cursor() as cursor:
params = {"root": 2, "square": 4}
cursor.execute(sql, params)
self.assertEqual(
cursor.db.ops.last_executed_query(cursor, sql, params),
sql % params,
)
@skipUnlessDBFeature("supports_paramstyle_pyformat")
def test_last_executed_query_dict_overlap_keys(self):
square_opts = Square._meta
sql = "INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(root2)s)" % (
connection.introspection.identifier_converter(square_opts.db_table),
connection.ops.quote_name(square_opts.get_field("root").column),
connection.ops.quote_name(square_opts.get_field("square").column),
)
with connection.cursor() as cursor:
params = {"root": 2, "root2": 4}
cursor.execute(sql, params)
self.assertEqual(
cursor.db.ops.last_executed_query(cursor, sql, params),
sql % params,
)
def test_last_executed_query_with_duplicate_params(self):
square_opts = Square._meta
table = connection.introspection.identifier_converter(square_opts.db_table)
id_column = connection.ops.quote_name(square_opts.get_field("id").column)
root_column = connection.ops.quote_name(square_opts.get_field("root").column)
sql = f"UPDATE {table} SET {root_column} = %s + %s WHERE {id_column} = %s"
with connection.cursor() as cursor:
params = [42, 42, 1]
cursor.execute(sql, params)
last_executed_query = connection.ops.last_executed_query(
cursor, sql, params
)
self.assertEqual(
last_executed_query,
f"UPDATE {table} SET {root_column} = 42 + 42 WHERE {id_column} = 1",
)
class ParameterHandlingTest(TestCase):
def test_bad_parameter_count(self):
"""
An executemany call with too many/not enough parameters will raise an
exception.
"""
with connection.cursor() as cursor:
query = "INSERT INTO %s (%s, %s) VALUES (%%s, %%s)" % (
connection.introspection.identifier_converter("backends_square"),
connection.ops.quote_name("root"),
connection.ops.quote_name("square"),
)
with self.assertRaises(Exception):
cursor.executemany(query, [(1, 2, 3)])
with self.assertRaises(Exception):
cursor.executemany(query, [(1,)])
class LongNameTest(TransactionTestCase):
"""Long primary keys and model names can result in a sequence name
that exceeds the database limits, which will result in truncation
on certain databases (e.g., Postgres). The backend needs to use
the correct sequence name in last_insert_id and other places, so
check it is. Refs #8901.
"""
available_apps = ["backends"]
def test_sequence_name_length_limits_create(self):
"""Creation of model with long name and long pk name doesn't error."""
VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
def test_sequence_name_length_limits_m2m(self):
"""
An m2m save of a model with a long name and a long m2m field name
doesn't error (#8901).
"""
obj = (
VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
)
rel_obj = Person.objects.create(first_name="Django", last_name="Reinhardt")
obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)
def test_sequence_name_length_limits_flush(self):
"""
Sequence resetting as part of a flush with model with long name and
long pk name doesn't error (#8901).
"""
        # A full flush is expensive for the test run, so dig into the
        # internals to generate the likely offending SQL and run it manually.
        # Some convenience aliases:
VLM = VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
VLM_m2m = (
VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
)
tables = [
VLM._meta.db_table,
VLM_m2m._meta.db_table,
]
sql_list = connection.ops.sql_flush(no_style(), tables, reset_sequences=True)
connection.ops.execute_sql_flush(sql_list)
class SequenceResetTest(TestCase):
def test_generic_relation(self):
"Sequence names are correct when resetting generic relations (Ref #13941)"
# Create an object with a manually specified PK
Post.objects.create(id=10, name="1st post", text="hello world")
# Reset the sequences for the database
commands = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(
no_style(), [Post]
)
with connection.cursor() as cursor:
for sql in commands:
cursor.execute(sql)
# If we create a new object now, it should have a PK greater
# than the PK we specified manually.
obj = Post.objects.create(name="New post", text="goodbye world")
self.assertGreater(obj.pk, 10)
# This test needs to run outside of a transaction, otherwise closing the
# connection would implicitly roll back and cause problems during teardown.
class ConnectionCreatedSignalTest(TransactionTestCase):
available_apps = []
# Unfortunately with sqlite3 the in-memory test database cannot be closed,
# and so it cannot be re-opened during testing.
@skipUnlessDBFeature("test_db_allows_multiple_connections")
def test_signal(self):
data = {}
def receiver(sender, connection, **kwargs):
data["connection"] = connection
connection_created.connect(receiver)
connection.close()
with connection.cursor():
pass
self.assertIs(data["connection"].connection, connection.connection)
connection_created.disconnect(receiver)
data.clear()
with connection.cursor():
pass
self.assertEqual(data, {})
class EscapingChecks(TestCase):
"""
All tests in this test case are also run with settings.DEBUG=True in
EscapingChecksDebug test case, to also test CursorDebugWrapper.
"""
bare_select_suffix = connection.features.bare_select_suffix
def test_paramless_no_escaping(self):
with connection.cursor() as cursor:
cursor.execute("SELECT '%s'" + self.bare_select_suffix)
self.assertEqual(cursor.fetchall()[0][0], "%s")
def test_parameter_escaping(self):
with connection.cursor() as cursor:
cursor.execute("SELECT '%%', %s" + self.bare_select_suffix, ("%d",))
self.assertEqual(cursor.fetchall()[0], ("%", "%d"))
@override_settings(DEBUG=True)
class EscapingChecksDebug(EscapingChecks):
pass
class BackendTestCase(TransactionTestCase):
available_apps = ["backends"]
def create_squares_with_executemany(self, args):
self.create_squares(args, "format", True)
def create_squares(self, args, paramstyle, multiple):
opts = Square._meta
tbl = connection.introspection.identifier_converter(opts.db_table)
f1 = connection.ops.quote_name(opts.get_field("root").column)
f2 = connection.ops.quote_name(opts.get_field("square").column)
if paramstyle == "format":
query = "INSERT INTO %s (%s, %s) VALUES (%%s, %%s)" % (tbl, f1, f2)
elif paramstyle == "pyformat":
query = "INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(square)s)" % (
tbl,
f1,
f2,
)
else:
raise ValueError("unsupported paramstyle in test")
with connection.cursor() as cursor:
if multiple:
cursor.executemany(query, args)
else:
cursor.execute(query, args)
def test_cursor_executemany(self):
        # Test cursor.executemany. Refs #4896.
args = [(i, i**2) for i in range(-5, 6)]
self.create_squares_with_executemany(args)
self.assertEqual(Square.objects.count(), 11)
for i in range(-5, 6):
square = Square.objects.get(root=i)
self.assertEqual(square.square, i**2)
def test_cursor_executemany_with_empty_params_list(self):
        # executemany() with params=[] does nothing. Refs #4765.
args = []
self.create_squares_with_executemany(args)
self.assertEqual(Square.objects.count(), 0)
def test_cursor_executemany_with_iterator(self):
        # executemany() accepts iterators. Refs #10320.
args = ((i, i**2) for i in range(-3, 2))
self.create_squares_with_executemany(args)
self.assertEqual(Square.objects.count(), 5)
args = ((i, i**2) for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares_with_executemany(args)
self.assertEqual(Square.objects.count(), 9)
@skipUnlessDBFeature("supports_paramstyle_pyformat")
def test_cursor_execute_with_pyformat(self):
        # Support pyformat-style passing of parameters. Refs #10070.
args = {"root": 3, "square": 9}
self.create_squares(args, "pyformat", multiple=False)
self.assertEqual(Square.objects.count(), 1)
@skipUnlessDBFeature("supports_paramstyle_pyformat")
def test_cursor_executemany_with_pyformat(self):
        # Support pyformat-style passing of parameters. Refs #10070.
args = [{"root": i, "square": i**2} for i in range(-5, 6)]
self.create_squares(args, "pyformat", multiple=True)
self.assertEqual(Square.objects.count(), 11)
for i in range(-5, 6):
square = Square.objects.get(root=i)
self.assertEqual(square.square, i**2)
@skipUnlessDBFeature("supports_paramstyle_pyformat")
def test_cursor_executemany_with_pyformat_iterator(self):
args = ({"root": i, "square": i**2} for i in range(-3, 2))
self.create_squares(args, "pyformat", multiple=True)
self.assertEqual(Square.objects.count(), 5)
args = ({"root": i, "square": i**2} for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares(args, "pyformat", multiple=True)
self.assertEqual(Square.objects.count(), 9)
def test_unicode_fetches(self):
# fetchone, fetchmany, fetchall return strings as Unicode objects.
qn = connection.ops.quote_name
Person(first_name="John", last_name="Doe").save()
Person(first_name="Jane", last_name="Doe").save()
Person(first_name="Mary", last_name="Agnelline").save()
Person(first_name="Peter", last_name="Parker").save()
Person(first_name="Clark", last_name="Kent").save()
opts2 = Person._meta
f3, f4 = opts2.get_field("first_name"), opts2.get_field("last_name")
with connection.cursor() as cursor:
cursor.execute(
"SELECT %s, %s FROM %s ORDER BY %s"
% (
qn(f3.column),
qn(f4.column),
connection.introspection.identifier_converter(opts2.db_table),
qn(f3.column),
)
)
self.assertEqual(cursor.fetchone(), ("Clark", "Kent"))
self.assertEqual(
list(cursor.fetchmany(2)), [("Jane", "Doe"), ("John", "Doe")]
)
self.assertEqual(
list(cursor.fetchall()), [("Mary", "Agnelline"), ("Peter", "Parker")]
)
def test_unicode_password(self):
old_password = connection.settings_dict["PASSWORD"]
connection.settings_dict["PASSWORD"] = "françois"
try:
with connection.cursor():
pass
except DatabaseError:
            # As the password is probably wrong, a database exception is
            # expected.
pass
except Exception as e:
self.fail("Unexpected error raised with Unicode password: %s" % e)
finally:
connection.settings_dict["PASSWORD"] = old_password
def test_database_operations_helper_class(self):
# Ticket #13630
self.assertTrue(hasattr(connection, "ops"))
self.assertTrue(hasattr(connection.ops, "connection"))
self.assertEqual(connection, connection.ops.connection)
def test_database_operations_init(self):
"""
DatabaseOperations initialization doesn't query the database.
See #17656.
"""
with self.assertNumQueries(0):
connection.ops.__class__(connection)
def test_cached_db_features(self):
self.assertIn(connection.features.supports_transactions, (True, False))
self.assertIn(connection.features.can_introspect_foreign_keys, (True, False))
def test_duplicate_table_error(self):
"""Creating an existing table returns a DatabaseError"""
query = "CREATE TABLE %s (id INTEGER);" % Article._meta.db_table
with connection.cursor() as cursor:
with self.assertRaises(DatabaseError):
cursor.execute(query)
def test_cursor_contextmanager(self):
"""
        Cursors can be used as context managers.
"""
with connection.cursor() as cursor:
self.assertIsInstance(cursor, CursorWrapper)
        # Both InterfaceError and ProgrammingError seem to be used when
        # accessing a closed cursor (psycopg2 uses InterfaceError, the rest
        # seem to use ProgrammingError).
with self.assertRaises(connection.features.closed_cursor_error_class):
# cursor should be closed, so no queries should be possible.
cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
@unittest.skipUnless(
connection.vendor == "postgresql",
"Psycopg2 specific cursor.closed attribute needed",
)
def test_cursor_contextmanager_closing(self):
        # There isn't a generic way to test that cursors are closed, but
        # psycopg2 exposes a ``closed`` attribute for exactly that, so run
        # this check only on psycopg2.
with connection.cursor() as cursor:
self.assertIsInstance(cursor, CursorWrapper)
self.assertTrue(cursor.closed)
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature("test_db_allows_multiple_connections")
def test_is_usable_after_database_disconnects(self):
"""
is_usable() doesn't crash when the database disconnects (#21553).
"""
# Open a connection to the database.
with connection.cursor():
pass
# Emulate a connection close by the database.
connection._close()
# Even then is_usable() should not raise an exception.
try:
self.assertFalse(connection.is_usable())
finally:
# Clean up the mess created by connection._close(). Since the
# connection is already closed, this crashes on some backends.
try:
connection.close()
except Exception:
pass
@override_settings(DEBUG=True)
def test_queries(self):
"""
Test the documented API of connection.queries.
"""
sql = "SELECT 1" + connection.features.bare_select_suffix
with connection.cursor() as cursor:
reset_queries()
cursor.execute(sql)
self.assertEqual(1, len(connection.queries))
self.assertIsInstance(connection.queries, list)
self.assertIsInstance(connection.queries[0], dict)
self.assertEqual(list(connection.queries[0]), ["sql", "time"])
self.assertEqual(connection.queries[0]["sql"], sql)
reset_queries()
self.assertEqual(0, len(connection.queries))
sql = "INSERT INTO %s (%s, %s) VALUES (%%s, %%s)" % (
connection.introspection.identifier_converter("backends_square"),
connection.ops.quote_name("root"),
connection.ops.quote_name("square"),
)
with connection.cursor() as cursor:
cursor.executemany(sql, [(1, 1), (2, 4)])
self.assertEqual(1, len(connection.queries))
self.assertIsInstance(connection.queries, list)
self.assertIsInstance(connection.queries[0], dict)
self.assertEqual(list(connection.queries[0]), ["sql", "time"])
self.assertEqual(connection.queries[0]["sql"], "2 times: %s" % sql)
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature("test_db_allows_multiple_connections")
@override_settings(DEBUG=True)
def test_queries_limit(self):
"""
The backend doesn't store an unlimited number of queries (#12581).
"""
old_queries_limit = BaseDatabaseWrapper.queries_limit
BaseDatabaseWrapper.queries_limit = 3
new_connection = connection.copy()
# Initialize the connection and clear initialization statements.
with new_connection.cursor():
pass
new_connection.queries_log.clear()
try:
with new_connection.cursor() as cursor:
cursor.execute("SELECT 1" + new_connection.features.bare_select_suffix)
cursor.execute("SELECT 2" + new_connection.features.bare_select_suffix)
with warnings.catch_warnings(record=True) as w:
self.assertEqual(2, len(new_connection.queries))
self.assertEqual(0, len(w))
with new_connection.cursor() as cursor:
cursor.execute("SELECT 3" + new_connection.features.bare_select_suffix)
cursor.execute("SELECT 4" + new_connection.features.bare_select_suffix)
msg = (
"Limit for query logging exceeded, only the last 3 queries will be "
"returned."
)
with self.assertWarnsMessage(UserWarning, msg):
self.assertEqual(3, len(new_connection.queries))
finally:
BaseDatabaseWrapper.queries_limit = old_queries_limit
new_connection.close()
@mock.patch("django.db.backends.utils.logger")
@override_settings(DEBUG=True)
def test_queries_logger(self, mocked_logger):
sql = "SELECT 1" + connection.features.bare_select_suffix
with connection.cursor() as cursor:
cursor.execute(sql)
params, kwargs = mocked_logger.debug.call_args
self.assertIn("; alias=%s", params[0])
self.assertEqual(params[2], sql)
self.assertIsNone(params[3])
self.assertEqual(params[4], connection.alias)
self.assertEqual(
list(kwargs["extra"]),
["duration", "sql", "params", "alias"],
)
self.assertEqual(tuple(kwargs["extra"].values()), params[1:])
def test_queries_bare_where(self):
sql = f"SELECT 1{connection.features.bare_select_suffix} WHERE 1=1"
with connection.cursor() as cursor:
cursor.execute(sql)
self.assertEqual(cursor.fetchone(), (1,))
def test_timezone_none_use_tz_false(self):
connection.ensure_connection()
with self.settings(TIME_ZONE=None, USE_TZ=False):
connection.init_connection_state()
# These tests aren't conditional because it would require differentiating
# between MySQL+InnoDB and MySQL+MyISAM (something we currently can't do).
class FkConstraintsTests(TransactionTestCase):
available_apps = ["backends"]
def setUp(self):
# Create a Reporter.
self.r = Reporter.objects.create(first_name="John", last_name="Smith")
def test_integrity_checks_on_creation(self):
"""
Try to create a model instance that violates a FK constraint. If it
fails it should fail with IntegrityError.
"""
a1 = Article(
headline="This is a test",
pub_date=datetime.datetime(2005, 7, 27),
reporter_id=30,
)
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
        # Now that we know this backend supports integrity checks, make sure
        # constraints are also enforced for proxy models. Refs #17519.
a2 = Article(
headline="This is another test",
reporter=self.r,
pub_date=datetime.datetime(2012, 8, 3),
reporter_proxy_id=30,
)
with self.assertRaises(IntegrityError):
a2.save()
def test_integrity_checks_on_update(self):
"""
Try to update a model instance introducing a FK constraint violation.
If it fails it should fail with IntegrityError.
"""
# Create an Article.
Article.objects.create(
headline="Test article",
pub_date=datetime.datetime(2010, 9, 4),
reporter=self.r,
)
# Retrieve it from the DB
a1 = Article.objects.get(headline="Test article")
a1.reporter_id = 30
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
        # Now that we know this backend supports integrity checks, make sure
        # constraints are also enforced for proxy models. Refs #17519.
# Create another article
r_proxy = ReporterProxy.objects.get(pk=self.r.pk)
Article.objects.create(
headline="Another article",
pub_date=datetime.datetime(1988, 5, 15),
reporter=self.r,
reporter_proxy=r_proxy,
)
# Retrieve the second article from the DB
a2 = Article.objects.get(headline="Another article")
a2.reporter_proxy_id = 30
with self.assertRaises(IntegrityError):
a2.save()
def test_disable_constraint_checks_manually(self):
"""
When constraint checks are disabled, should be able to write bad data
without IntegrityErrors.
"""
with transaction.atomic():
# Create an Article.
Article.objects.create(
headline="Test article",
pub_date=datetime.datetime(2010, 9, 4),
reporter=self.r,
)
# Retrieve it from the DB
a = Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
connection.disable_constraint_checking()
a.save()
connection.enable_constraint_checking()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_disable_constraint_checks_context_manager(self):
"""
When constraint checks are disabled (using context manager), should be
able to write bad data without IntegrityErrors.
"""
with transaction.atomic():
# Create an Article.
Article.objects.create(
headline="Test article",
pub_date=datetime.datetime(2010, 9, 4),
reporter=self.r,
)
# Retrieve it from the DB
a = Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
with connection.constraint_checks_disabled():
a.save()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_check_constraints(self):
"""
Constraint checks should raise an IntegrityError when bad data is in the DB.
"""
with transaction.atomic():
# Create an Article.
Article.objects.create(
headline="Test article",
pub_date=datetime.datetime(2010, 9, 4),
reporter=self.r,
)
# Retrieve it from the DB
a = Article.objects.get(headline="Test article")
a.reporter_id = 30
with connection.constraint_checks_disabled():
a.save()
try:
connection.check_constraints(table_names=[Article._meta.db_table])
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
transaction.set_rollback(True)
def test_check_constraints_sql_keywords(self):
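        # SQLKeywordsModel maps onto a table named "order" (an SQL reserved
        # word), so constraint checking must quote identifiers correctly.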
with transaction.atomic():
obj = SQLKeywordsModel.objects.create(reporter=self.r)
obj.refresh_from_db()
obj.reporter_id = 30
with connection.constraint_checks_disabled():
obj.save()
try:
connection.check_constraints(table_names=["order"])
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
transaction.set_rollback(True)
class ThreadTests(TransactionTestCase):
available_apps = ["backends"]
def test_default_connection_thread_local(self):
"""
The default connection (i.e. django.db.connection) is different for
each thread (#17258).
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
with connection.cursor():
pass
connections_dict[id(connection)] = connection
def runner():
# Passing django.db.connection between threads doesn't work while
# connections[DEFAULT_DB_ALIAS] does.
from django.db import connections
connection = connections[DEFAULT_DB_ALIAS]
# Allow thread sharing so the connection can be closed by the
# main thread.
connection.inc_thread_sharing()
with connection.cursor():
pass
connections_dict[id(connection)] = connection
try:
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
            # Each created connection got a different inner connection.
self.assertEqual(
len({conn.connection for conn in connections_dict.values()}), 3
)
finally:
# Finish by closing the connections opened by the other threads
# (the connection opened in the main thread will automatically be
# closed on teardown).
for conn in connections_dict.values():
if conn is not connection and conn.allow_thread_sharing:
conn.close()
conn.dec_thread_sharing()
def test_connections_thread_local(self):
"""
The connections are different for each thread (#17258).
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
for conn in connections.all():
connections_dict[id(conn)] = conn
def runner():
from django.db import connections
for conn in connections.all():
# Allow thread sharing so the connection can be closed by the
# main thread.
conn.inc_thread_sharing()
connections_dict[id(conn)] = conn
try:
num_new_threads = 2
for x in range(num_new_threads):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertEqual(
len(connections_dict),
len(connections.all()) * (num_new_threads + 1),
)
finally:
# Finish by closing the connections opened by the other threads
# (the connection opened in the main thread will automatically be
# closed on teardown).
for conn in connections_dict.values():
if conn is not connection and conn.allow_thread_sharing:
conn.close()
conn.dec_thread_sharing()
def test_pass_connection_between_threads(self):
"""
A connection can be passed from one thread to the other (#17258).
"""
Person.objects.create(first_name="John", last_name="Doe")
def do_thread():
def runner(main_thread_connection):
from django.db import connections
connections["default"] = main_thread_connection
try:
Person.objects.get(first_name="John", last_name="Doe")
except Exception as e:
exceptions.append(e)
t = threading.Thread(target=runner, args=[connections["default"]])
t.start()
t.join()
# Without touching thread sharing, which should be False by default.
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
connections["default"].close()
# After calling inc_thread_sharing() on the connection.
connections["default"].inc_thread_sharing()
try:
exceptions = []
do_thread()
# All good
self.assertEqual(exceptions, [])
finally:
connections["default"].dec_thread_sharing()
def test_closing_non_shared_connections(self):
"""
A connection that is not explicitly shareable cannot be closed by
another thread (#17258).
"""
# First, without explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
t2 = threading.Thread(target=runner2, args=[connections["default"]])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# The exception was raised
self.assertEqual(len(exceptions), 1)
# Then, with explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
# Enable thread sharing
connections["default"].inc_thread_sharing()
try:
t2 = threading.Thread(target=runner2, args=[connections["default"]])
t2.start()
t2.join()
finally:
connections["default"].dec_thread_sharing()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# No exception was raised
self.assertEqual(len(exceptions), 0)
def test_thread_sharing_count(self):
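        # allow_thread_sharing is backed by a counter: it stays True until
        # every inc_thread_sharing() call has been matched by a
        # dec_thread_sharing() call, and decrementing below zero raises
        # RuntimeError.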
self.assertIs(connection.allow_thread_sharing, False)
connection.inc_thread_sharing()
self.assertIs(connection.allow_thread_sharing, True)
connection.inc_thread_sharing()
self.assertIs(connection.allow_thread_sharing, True)
connection.dec_thread_sharing()
self.assertIs(connection.allow_thread_sharing, True)
connection.dec_thread_sharing()
self.assertIs(connection.allow_thread_sharing, False)
msg = "Cannot decrement the thread sharing count below zero."
with self.assertRaisesMessage(RuntimeError, msg):
connection.dec_thread_sharing()
class MySQLPKZeroTests(TestCase):
"""
    Zero as the id for an AutoField should raise an exception in MySQL,
    because MySQL does not allow zero for an autoincrement primary key
    unless the NO_AUTO_VALUE_ON_ZERO SQL mode is enabled.
"""
@skipIfDBFeature("allows_auto_pk_0")
def test_zero_as_autoval(self):
with self.assertRaises(ValueError):
Square.objects.create(id=0, root=0, square=1)
class DBConstraintTestCase(TestCase):
def test_can_reference_existent(self):
obj = Object.objects.create()
ref = ObjectReference.objects.create(obj=obj)
self.assertEqual(ref.obj, obj)
ref = ObjectReference.objects.get(obj=obj)
self.assertEqual(ref.obj, obj)
def test_can_reference_non_existent(self):
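        # ObjectReference.obj is presumably declared with db_constraint=False
        # in the test models, so the database accepts a dangling id here.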
self.assertFalse(Object.objects.filter(id=12345).exists())
ref = ObjectReference.objects.create(obj_id=12345)
ref_new = ObjectReference.objects.get(obj_id=12345)
self.assertEqual(ref, ref_new)
with self.assertRaises(Object.DoesNotExist):
ref.obj
def test_many_to_many(self):
obj = Object.objects.create()
obj.related_objects.create()
self.assertEqual(Object.objects.count(), 2)
self.assertEqual(obj.related_objects.count(), 1)
intermediary_model = Object._meta.get_field(
"related_objects"
).remote_field.through
intermediary_model.objects.create(from_object_id=obj.id, to_object_id=12345)
self.assertEqual(obj.related_objects.count(), 1)
self.assertEqual(intermediary_model.objects.count(), 2)
|
47a81d159129cee2f1227fee1a995415cf0bee0a95a3984f7616f6b0bf24b4bf | from math import ceil
from operator import attrgetter
from django.core.exceptions import FieldDoesNotExist
from django.db import (
IntegrityError,
NotSupportedError,
OperationalError,
ProgrammingError,
connection,
)
from django.db.models import FileField, Value
from django.db.models.functions import Lower
from django.test import (
TestCase,
override_settings,
skipIfDBFeature,
skipUnlessDBFeature,
)
from .models import (
BigAutoFieldModel,
Country,
FieldsWithDbColumns,
NoFields,
NullableFields,
Pizzeria,
ProxyCountry,
ProxyMultiCountry,
ProxyMultiProxyCountry,
ProxyProxyCountry,
RelatedModel,
Restaurant,
SmallAutoFieldModel,
State,
TwoFields,
UpsertConflict,
)
class BulkCreateTests(TestCase):
def setUp(self):
self.data = [
Country(name="United States of America", iso_two_letter="US"),
Country(name="The Netherlands", iso_two_letter="NL"),
Country(name="Germany", iso_two_letter="DE"),
Country(name="Czech Republic", iso_two_letter="CZ"),
]
def test_simple(self):
created = Country.objects.bulk_create(self.data)
self.assertEqual(created, self.data)
self.assertQuerySetEqual(
Country.objects.order_by("-name"),
[
"United States of America",
"The Netherlands",
"Germany",
"Czech Republic",
],
attrgetter("name"),
)
created = Country.objects.bulk_create([])
self.assertEqual(created, [])
self.assertEqual(Country.objects.count(), 4)
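    # Note: bulk_create() returns the list of objects it was given; pk values
    # on those objects are only guaranteed to be populated on backends with
    # can_return_rows_from_bulk_insert (exercised further below).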
@skipUnlessDBFeature("has_bulk_insert")
def test_efficiency(self):
with self.assertNumQueries(1):
Country.objects.bulk_create(self.data)
@skipUnlessDBFeature("has_bulk_insert")
def test_long_non_ascii_text(self):
"""
        Non-ASCII values with a length in the range 2001 to 4000 characters,
        i.e. 4002 to 8000 bytes, must be set as a CLOB on Oracle (#22144).
"""
Country.objects.bulk_create([Country(description="Ж" * 3000)])
self.assertEqual(Country.objects.count(), 1)
@skipUnlessDBFeature("has_bulk_insert")
def test_long_and_short_text(self):
Country.objects.bulk_create(
[
Country(description="a" * 4001, iso_two_letter="A"),
Country(description="a", iso_two_letter="B"),
Country(description="Ж" * 2001, iso_two_letter="C"),
Country(description="Ж", iso_two_letter="D"),
]
)
self.assertEqual(Country.objects.count(), 4)
def test_multi_table_inheritance_unsupported(self):
expected_message = "Can't bulk create a multi-table inherited model"
with self.assertRaisesMessage(ValueError, expected_message):
Pizzeria.objects.bulk_create(
[
Pizzeria(name="The Art of Pizza"),
]
)
with self.assertRaisesMessage(ValueError, expected_message):
ProxyMultiCountry.objects.bulk_create(
[
ProxyMultiCountry(name="Fillory", iso_two_letter="FL"),
]
)
with self.assertRaisesMessage(ValueError, expected_message):
ProxyMultiProxyCountry.objects.bulk_create(
[
ProxyMultiProxyCountry(name="Fillory", iso_two_letter="FL"),
]
)
def test_proxy_inheritance_supported(self):
ProxyCountry.objects.bulk_create(
[
ProxyCountry(name="Qwghlm", iso_two_letter="QW"),
Country(name="Tortall", iso_two_letter="TA"),
]
)
self.assertQuerySetEqual(
ProxyCountry.objects.all(),
{"Qwghlm", "Tortall"},
attrgetter("name"),
ordered=False,
)
ProxyProxyCountry.objects.bulk_create(
[
ProxyProxyCountry(name="Netherlands", iso_two_letter="NT"),
]
)
self.assertQuerySetEqual(
ProxyProxyCountry.objects.all(),
{
"Qwghlm",
"Tortall",
"Netherlands",
},
attrgetter("name"),
ordered=False,
)
def test_non_auto_increment_pk(self):
State.objects.bulk_create(
[State(two_letter_code=s) for s in ["IL", "NY", "CA", "ME"]]
)
self.assertQuerySetEqual(
State.objects.order_by("two_letter_code"),
[
"CA",
"IL",
"ME",
"NY",
],
attrgetter("two_letter_code"),
)
@skipUnlessDBFeature("has_bulk_insert")
def test_non_auto_increment_pk_efficiency(self):
with self.assertNumQueries(1):
State.objects.bulk_create(
[State(two_letter_code=s) for s in ["IL", "NY", "CA", "ME"]]
)
self.assertQuerySetEqual(
State.objects.order_by("two_letter_code"),
[
"CA",
"IL",
"ME",
"NY",
],
attrgetter("two_letter_code"),
)
@skipIfDBFeature("allows_auto_pk_0")
def test_zero_as_autoval(self):
"""
        Zero as id for AutoField should raise an exception in MySQL, because
        MySQL does not allow zero for an automatic primary key unless the
        NO_AUTO_VALUE_ON_ZERO SQL mode is enabled.
"""
valid_country = Country(name="Germany", iso_two_letter="DE")
invalid_country = Country(id=0, name="Poland", iso_two_letter="PL")
msg = "The database backend does not accept 0 as a value for AutoField."
with self.assertRaisesMessage(ValueError, msg):
Country.objects.bulk_create([valid_country, invalid_country])
def test_batch_same_vals(self):
# SQLite had a problem where all the same-valued models were
# collapsed to one insert.
Restaurant.objects.bulk_create([Restaurant(name="foo") for i in range(0, 2)])
self.assertEqual(Restaurant.objects.count(), 2)
def test_large_batch(self):
TwoFields.objects.bulk_create(
[TwoFields(f1=i, f2=i + 1) for i in range(0, 1001)]
)
self.assertEqual(TwoFields.objects.count(), 1001)
self.assertEqual(
TwoFields.objects.filter(f1__gte=450, f1__lte=550).count(), 101
)
self.assertEqual(TwoFields.objects.filter(f2__gte=901).count(), 101)
@skipUnlessDBFeature("has_bulk_insert")
def test_large_single_field_batch(self):
        # SQLite had a problem with more than 500 UNIONed selects in a
        # single query.
Restaurant.objects.bulk_create([Restaurant() for i in range(0, 501)])
@skipUnlessDBFeature("has_bulk_insert")
def test_large_batch_efficiency(self):
with override_settings(DEBUG=True):
connection.queries_log.clear()
TwoFields.objects.bulk_create(
[TwoFields(f1=i, f2=i + 1) for i in range(0, 1001)]
)
self.assertLess(len(connection.queries), 10)
def test_large_batch_mixed(self):
"""
        Test inserting a large batch of objects that have their primary key
        set, mixed together with objects that don't.
"""
TwoFields.objects.bulk_create(
[
TwoFields(id=i if i % 2 == 0 else None, f1=i, f2=i + 1)
for i in range(100000, 101000)
]
)
self.assertEqual(TwoFields.objects.count(), 1000)
        # We can't assume much about the IDs created, except that the
        # explicitly set IDs above must exist.
id_range = range(100000, 101000, 2)
self.assertEqual(TwoFields.objects.filter(id__in=id_range).count(), 500)
self.assertEqual(TwoFields.objects.exclude(id__in=id_range).count(), 500)
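    # A worked example of the assertions above: ids 100000, 100002, ...,
    # 100998 were set explicitly (500 rows, exactly id_range), while the
    # other 500 rows got backend-assigned ids that may fall anywhere, hence
    # the complementary exclude() count.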
@skipUnlessDBFeature("has_bulk_insert")
def test_large_batch_mixed_efficiency(self):
"""
        Test inserting a large batch of objects that have their primary key
        set, mixed together with objects that don't.
"""
with override_settings(DEBUG=True):
connection.queries_log.clear()
TwoFields.objects.bulk_create(
[
TwoFields(id=i if i % 2 == 0 else None, f1=i, f2=i + 1)
for i in range(100000, 101000)
]
)
self.assertLess(len(connection.queries), 10)
def test_explicit_batch_size(self):
objs = [TwoFields(f1=i, f2=i) for i in range(0, 4)]
num_objs = len(objs)
TwoFields.objects.bulk_create(objs, batch_size=1)
self.assertEqual(TwoFields.objects.count(), num_objs)
TwoFields.objects.all().delete()
TwoFields.objects.bulk_create(objs, batch_size=2)
self.assertEqual(TwoFields.objects.count(), num_objs)
TwoFields.objects.all().delete()
TwoFields.objects.bulk_create(objs, batch_size=3)
self.assertEqual(TwoFields.objects.count(), num_objs)
TwoFields.objects.all().delete()
TwoFields.objects.bulk_create(objs, batch_size=num_objs)
self.assertEqual(TwoFields.objects.count(), num_objs)
def test_empty_model(self):
NoFields.objects.bulk_create([NoFields() for i in range(2)])
self.assertEqual(NoFields.objects.count(), 2)
@skipUnlessDBFeature("has_bulk_insert")
def test_explicit_batch_size_efficiency(self):
objs = [TwoFields(f1=i, f2=i) for i in range(0, 100)]
with self.assertNumQueries(2):
TwoFields.objects.bulk_create(objs, 50)
TwoFields.objects.all().delete()
with self.assertNumQueries(1):
TwoFields.objects.bulk_create(objs, len(objs))
@skipUnlessDBFeature("has_bulk_insert")
def test_explicit_batch_size_respects_max_batch_size(self):
objs = [Country(name=f"Country {i}") for i in range(1000)]
fields = ["name", "iso_two_letter", "description"]
max_batch_size = max(connection.ops.bulk_batch_size(fields, objs), 1)
with self.assertNumQueries(ceil(len(objs) / max_batch_size)):
Country.objects.bulk_create(objs, batch_size=max_batch_size + 1)
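    # The test above relies on bulk_create() clamping the requested
    # batch_size to the backend maximum, so the number of INSERTs is
    # ceil(len(objs) / max_batch_size). For example, with 1000 objects and a
    # backend maximum of 300, that's ceil(1000 / 300) == 4 queries even
    # though batch_size=301 was requested.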
@skipUnlessDBFeature("has_bulk_insert")
def test_bulk_insert_expressions(self):
Restaurant.objects.bulk_create(
[
Restaurant(name="Sam's Shake Shack"),
Restaurant(name=Lower(Value("Betty's Beetroot Bar"))),
]
)
bbb = Restaurant.objects.filter(name="betty's beetroot bar")
self.assertEqual(bbb.count(), 1)
@skipUnlessDBFeature("has_bulk_insert")
def test_bulk_insert_nullable_fields(self):
fk_to_auto_fields = {
"auto_field": NoFields.objects.create(),
"small_auto_field": SmallAutoFieldModel.objects.create(),
"big_auto_field": BigAutoFieldModel.objects.create(),
}
# NULL can be mixed with other values in nullable fields
nullable_fields = [
field for field in NullableFields._meta.get_fields() if field.name != "id"
]
NullableFields.objects.bulk_create(
[
NullableFields(**{**fk_to_auto_fields, field.name: None})
for field in nullable_fields
]
)
self.assertEqual(NullableFields.objects.count(), len(nullable_fields))
for field in nullable_fields:
with self.subTest(field=field):
field_value = "" if isinstance(field, FileField) else None
self.assertEqual(
NullableFields.objects.filter(**{field.name: field_value}).count(),
1,
)
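    # FileField is special-cased above because Django stores a missing file
    # as an empty string in the database rather than NULL, so filtering on
    # None would miss the row.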
@skipUnlessDBFeature("can_return_rows_from_bulk_insert")
def test_set_pk_and_insert_single_item(self):
with self.assertNumQueries(1):
countries = Country.objects.bulk_create([self.data[0]])
self.assertEqual(len(countries), 1)
self.assertEqual(Country.objects.get(pk=countries[0].pk), countries[0])
@skipUnlessDBFeature("can_return_rows_from_bulk_insert")
def test_set_pk_and_query_efficiency(self):
with self.assertNumQueries(1):
countries = Country.objects.bulk_create(self.data)
self.assertEqual(len(countries), 4)
self.assertEqual(Country.objects.get(pk=countries[0].pk), countries[0])
self.assertEqual(Country.objects.get(pk=countries[1].pk), countries[1])
self.assertEqual(Country.objects.get(pk=countries[2].pk), countries[2])
self.assertEqual(Country.objects.get(pk=countries[3].pk), countries[3])
@skipUnlessDBFeature("can_return_rows_from_bulk_insert")
def test_set_state(self):
country_nl = Country(name="Netherlands", iso_two_letter="NL")
country_be = Country(name="Belgium", iso_two_letter="BE")
Country.objects.bulk_create([country_nl])
country_be.save()
        # Objects saved via bulk_create() and save() should have equal state.
self.assertEqual(country_nl._state.adding, country_be._state.adding)
self.assertEqual(country_nl._state.db, country_be._state.db)
def test_set_state_with_pk_specified(self):
state_ca = State(two_letter_code="CA")
state_ny = State(two_letter_code="NY")
State.objects.bulk_create([state_ca])
state_ny.save()
        # Objects saved via bulk_create() and save() should have equal state.
self.assertEqual(state_ca._state.adding, state_ny._state.adding)
self.assertEqual(state_ca._state.db, state_ny._state.db)
@skipIfDBFeature("supports_ignore_conflicts")
def test_ignore_conflicts_value_error(self):
message = "This database backend does not support ignoring conflicts."
with self.assertRaisesMessage(NotSupportedError, message):
TwoFields.objects.bulk_create(self.data, ignore_conflicts=True)
@skipUnlessDBFeature("supports_ignore_conflicts")
def test_ignore_conflicts_ignore(self):
data = [
TwoFields(f1=1, f2=1),
TwoFields(f1=2, f2=2),
TwoFields(f1=3, f2=3),
]
TwoFields.objects.bulk_create(data)
self.assertEqual(TwoFields.objects.count(), 3)
# With ignore_conflicts=True, conflicts are ignored.
conflicting_objects = [
TwoFields(f1=2, f2=2),
TwoFields(f1=3, f2=3),
]
TwoFields.objects.bulk_create([conflicting_objects[0]], ignore_conflicts=True)
TwoFields.objects.bulk_create(conflicting_objects, ignore_conflicts=True)
self.assertEqual(TwoFields.objects.count(), 3)
self.assertIsNone(conflicting_objects[0].pk)
self.assertIsNone(conflicting_objects[1].pk)
# New objects are created and conflicts are ignored.
new_object = TwoFields(f1=4, f2=4)
TwoFields.objects.bulk_create(
conflicting_objects + [new_object], ignore_conflicts=True
)
self.assertEqual(TwoFields.objects.count(), 4)
self.assertIsNone(new_object.pk)
        # Without ignore_conflicts=True, the conflicting rows raise an
        # IntegrityError.
with self.assertRaises(IntegrityError):
TwoFields.objects.bulk_create(conflicting_objects)
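    # On most backends, ignore_conflicts=True maps to a conflict-ignoring
    # INSERT, roughly (PostgreSQL/SQLite syntax; MySQL uses INSERT IGNORE
    # instead):
    #
    #     INSERT INTO ... (f1, f2) VALUES (2, 2) ON CONFLICT DO NOTHING;
    #
    # which is why the conflicting objects above come back without a pk.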
def test_nullable_fk_after_parent(self):
parent = NoFields()
child = NullableFields(auto_field=parent, integer_field=88)
parent.save()
NullableFields.objects.bulk_create([child])
child = NullableFields.objects.get(integer_field=88)
self.assertEqual(child.auto_field, parent)
@skipUnlessDBFeature("can_return_rows_from_bulk_insert")
def test_nullable_fk_after_parent_bulk_create(self):
parent = NoFields()
child = NullableFields(auto_field=parent, integer_field=88)
NoFields.objects.bulk_create([parent])
NullableFields.objects.bulk_create([child])
child = NullableFields.objects.get(integer_field=88)
self.assertEqual(child.auto_field, parent)
def test_unsaved_parent(self):
parent = NoFields()
msg = (
"bulk_create() prohibited to prevent data loss due to unsaved "
"related object 'auto_field'."
)
with self.assertRaisesMessage(ValueError, msg):
NullableFields.objects.bulk_create([NullableFields(auto_field=parent)])
def test_invalid_batch_size_exception(self):
msg = "Batch size must be a positive integer."
with self.assertRaisesMessage(ValueError, msg):
Country.objects.bulk_create([], batch_size=-1)
@skipIfDBFeature("supports_update_conflicts")
def test_update_conflicts_unsupported(self):
msg = "This database backend does not support updating conflicts."
with self.assertRaisesMessage(NotSupportedError, msg):
Country.objects.bulk_create(self.data, update_conflicts=True)
@skipUnlessDBFeature("supports_ignore_conflicts", "supports_update_conflicts")
def test_ignore_update_conflicts_exclusive(self):
msg = "ignore_conflicts and update_conflicts are mutually exclusive"
with self.assertRaisesMessage(ValueError, msg):
Country.objects.bulk_create(
self.data,
ignore_conflicts=True,
update_conflicts=True,
)
@skipUnlessDBFeature("supports_update_conflicts")
def test_update_conflicts_no_update_fields(self):
msg = (
"Fields that will be updated when a row insertion fails on "
"conflicts must be provided."
)
with self.assertRaisesMessage(ValueError, msg):
Country.objects.bulk_create(self.data, update_conflicts=True)
@skipUnlessDBFeature("supports_update_conflicts")
@skipIfDBFeature("supports_update_conflicts_with_target")
def test_update_conflicts_unique_field_unsupported(self):
msg = (
"This database backend does not support updating conflicts with "
"specifying unique fields that can trigger the upsert."
)
with self.assertRaisesMessage(NotSupportedError, msg):
TwoFields.objects.bulk_create(
[TwoFields(f1=1, f2=1), TwoFields(f1=2, f2=2)],
update_conflicts=True,
update_fields=["f2"],
unique_fields=["f1"],
)
@skipUnlessDBFeature("supports_update_conflicts")
def test_update_conflicts_nonexistent_update_fields(self):
unique_fields = None
if connection.features.supports_update_conflicts_with_target:
unique_fields = ["f1"]
msg = "TwoFields has no field named 'nonexistent'"
with self.assertRaisesMessage(FieldDoesNotExist, msg):
TwoFields.objects.bulk_create(
[TwoFields(f1=1, f2=1), TwoFields(f1=2, f2=2)],
update_conflicts=True,
update_fields=["nonexistent"],
unique_fields=unique_fields,
)
@skipUnlessDBFeature(
"supports_update_conflicts",
"supports_update_conflicts_with_target",
)
def test_update_conflicts_unique_fields_required(self):
msg = "Unique fields that can trigger the upsert must be provided."
with self.assertRaisesMessage(ValueError, msg):
TwoFields.objects.bulk_create(
[TwoFields(f1=1, f2=1), TwoFields(f1=2, f2=2)],
update_conflicts=True,
update_fields=["f1"],
)
@skipUnlessDBFeature(
"supports_update_conflicts",
"supports_update_conflicts_with_target",
)
def test_update_conflicts_invalid_update_fields(self):
msg = "bulk_create() can only be used with concrete fields in update_fields."
# Reverse one-to-one relationship.
with self.assertRaisesMessage(ValueError, msg):
Country.objects.bulk_create(
self.data,
update_conflicts=True,
update_fields=["relatedmodel"],
unique_fields=["pk"],
)
# Many-to-many relationship.
with self.assertRaisesMessage(ValueError, msg):
RelatedModel.objects.bulk_create(
[RelatedModel(country=self.data[0])],
update_conflicts=True,
update_fields=["big_auto_fields"],
unique_fields=["country"],
)
@skipUnlessDBFeature(
"supports_update_conflicts",
"supports_update_conflicts_with_target",
)
def test_update_conflicts_pk_in_update_fields(self):
msg = "bulk_create() cannot be used with primary keys in update_fields."
with self.assertRaisesMessage(ValueError, msg):
BigAutoFieldModel.objects.bulk_create(
[BigAutoFieldModel()],
update_conflicts=True,
update_fields=["id"],
unique_fields=["id"],
)
@skipUnlessDBFeature(
"supports_update_conflicts",
"supports_update_conflicts_with_target",
)
def test_update_conflicts_invalid_unique_fields(self):
msg = "bulk_create() can only be used with concrete fields in unique_fields."
# Reverse one-to-one relationship.
with self.assertRaisesMessage(ValueError, msg):
Country.objects.bulk_create(
self.data,
update_conflicts=True,
update_fields=["name"],
unique_fields=["relatedmodel"],
)
# Many-to-many relationship.
with self.assertRaisesMessage(ValueError, msg):
RelatedModel.objects.bulk_create(
[RelatedModel(country=self.data[0])],
update_conflicts=True,
update_fields=["name"],
unique_fields=["big_auto_fields"],
)
def _test_update_conflicts_two_fields(self, unique_fields):
TwoFields.objects.bulk_create(
[
TwoFields(f1=1, f2=1, name="a"),
TwoFields(f1=2, f2=2, name="b"),
]
)
self.assertEqual(TwoFields.objects.count(), 2)
conflicting_objects = [
TwoFields(f1=1, f2=1, name="c"),
TwoFields(f1=2, f2=2, name="d"),
]
TwoFields.objects.bulk_create(
conflicting_objects,
update_conflicts=True,
unique_fields=unique_fields,
update_fields=["name"],
)
self.assertEqual(TwoFields.objects.count(), 2)
self.assertCountEqual(
TwoFields.objects.values("f1", "f2", "name"),
[
{"f1": 1, "f2": 1, "name": "c"},
{"f1": 2, "f2": 2, "name": "d"},
],
)
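    # With update_conflicts=True, unique_fields and update_fields translate
    # roughly to an upsert, e.g. on PostgreSQL:
    #
    #     INSERT INTO ... (f1, f2, name) VALUES (...)
    #     ON CONFLICT (f1) DO UPDATE SET name = EXCLUDED.name;
    #
    # so the conflicting rows above are updated in place instead of raising.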
@skipUnlessDBFeature(
"supports_update_conflicts", "supports_update_conflicts_with_target"
)
def test_update_conflicts_two_fields_unique_fields_first(self):
self._test_update_conflicts_two_fields(["f1"])
@skipUnlessDBFeature(
"supports_update_conflicts", "supports_update_conflicts_with_target"
)
def test_update_conflicts_two_fields_unique_fields_second(self):
self._test_update_conflicts_two_fields(["f2"])
@skipUnlessDBFeature(
"supports_update_conflicts", "supports_update_conflicts_with_target"
)
def test_update_conflicts_unique_fields_pk(self):
TwoFields.objects.bulk_create(
[
TwoFields(f1=1, f2=1, name="a"),
TwoFields(f1=2, f2=2, name="b"),
]
)
self.assertEqual(TwoFields.objects.count(), 2)
obj1 = TwoFields.objects.get(f1=1)
obj2 = TwoFields.objects.get(f1=2)
conflicting_objects = [
TwoFields(pk=obj1.pk, f1=3, f2=3, name="c"),
TwoFields(pk=obj2.pk, f1=4, f2=4, name="d"),
]
TwoFields.objects.bulk_create(
conflicting_objects,
update_conflicts=True,
unique_fields=["pk"],
update_fields=["name"],
)
self.assertEqual(TwoFields.objects.count(), 2)
self.assertCountEqual(
TwoFields.objects.values("f1", "f2", "name"),
[
{"f1": 1, "f2": 1, "name": "c"},
{"f1": 2, "f2": 2, "name": "d"},
],
)
@skipUnlessDBFeature(
"supports_update_conflicts", "supports_update_conflicts_with_target"
)
def test_update_conflicts_two_fields_unique_fields_both(self):
with self.assertRaises((OperationalError, ProgrammingError)):
self._test_update_conflicts_two_fields(["f1", "f2"])
@skipUnlessDBFeature("supports_update_conflicts")
@skipIfDBFeature("supports_update_conflicts_with_target")
def test_update_conflicts_two_fields_no_unique_fields(self):
self._test_update_conflicts_two_fields([])
def _test_update_conflicts_unique_two_fields(self, unique_fields):
Country.objects.bulk_create(self.data)
self.assertEqual(Country.objects.count(), 4)
new_data = [
# Conflicting countries.
Country(
name="Germany",
iso_two_letter="DE",
description=("Germany is a country in Central Europe."),
),
Country(
name="Czech Republic",
iso_two_letter="CZ",
description=(
"The Czech Republic is a landlocked country in Central Europe."
),
),
# New countries.
Country(name="Australia", iso_two_letter="AU"),
Country(
name="Japan",
iso_two_letter="JP",
description=("Japan is an island country in East Asia."),
),
]
Country.objects.bulk_create(
new_data,
update_conflicts=True,
update_fields=["description"],
unique_fields=unique_fields,
)
self.assertEqual(Country.objects.count(), 6)
self.assertCountEqual(
Country.objects.values("iso_two_letter", "description"),
[
{"iso_two_letter": "US", "description": ""},
{"iso_two_letter": "NL", "description": ""},
{
"iso_two_letter": "DE",
"description": ("Germany is a country in Central Europe."),
},
{
"iso_two_letter": "CZ",
"description": (
"The Czech Republic is a landlocked country in Central Europe."
),
},
{"iso_two_letter": "AU", "description": ""},
{
"iso_two_letter": "JP",
"description": ("Japan is an island country in East Asia."),
},
],
)
@skipUnlessDBFeature(
"supports_update_conflicts", "supports_update_conflicts_with_target"
)
def test_update_conflicts_unique_two_fields_unique_fields_both(self):
self._test_update_conflicts_unique_two_fields(["iso_two_letter", "name"])
@skipUnlessDBFeature(
"supports_update_conflicts", "supports_update_conflicts_with_target"
)
def test_update_conflicts_unique_two_fields_unique_fields_one(self):
with self.assertRaises((OperationalError, ProgrammingError)):
self._test_update_conflicts_unique_two_fields(["iso_two_letter"])
@skipUnlessDBFeature("supports_update_conflicts")
@skipIfDBFeature("supports_update_conflicts_with_target")
def test_update_conflicts_unique_two_fields_unique_no_unique_fields(self):
self._test_update_conflicts_unique_two_fields([])
def _test_update_conflicts(self, unique_fields):
UpsertConflict.objects.bulk_create(
[
UpsertConflict(number=1, rank=1, name="John"),
UpsertConflict(number=2, rank=2, name="Mary"),
UpsertConflict(number=3, rank=3, name="Hannah"),
]
)
self.assertEqual(UpsertConflict.objects.count(), 3)
conflicting_objects = [
UpsertConflict(number=1, rank=4, name="Steve"),
UpsertConflict(number=2, rank=2, name="Olivia"),
UpsertConflict(number=3, rank=1, name="Hannah"),
]
UpsertConflict.objects.bulk_create(
conflicting_objects,
update_conflicts=True,
update_fields=["name", "rank"],
unique_fields=unique_fields,
)
self.assertEqual(UpsertConflict.objects.count(), 3)
self.assertCountEqual(
UpsertConflict.objects.values("number", "rank", "name"),
[
{"number": 1, "rank": 4, "name": "Steve"},
{"number": 2, "rank": 2, "name": "Olivia"},
{"number": 3, "rank": 1, "name": "Hannah"},
],
)
UpsertConflict.objects.bulk_create(
conflicting_objects + [UpsertConflict(number=4, rank=4, name="Mark")],
update_conflicts=True,
update_fields=["name", "rank"],
unique_fields=unique_fields,
)
self.assertEqual(UpsertConflict.objects.count(), 4)
self.assertCountEqual(
UpsertConflict.objects.values("number", "rank", "name"),
[
{"number": 1, "rank": 4, "name": "Steve"},
{"number": 2, "rank": 2, "name": "Olivia"},
{"number": 3, "rank": 1, "name": "Hannah"},
{"number": 4, "rank": 4, "name": "Mark"},
],
)
@skipUnlessDBFeature(
"supports_update_conflicts", "supports_update_conflicts_with_target"
)
def test_update_conflicts_unique_fields(self):
self._test_update_conflicts(unique_fields=["number"])
@skipUnlessDBFeature("supports_update_conflicts")
@skipIfDBFeature("supports_update_conflicts_with_target")
def test_update_conflicts_no_unique_fields(self):
self._test_update_conflicts([])
@skipUnlessDBFeature(
"supports_update_conflicts", "supports_update_conflicts_with_target"
)
def test_update_conflicts_unique_fields_update_fields_db_column(self):
FieldsWithDbColumns.objects.bulk_create(
[
FieldsWithDbColumns(rank=1, name="a"),
FieldsWithDbColumns(rank=2, name="b"),
]
)
self.assertEqual(FieldsWithDbColumns.objects.count(), 2)
conflicting_objects = [
FieldsWithDbColumns(rank=1, name="c"),
FieldsWithDbColumns(rank=2, name="d"),
]
FieldsWithDbColumns.objects.bulk_create(
conflicting_objects,
update_conflicts=True,
unique_fields=["rank"],
update_fields=["name"],
)
self.assertEqual(FieldsWithDbColumns.objects.count(), 2)
self.assertCountEqual(
FieldsWithDbColumns.objects.values("rank", "name"),
[
{"rank": 1, "name": "c"},
{"rank": 2, "name": "d"},
],
)
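    # The test above passes field names ("rank", "name"); it verifies that
    # Django maps them to the custom db_column names ("rAnK", "oTheRNaMe")
    # when building the conflict clause, rather than using the attribute
    # names verbatim.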
|
e2b3150529c6fba8ba785aeaaddb4b452be58252227fca667817144a7b0b474d | import datetime
import uuid
from decimal import Decimal
from django.db import models
from django.utils import timezone
try:
from PIL import Image
except ImportError:
Image = None
class Country(models.Model):
name = models.CharField(max_length=255)
iso_two_letter = models.CharField(max_length=2)
description = models.TextField()
class Meta:
constraints = [
models.UniqueConstraint(
fields=["iso_two_letter", "name"],
name="country_name_iso_unique",
),
]
class ProxyCountry(Country):
class Meta:
proxy = True
class ProxyProxyCountry(ProxyCountry):
class Meta:
proxy = True
class ProxyMultiCountry(ProxyCountry):
pass
class ProxyMultiProxyCountry(ProxyMultiCountry):
class Meta:
proxy = True
class Place(models.Model):
name = models.CharField(max_length=100)
class Meta:
abstract = True
class Restaurant(Place):
pass
class Pizzeria(Restaurant):
pass
class State(models.Model):
two_letter_code = models.CharField(max_length=2, primary_key=True)
class TwoFields(models.Model):
f1 = models.IntegerField(unique=True)
f2 = models.IntegerField(unique=True)
name = models.CharField(max_length=15, null=True)
class FieldsWithDbColumns(models.Model):
rank = models.IntegerField(unique=True, db_column="rAnK")
name = models.CharField(max_length=15, null=True, db_column="oTheRNaMe")
class UpsertConflict(models.Model):
number = models.IntegerField(unique=True)
rank = models.IntegerField()
name = models.CharField(max_length=15)
class NoFields(models.Model):
pass
class SmallAutoFieldModel(models.Model):
id = models.SmallAutoField(primary_key=True)
class BigAutoFieldModel(models.Model):
id = models.BigAutoField(primary_key=True)
class NullableFields(models.Model):
# Fields in db.backends.oracle.BulkInsertMapper
    big_int_field = models.BigIntegerField(null=True, default=1)
binary_field = models.BinaryField(null=True, default=b"data")
date_field = models.DateField(null=True, default=timezone.now)
datetime_field = models.DateTimeField(null=True, default=timezone.now)
decimal_field = models.DecimalField(
null=True, max_digits=2, decimal_places=1, default=Decimal("1.1")
)
duration_field = models.DurationField(null=True, default=datetime.timedelta(1))
float_field = models.FloatField(null=True, default=3.2)
integer_field = models.IntegerField(null=True, default=2)
null_boolean_field = models.BooleanField(null=True, default=False)
positive_big_integer_field = models.PositiveBigIntegerField(
null=True, default=2**63 - 1
)
positive_integer_field = models.PositiveIntegerField(null=True, default=3)
positive_small_integer_field = models.PositiveSmallIntegerField(
null=True, default=4
)
small_integer_field = models.SmallIntegerField(null=True, default=5)
time_field = models.TimeField(null=True, default=timezone.now)
auto_field = models.ForeignKey(NoFields, on_delete=models.CASCADE, null=True)
small_auto_field = models.ForeignKey(
SmallAutoFieldModel, on_delete=models.CASCADE, null=True
)
big_auto_field = models.ForeignKey(
BigAutoFieldModel, on_delete=models.CASCADE, null=True
)
# Fields not required in BulkInsertMapper
char_field = models.CharField(null=True, max_length=4, default="char")
email_field = models.EmailField(null=True, default="[email protected]")
file_field = models.FileField(null=True, default="file.txt")
file_path_field = models.FilePathField(path="/tmp", null=True, default="file.txt")
generic_ip_address_field = models.GenericIPAddressField(
null=True, default="127.0.0.1"
)
if Image:
image_field = models.ImageField(null=True, default="image.jpg")
slug_field = models.SlugField(null=True, default="slug")
text_field = models.TextField(null=True, default="text")
url_field = models.URLField(null=True, default="/")
uuid_field = models.UUIDField(null=True, default=uuid.uuid4)
class RelatedModel(models.Model):
name = models.CharField(max_length=15, null=True)
country = models.OneToOneField(Country, models.CASCADE, primary_key=True)
big_auto_fields = models.ManyToManyField(BigAutoFieldModel)
|
d5aeec29c55e4f116a61b1ab0c1f1f4ded3d5a09879c390daa26b9c8ad6a48ab | import datetime
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import models
from django.forms import CharField, FileField, Form, ModelForm
from django.forms.models import ModelFormMetaclass
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from ..models import (
BoundaryModel,
ChoiceFieldModel,
ChoiceModel,
ChoiceOptionModel,
Defaults,
FileModel,
OptionalMultiChoiceModel,
)
from . import jinja2_tests
class ChoiceFieldForm(ModelForm):
class Meta:
model = ChoiceFieldModel
fields = "__all__"
class OptionalMultiChoiceModelForm(ModelForm):
class Meta:
model = OptionalMultiChoiceModel
fields = "__all__"
class ChoiceFieldExclusionForm(ModelForm):
multi_choice = CharField(max_length=50)
class Meta:
exclude = ["multi_choice"]
model = ChoiceFieldModel
class EmptyCharLabelChoiceForm(ModelForm):
class Meta:
model = ChoiceModel
fields = ["name", "choice"]
class EmptyIntegerLabelChoiceForm(ModelForm):
class Meta:
model = ChoiceModel
fields = ["name", "choice_integer"]
class EmptyCharLabelNoneChoiceForm(ModelForm):
class Meta:
model = ChoiceModel
fields = ["name", "choice_string_w_none"]
class FileForm(Form):
file1 = FileField()
class TestTicket14567(TestCase):
"""
The return values of ModelMultipleChoiceFields are QuerySets
"""
def test_empty_queryset_return(self):
"""
If a model's ManyToManyField has blank=True and is saved with no data,
a queryset is returned.
"""
option = ChoiceOptionModel.objects.create(name="default")
form = OptionalMultiChoiceModelForm(
{"multi_choice_optional": "", "multi_choice": [option.pk]}
)
self.assertTrue(form.is_valid())
# The empty value is a QuerySet
self.assertIsInstance(
form.cleaned_data["multi_choice_optional"], models.query.QuerySet
)
# While we're at it, test whether a QuerySet is returned if there *is* a value.
self.assertIsInstance(form.cleaned_data["multi_choice"], models.query.QuerySet)
class ModelFormCallableModelDefault(TestCase):
def test_no_empty_option(self):
"""
If a model's ForeignKey has blank=False and a default, no empty option
is created.
"""
option = ChoiceOptionModel.objects.create(name="default")
choices = list(ChoiceFieldForm().fields["choice"].choices)
self.assertEqual(len(choices), 1)
self.assertEqual(choices[0], (option.pk, str(option)))
def test_callable_initial_value(self):
"""
The initial value for a callable default returning a queryset is the
pk.
"""
ChoiceOptionModel.objects.create(id=1, name="default")
ChoiceOptionModel.objects.create(id=2, name="option 2")
ChoiceOptionModel.objects.create(id=3, name="option 3")
self.assertHTMLEqual(
ChoiceFieldForm().as_p(),
"""
<p><label for="id_choice">Choice:</label>
<select name="choice" id="id_choice">
<option value="1" selected>ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select>
<input type="hidden" name="initial-choice" value="1" id="initial-id_choice">
</p>
<p><label for="id_choice_int">Choice int:</label>
<select name="choice_int" id="id_choice_int">
<option value="1" selected>ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select>
<input type="hidden" name="initial-choice_int" value="1"
id="initial-id_choice_int">
</p>
<p><label for="id_multi_choice">Multi choice:</label>
<select multiple name="multi_choice" id="id_multi_choice" required>
<option value="1" selected>ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select>
<input type="hidden" name="initial-multi_choice" value="1"
id="initial-id_multi_choice_0">
</p>
<p><label for="id_multi_choice_int">Multi choice int:</label>
<select multiple name="multi_choice_int" id="id_multi_choice_int" required>
<option value="1" selected>ChoiceOption 1</option>
<option value="2">ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select>
<input type="hidden" name="initial-multi_choice_int" value="1"
id="initial-id_multi_choice_int_0">
</p>
""",
)
def test_initial_instance_value(self):
"Initial instances for model fields may also be instances (refs #7287)"
ChoiceOptionModel.objects.create(id=1, name="default")
obj2 = ChoiceOptionModel.objects.create(id=2, name="option 2")
obj3 = ChoiceOptionModel.objects.create(id=3, name="option 3")
self.assertHTMLEqual(
ChoiceFieldForm(
initial={
"choice": obj2,
"choice_int": obj2,
"multi_choice": [obj2, obj3],
"multi_choice_int": ChoiceOptionModel.objects.exclude(
name="default"
),
}
).as_p(),
"""
<p><label for="id_choice">Choice:</label>
<select name="choice" id="id_choice">
<option value="1">ChoiceOption 1</option>
<option value="2" selected>ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select>
<input type="hidden" name="initial-choice" value="2" id="initial-id_choice">
</p>
<p><label for="id_choice_int">Choice int:</label>
<select name="choice_int" id="id_choice_int">
<option value="1">ChoiceOption 1</option>
<option value="2" selected>ChoiceOption 2</option>
<option value="3">ChoiceOption 3</option>
</select>
<input type="hidden" name="initial-choice_int" value="2"
id="initial-id_choice_int">
</p>
<p><label for="id_multi_choice">Multi choice:</label>
<select multiple name="multi_choice" id="id_multi_choice" required>
<option value="1">ChoiceOption 1</option>
<option value="2" selected>ChoiceOption 2</option>
<option value="3" selected>ChoiceOption 3</option>
</select>
<input type="hidden" name="initial-multi_choice" value="2"
id="initial-id_multi_choice_0">
<input type="hidden" name="initial-multi_choice" value="3"
id="initial-id_multi_choice_1">
</p>
<p><label for="id_multi_choice_int">Multi choice int:</label>
<select multiple name="multi_choice_int" id="id_multi_choice_int" required>
<option value="1">ChoiceOption 1</option>
<option value="2" selected>ChoiceOption 2</option>
<option value="3" selected>ChoiceOption 3</option>
</select>
<input type="hidden" name="initial-multi_choice_int" value="2"
id="initial-id_multi_choice_int_0">
<input type="hidden" name="initial-multi_choice_int" value="3"
id="initial-id_multi_choice_int_1">
</p>
""",
)
@skipUnlessDBFeature("supports_json_field")
def test_callable_default_hidden_widget_value_not_overridden(self):
class FieldWithCallableDefaultsModel(models.Model):
int_field = models.IntegerField(default=lambda: 1)
json_field = models.JSONField(default=dict)
class FieldWithCallableDefaultsModelForm(ModelForm):
class Meta:
model = FieldWithCallableDefaultsModel
fields = "__all__"
form = FieldWithCallableDefaultsModelForm(
data={
"initial-int_field": "1",
"int_field": "1000",
"initial-json_field": "{}",
"json_field": '{"key": "val"}',
}
)
form_html = form.as_p()
self.assertHTMLEqual(
form_html,
"""
<p>
<label for="id_int_field">Int field:</label>
<input type="number" name="int_field" value="1000"
required id="id_int_field">
<input type="hidden" name="initial-int_field" value="1"
id="initial-id_int_field">
</p>
<p>
<label for="id_json_field">Json field:</label>
<textarea cols="40" id="id_json_field" name="json_field" required rows="10">
{"key": "val"}
</textarea>
<input id="initial-id_json_field" name="initial-json_field" type="hidden"
value="{}">
</p>
""",
)
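    # The "initial-<name>" hidden inputs above exist so that forms with
    # callable defaults can detect changes: the hidden input keeps the value
    # the form was rendered with ("1", "{}"), while the visible widget
    # carries the submitted data ("1000", '{"key": "val"}').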
class FormsModelTestCase(TestCase):
def test_unicode_filename(self):
# FileModel with Unicode filename and data #########################
file1 = SimpleUploadedFile(
"我隻氣墊船裝滿晒鱔.txt", "मेरी मँडराने वाली नाव सर्पमीनों से भरी ह".encode()
)
f = FileForm(data={}, files={"file1": file1}, auto_id=False)
self.assertTrue(f.is_valid())
self.assertIn("file1", f.cleaned_data)
m = FileModel.objects.create(file=f.cleaned_data["file1"])
self.assertEqual(
m.file.name,
"tests/\u6211\u96bb\u6c23\u588a\u8239\u88dd\u6eff\u6652\u9c54.txt",
)
m.delete()
def test_boundary_conditions(self):
# Boundary conditions on a PositiveIntegerField #########################
class BoundaryForm(ModelForm):
class Meta:
model = BoundaryModel
fields = "__all__"
f = BoundaryForm({"positive_integer": 100})
self.assertTrue(f.is_valid())
f = BoundaryForm({"positive_integer": 0})
self.assertTrue(f.is_valid())
f = BoundaryForm({"positive_integer": -100})
self.assertFalse(f.is_valid())
def test_formfield_initial(self):
# If the model has default values for some fields, they are used as the
# formfield initial values.
class DefaultsForm(ModelForm):
class Meta:
model = Defaults
fields = "__all__"
self.assertEqual(DefaultsForm().fields["name"].initial, "class default value")
self.assertEqual(
DefaultsForm().fields["def_date"].initial, datetime.date(1980, 1, 1)
)
self.assertEqual(DefaultsForm().fields["value"].initial, 42)
r1 = DefaultsForm()["callable_default"].as_widget()
r2 = DefaultsForm()["callable_default"].as_widget()
self.assertNotEqual(r1, r2)
# In a ModelForm that is passed an instance, the initial values come from the
# instance's values, not the model's defaults.
foo_instance = Defaults(
name="instance value", def_date=datetime.date(1969, 4, 4), value=12
)
instance_form = DefaultsForm(instance=foo_instance)
self.assertEqual(instance_form.initial["name"], "instance value")
self.assertEqual(instance_form.initial["def_date"], datetime.date(1969, 4, 4))
self.assertEqual(instance_form.initial["value"], 12)
class ExcludingForm(ModelForm):
name = CharField(max_length=255)
class Meta:
model = Defaults
exclude = ["name", "callable_default"]
f = ExcludingForm(
{"name": "Hello", "value": 99, "def_date": datetime.date(1999, 3, 2)}
)
self.assertTrue(f.is_valid())
self.assertEqual(f.cleaned_data["name"], "Hello")
obj = f.save()
self.assertEqual(obj.name, "class default value")
self.assertEqual(obj.value, 99)
self.assertEqual(obj.def_date, datetime.date(1999, 3, 2))
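    # Because "name" is in Meta.exclude, the submitted "Hello" is validated
    # on the form but never copied to the instance, so save() falls back to
    # the model field's default ("class default value").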
class RelatedModelFormTests(SimpleTestCase):
def test_invalid_loading_order(self):
"""
Test for issue 10405
"""
class A(models.Model):
ref = models.ForeignKey("B", models.CASCADE)
class Meta:
model = A
fields = "__all__"
msg = (
"Cannot create form field for 'ref' yet, because "
"its related model 'B' has not been loaded yet"
)
with self.assertRaisesMessage(ValueError, msg):
ModelFormMetaclass("Form", (ModelForm,), {"Meta": Meta})
class B(models.Model):
pass
def test_valid_loading_order(self):
"""
Test for issue 10405
"""
class C(models.Model):
ref = models.ForeignKey("D", models.CASCADE)
class D(models.Model):
pass
class Meta:
model = C
fields = "__all__"
self.assertTrue(
issubclass(
ModelFormMetaclass("Form", (ModelForm,), {"Meta": Meta}), ModelForm
)
)
class ManyToManyExclusionTestCase(TestCase):
def test_m2m_field_exclusion(self):
# Issue 12337. save_instance should honor the passed-in exclude keyword.
opt1 = ChoiceOptionModel.objects.create(id=1, name="default")
opt2 = ChoiceOptionModel.objects.create(id=2, name="option 2")
opt3 = ChoiceOptionModel.objects.create(id=3, name="option 3")
initial = {
"choice": opt1,
"choice_int": opt1,
}
data = {
"choice": opt2.pk,
"choice_int": opt2.pk,
"multi_choice": "string data!",
"multi_choice_int": [opt1.pk],
}
instance = ChoiceFieldModel.objects.create(**initial)
instance.multi_choice.set([opt2, opt3])
instance.multi_choice_int.set([opt2, opt3])
form = ChoiceFieldExclusionForm(data=data, instance=instance)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data["multi_choice"], data["multi_choice"])
form.save()
self.assertEqual(form.instance.choice.pk, data["choice"])
self.assertEqual(form.instance.choice_int.pk, data["choice_int"])
self.assertEqual(list(form.instance.multi_choice.all()), [opt2, opt3])
self.assertEqual(
[obj.pk for obj in form.instance.multi_choice_int.all()],
data["multi_choice_int"],
)
class EmptyLabelTestCase(TestCase):
def test_empty_field_char(self):
f = EmptyCharLabelChoiceForm()
self.assertHTMLEqual(
f.as_p(),
"""
<p><label for="id_name">Name:</label>
<input id="id_name" maxlength="10" name="name" type="text" required></p>
<p><label for="id_choice">Choice:</label>
<select id="id_choice" name="choice">
<option value="" selected>No Preference</option>
<option value="f">Foo</option>
<option value="b">Bar</option>
</select></p>
""",
)
def test_empty_field_char_none(self):
f = EmptyCharLabelNoneChoiceForm()
self.assertHTMLEqual(
f.as_p(),
"""
<p><label for="id_name">Name:</label>
<input id="id_name" maxlength="10" name="name" type="text" required></p>
<p><label for="id_choice_string_w_none">Choice string w none:</label>
<select id="id_choice_string_w_none" name="choice_string_w_none">
<option value="" selected>No Preference</option>
<option value="f">Foo</option>
<option value="b">Bar</option>
</select></p>
""",
)
def test_save_empty_label_forms(self):
# Saving a form with a blank choice results in the expected
# value being stored in the database.
tests = [
(EmptyCharLabelNoneChoiceForm, "choice_string_w_none", None),
(EmptyIntegerLabelChoiceForm, "choice_integer", None),
(EmptyCharLabelChoiceForm, "choice", ""),
]
for form, key, expected in tests:
with self.subTest(form=form):
f = form({"name": "some-key", key: ""})
self.assertTrue(f.is_valid())
m = f.save()
self.assertEqual(expected, getattr(m, key))
self.assertEqual(
"No Preference", getattr(m, "get_{}_display".format(key))()
)
def test_empty_field_integer(self):
f = EmptyIntegerLabelChoiceForm()
self.assertHTMLEqual(
f.as_p(),
"""
<p><label for="id_name">Name:</label>
<input id="id_name" maxlength="10" name="name" type="text" required></p>
<p><label for="id_choice_integer">Choice integer:</label>
<select id="id_choice_integer" name="choice_integer">
<option value="" selected>No Preference</option>
<option value="1">Foo</option>
<option value="2">Bar</option>
</select></p>
""",
)
def test_get_display_value_on_none(self):
m = ChoiceModel.objects.create(name="test", choice="", choice_integer=None)
self.assertIsNone(m.choice_integer)
self.assertEqual("No Preference", m.get_choice_integer_display())
def test_html_rendering_of_prepopulated_models(self):
none_model = ChoiceModel(name="none-test", choice_integer=None)
f = EmptyIntegerLabelChoiceForm(instance=none_model)
self.assertHTMLEqual(
f.as_p(),
"""
<p><label for="id_name">Name:</label>
<input id="id_name" maxlength="10" name="name" type="text"
value="none-test" required>
</p>
<p><label for="id_choice_integer">Choice integer:</label>
<select id="id_choice_integer" name="choice_integer">
<option value="" selected>No Preference</option>
<option value="1">Foo</option>
<option value="2">Bar</option>
</select></p>
""",
)
foo_model = ChoiceModel(name="foo-test", choice_integer=1)
f = EmptyIntegerLabelChoiceForm(instance=foo_model)
self.assertHTMLEqual(
f.as_p(),
"""
<p><label for="id_name">Name:</label>
<input id="id_name" maxlength="10" name="name" type="text"
value="foo-test" required>
</p>
<p><label for="id_choice_integer">Choice integer:</label>
<select id="id_choice_integer" name="choice_integer">
<option value="">No Preference</option>
<option value="1" selected>Foo</option>
<option value="2">Bar</option>
</select></p>
""",
)
@jinja2_tests
class Jinja2EmptyLabelTestCase(EmptyLabelTestCase):
pass
|
b3c808eb28e1f738bc78a945baf73f096fb33001433142d190224404f8cfdce6 | import importlib
import inspect
import os
import re
import sys
import tempfile
import threading
from io import StringIO
from pathlib import Path
from unittest import mock, skipIf, skipUnless
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import DatabaseError, connection
from django.http import Http404, HttpRequest, HttpResponse
from django.shortcuts import render
from django.template import TemplateDoesNotExist
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import LoggingCaptureMixin
from django.urls import path, reverse
from django.urls.converters import IntConverter
from django.utils.functional import SimpleLazyObject
from django.utils.regex_helper import _lazy_re_compile
from django.utils.safestring import mark_safe
from django.utils.version import PY311
from django.views.debug import (
CallableSettingWrapper,
ExceptionCycleWarning,
ExceptionReporter,
)
from django.views.debug import Path as DebugPath
from django.views.debug import (
SafeExceptionReporterFilter,
default_urlconf,
get_default_exception_reporter_filter,
technical_404_response,
technical_500_response,
)
from django.views.decorators.debug import sensitive_post_parameters, sensitive_variables
from ..views import (
custom_exception_reporter_filter_view,
index_page,
multivalue_dict_key_error,
non_sensitive_view,
paranoid_view,
sensitive_args_function_caller,
sensitive_kwargs_function_caller,
sensitive_method_view,
sensitive_view,
)
class User:
def __str__(self):
return "jacob"
class WithoutEmptyPathUrls:
urlpatterns = [path("url/", index_page, name="url")]
class CallableSettingWrapperTests(SimpleTestCase):
"""Unittests for CallableSettingWrapper"""
def test_repr(self):
class WrappedCallable:
def __repr__(self):
return "repr from the wrapped callable"
def __call__(self):
pass
actual = repr(CallableSettingWrapper(WrappedCallable()))
self.assertEqual(actual, "repr from the wrapped callable")
@override_settings(DEBUG=True, ROOT_URLCONF="view_tests.urls")
class DebugViewTests(SimpleTestCase):
def test_files(self):
with self.assertLogs("django.request", "ERROR"):
response = self.client.get("/raises/")
self.assertEqual(response.status_code, 500)
data = {
"file_data.txt": SimpleUploadedFile("file_data.txt", b"haha"),
}
with self.assertLogs("django.request", "ERROR"):
response = self.client.post("/raises/", data)
self.assertContains(response, "file_data.txt", status_code=500)
self.assertNotContains(response, "haha", status_code=500)
def test_400(self):
# When DEBUG=True, technical_500_template() is called.
with self.assertLogs("django.security", "WARNING"):
response = self.client.get("/raises400/")
self.assertContains(response, '<div class="context" id="', status_code=400)
def test_400_bad_request(self):
# When DEBUG=True, technical_500_template() is called.
with self.assertLogs("django.request", "WARNING") as cm:
response = self.client.get("/raises400_bad_request/")
self.assertContains(response, '<div class="context" id="', status_code=400)
self.assertEqual(
cm.records[0].getMessage(),
"Malformed request syntax: /raises400_bad_request/",
)
# Ensure no 403.html template exists to test the default case.
@override_settings(
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
}
]
)
def test_403(self):
response = self.client.get("/raises403/")
self.assertContains(response, "<h1>403 Forbidden</h1>", status_code=403)
# Set up a test 403.html template.
@override_settings(
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"OPTIONS": {
"loaders": [
(
"django.template.loaders.locmem.Loader",
{
"403.html": (
"This is a test template for a 403 error "
"({{ exception }})."
),
},
),
],
},
}
]
)
def test_403_template(self):
response = self.client.get("/raises403/")
self.assertContains(response, "test template", status_code=403)
self.assertContains(response, "(Insufficient Permissions).", status_code=403)
def test_404(self):
response = self.client.get("/raises404/")
self.assertNotContains(
response,
'<pre class="exception_value">',
status_code=404,
)
self.assertContains(
response,
"<p>The current path, <code>not-in-urls</code>, didn’t match any "
"of these.</p>",
status_code=404,
html=True,
)
def test_404_not_in_urls(self):
response = self.client.get("/not-in-urls")
self.assertNotContains(response, "Raised by:", status_code=404)
self.assertNotContains(
response,
'<pre class="exception_value">',
status_code=404,
)
self.assertContains(
response, "Django tried these URL patterns", status_code=404
)
self.assertContains(
response,
"<p>The current path, <code>not-in-urls</code>, didn’t match any "
"of these.</p>",
status_code=404,
html=True,
)
# Pattern and view name of a RegexURLPattern appear.
self.assertContains(
response, r"^regex-post/(?P<pk>[0-9]+)/$", status_code=404
)
self.assertContains(response, "[name='regex-post']", status_code=404)
# Pattern and view name of a RoutePattern appear.
self.assertContains(response, r"path-post/<int:pk>/", status_code=404)
self.assertContains(response, "[name='path-post']", status_code=404)
@override_settings(ROOT_URLCONF=WithoutEmptyPathUrls)
def test_404_empty_path_not_in_urls(self):
response = self.client.get("/")
self.assertContains(
response,
"<p>The empty path didn’t match any of these.</p>",
status_code=404,
html=True,
)
def test_technical_404(self):
response = self.client.get("/technical404/")
self.assertContains(
response,
'<pre class="exception_value">Testing technical 404.</pre>',
status_code=404,
html=True,
)
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(
response,
"<td>view_tests.views.technical404</td>",
status_code=404,
)
self.assertContains(
response,
"<p>The current path, <code>technical404/</code>, matched the "
"last one.</p>",
status_code=404,
html=True,
)
def test_classbased_technical_404(self):
response = self.client.get("/classbased404/")
self.assertContains(
response,
"<th>Raised by:</th><td>view_tests.views.Http404View</td>",
status_code=404,
html=True,
)
def test_technical_500(self):
with self.assertLogs("django.request", "ERROR"):
response = self.client.get("/raises500/")
self.assertContains(
response,
"<th>Raised during:</th><td>view_tests.views.raises500</td>",
status_code=500,
html=True,
)
with self.assertLogs("django.request", "ERROR"):
response = self.client.get("/raises500/", HTTP_ACCEPT="text/plain")
self.assertContains(
response,
"Raised during: view_tests.views.raises500",
status_code=500,
)
def test_classbased_technical_500(self):
with self.assertLogs("django.request", "ERROR"):
response = self.client.get("/classbased500/")
self.assertContains(
response,
"<th>Raised during:</th><td>view_tests.views.Raises500View</td>",
status_code=500,
html=True,
)
with self.assertLogs("django.request", "ERROR"):
response = self.client.get("/classbased500/", HTTP_ACCEPT="text/plain")
self.assertContains(
response,
"Raised during: view_tests.views.Raises500View",
status_code=500,
)
def test_non_l10ned_numeric_ids(self):
"""
        Numeric IDs and the line numbers in fancy traceback context blocks
        shouldn't be localized.
"""
with self.settings(DEBUG=True):
with self.assertLogs("django.request", "ERROR"):
response = self.client.get("/raises500/")
        # Look for an HTML fragment of the form
# '<div class="context" id="c38123208">',
# not '<div class="context" id="c38,123,208"'.
self.assertContains(response, '<div class="context" id="', status_code=500)
match = re.search(
b'<div class="context" id="(?P<id>[^"]+)">', response.content
)
self.assertIsNotNone(match)
id_repr = match["id"]
self.assertFalse(
re.search(b"[^c0-9]", id_repr),
"Numeric IDs in debug response HTML page shouldn't be localized "
"(value: %s)." % id_repr.decode(),
)
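    # Rationale: with thousand-separator localization active, an id like
    # 38123208 could otherwise render as "38,123,208", breaking the anchors
    # and the JavaScript that toggles the traceback context blocks.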
def test_template_exceptions(self):
with self.assertLogs("django.request", "ERROR"):
try:
self.client.get(reverse("template_exception"))
except Exception:
raising_loc = inspect.trace()[-1][-2][0].strip()
self.assertNotEqual(
raising_loc.find('raise Exception("boom")'),
-1,
"Failed to find 'raise Exception' in last frame of "
"traceback, instead found: %s" % raising_loc,
)
@skipIf(
sys.platform == "win32",
"Raises OSError instead of TemplateDoesNotExist on Windows.",
)
def test_safestring_in_exception(self):
with self.assertLogs("django.request", "ERROR"):
response = self.client.get("/safestring_exception/")
self.assertNotContains(
response,
"<script>alert(1);</script>",
status_code=500,
html=True,
)
self.assertContains(
response,
"<script>alert(1);</script>",
count=3,
status_code=500,
html=True,
)
def test_template_loader_postmortem(self):
"""Tests for not existing file"""
template_name = "notfound.html"
with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile:
tempdir = os.path.dirname(tmpfile.name)
template_path = os.path.join(tempdir, template_name)
with override_settings(
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [tempdir],
}
]
), self.assertLogs("django.request", "ERROR"):
response = self.client.get(
reverse(
"raises_template_does_not_exist", kwargs={"path": template_name}
)
)
self.assertContains(
response,
"%s (Source does not exist)" % template_path,
status_code=500,
count=2,
)
# Assert as HTML.
self.assertContains(
response,
"<li><code>django.template.loaders.filesystem.Loader</code>: "
"%s (Source does not exist)</li>"
% os.path.join(tempdir, "notfound.html"),
status_code=500,
html=True,
)
def test_no_template_source_loaders(self):
"""
        Make sure that if you don't specify a template, the debug view
        doesn't blow up.
"""
with self.assertLogs("django.request", "ERROR"):
with self.assertRaises(TemplateDoesNotExist):
self.client.get("/render_no_template/")
@override_settings(ROOT_URLCONF="view_tests.default_urls")
def test_default_urlconf_template(self):
"""
Make sure that the default URLconf template is shown instead of the
technical 404 page, if the user has not altered their URLconf yet.
"""
response = self.client.get("/")
self.assertContains(
response, "<h1>The install worked successfully! Congratulations!</h1>"
)
@override_settings(ROOT_URLCONF="view_tests.regression_21530_urls")
def test_regression_21530(self):
"""
Regression test for bug #21530.
If the admin app include is replaced with exactly one url
pattern, then the technical 404 template should be displayed.
The bug here was that an AttributeError caused a 500 response.
"""
response = self.client.get("/")
self.assertContains(
response, "Page not found <span>(404)</span>", status_code=404
)
def test_template_encoding(self):
"""
        The templates are loaded directly, not via a template loader, and
        should be opened with the utf-8 charset, matching the default
        specified for template engines.
"""
with mock.patch.object(DebugPath, "open") as m:
default_urlconf(None)
m.assert_called_once_with(encoding="utf-8")
m.reset_mock()
technical_404_response(mock.MagicMock(), mock.Mock())
m.assert_called_once_with(encoding="utf-8")
def test_technical_404_converter_raise_404(self):
with mock.patch.object(IntConverter, "to_python", side_effect=Http404):
response = self.client.get("/path-post/1/")
self.assertContains(response, "Page not found", status_code=404)
def test_exception_reporter_from_request(self):
with self.assertLogs("django.request", "ERROR"):
response = self.client.get("/custom_reporter_class_view/")
self.assertContains(response, "custom traceback text", status_code=500)
@override_settings(
DEFAULT_EXCEPTION_REPORTER="view_tests.views.CustomExceptionReporter"
)
def test_exception_reporter_from_settings(self):
with self.assertLogs("django.request", "ERROR"):
response = self.client.get("/raises500/")
self.assertContains(response, "custom traceback text", status_code=500)
@override_settings(
DEFAULT_EXCEPTION_REPORTER="view_tests.views.TemplateOverrideExceptionReporter"
)
def test_template_override_exception_reporter(self):
with self.assertLogs("django.request", "ERROR"):
response = self.client.get("/raises500/")
self.assertContains(
response,
"<h1>Oh no, an error occurred!</h1>",
status_code=500,
html=True,
)
with self.assertLogs("django.request", "ERROR"):
response = self.client.get("/raises500/", HTTP_ACCEPT="text/plain")
self.assertContains(response, "Oh dear, an error occurred!", status_code=500)
class DebugViewQueriesAllowedTests(SimpleTestCase):
# May need a query to initialize MySQL connection
databases = {"default"}
def test_handle_db_exception(self):
"""
Ensure the debug view works when a database exception is raised by
performing an invalid query and passing the exception to the debug view.
"""
with connection.cursor() as cursor:
try:
cursor.execute("INVALID SQL")
except DatabaseError:
exc_info = sys.exc_info()
rf = RequestFactory()
response = technical_500_response(rf.get("/"), *exc_info)
self.assertContains(response, "OperationalError at /", status_code=500)
@override_settings(
DEBUG=True,
ROOT_URLCONF="view_tests.urls",
# No template directories are configured, so no templates will be found.
TEMPLATES=[
{
"BACKEND": "django.template.backends.dummy.TemplateStrings",
}
],
)
class NonDjangoTemplatesDebugViewTests(SimpleTestCase):
def test_400(self):
# When DEBUG=True, technical_500_template() is called.
with self.assertLogs("django.security", "WARNING"):
response = self.client.get("/raises400/")
self.assertContains(response, '<div class="context" id="', status_code=400)
def test_400_bad_request(self):
# When DEBUG=True, technical_500_template() is called.
with self.assertLogs("django.request", "WARNING") as cm:
response = self.client.get("/raises400_bad_request/")
self.assertContains(response, '<div class="context" id="', status_code=400)
self.assertEqual(
cm.records[0].getMessage(),
"Malformed request syntax: /raises400_bad_request/",
)
def test_403(self):
response = self.client.get("/raises403/")
self.assertContains(response, "<h1>403 Forbidden</h1>", status_code=403)
def test_404(self):
response = self.client.get("/raises404/")
self.assertEqual(response.status_code, 404)
def test_template_not_found_error(self):
# Raises a TemplateDoesNotExist exception and shows the debug view.
url = reverse(
"raises_template_does_not_exist", kwargs={"path": "notfound.html"}
)
with self.assertLogs("django.request", "ERROR"):
response = self.client.get(url)
self.assertContains(response, '<div class="context" id="', status_code=500)
class ExceptionReporterTests(SimpleTestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get("/test_view/")
request.user = User()
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML("<h1>ValueError at /test_view/</h1>", html)
self.assertIn(
'<pre class="exception_value">Can&#x27;t find my keys</pre>', html
)
self.assertIn("<th>Request Method:</th>", html)
self.assertIn("<th>Request URL:</th>", html)
self.assertIn('<h3 id="user-info">USER</h3>', html)
self.assertIn("<p>jacob</p>", html)
self.assertIn("<th>Exception Type:</th>", html)
self.assertIn("<th>Exception Value:</th>", html)
self.assertIn("<h2>Traceback ", html)
self.assertIn("<h2>Request information</h2>", html)
self.assertNotIn("<p>Request data not supplied</p>", html)
self.assertIn("<p>No POST data</p>", html)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML("<h1>ValueError</h1>", html)
self.assertIn(
'<pre class="exception_value">Can&#x27;t find my keys</pre>', html
)
self.assertNotIn("<th>Request Method:</th>", html)
self.assertNotIn("<th>Request URL:</th>", html)
self.assertNotIn('<h3 id="user-info">USER</h3>', html)
self.assertIn("<th>Exception Type:</th>", html)
self.assertIn("<th>Exception Value:</th>", html)
self.assertIn("<h2>Traceback ", html)
self.assertIn("<h2>Request information</h2>", html)
self.assertIn("<p>Request data not supplied</p>", html)
def test_sharing_traceback(self):
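# The rendered debug page includes a form for sharing the traceback on
# dpaste.com, asserted below.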
try:
raise ValueError("Oops")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn(
'<form action="https://dpaste.com/" name="pasteform" '
'id="pasteform" method="post">',
html,
)
def test_eol_support(self):
"""The ExceptionReporter supports Unix, Windows and Macintosh EOL markers"""
LINES = ["print %d" % i for i in range(1, 6)]
reporter = ExceptionReporter(None, None, None, None)
for newline in ["\n", "\r\n", "\r"]:
fd, filename = tempfile.mkstemp(text=False)
os.write(fd, (newline.join(LINES) + newline).encode())
os.close(fd)
try:
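# _get_lines_from_file(filename, lineno, context_lines) returns a
# (lower_bound, pre_context, context_line, post_context) tuple.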
self.assertEqual(
reporter._get_lines_from_file(filename, 3, 2),
(1, LINES[1:3], LINES[3], LINES[4:]),
)
finally:
os.unlink(filename)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get("/test_view/")
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML("<h1>Report at /test_view/</h1>", html)
self.assertIn(
'<pre class="exception_value">No exception message supplied</pre>', html
)
self.assertIn("<th>Request Method:</th>", html)
self.assertIn("<th>Request URL:</th>", html)
self.assertNotIn("<th>Exception Type:</th>", html)
self.assertNotIn("<th>Exception Value:</th>", html)
self.assertNotIn("<h2>Traceback ", html)
self.assertIn("<h2>Request information</h2>", html)
self.assertNotIn("<p>Request data not supplied</p>", html)
def test_suppressed_context(self):
try:
try:
raise RuntimeError("Can't find my keys")
except RuntimeError:
raise ValueError("Can't find my keys") from None
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML("<h1>ValueError</h1>", html)
self.assertIn(
'<pre class="exception_value">Can&#x27;t find my keys</pre>', html
)
self.assertIn("<th>Exception Type:</th>", html)
self.assertIn("<th>Exception Value:</th>", html)
self.assertIn("<h2>Traceback ", html)
self.assertIn("<h2>Request information</h2>", html)
self.assertIn("<p>Request data not supplied</p>", html)
self.assertNotIn("During handling of the above exception", html)
def test_innermost_exception_without_traceback(self):
try:
try:
raise RuntimeError("Oops")
except Exception as exc:
new_exc = RuntimeError("My context")
exc.__context__ = new_exc
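# new_exc was never raised, so it carries no __traceback__; the
# reporter must handle a context exception without frames.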
raise
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
frames = reporter.get_traceback_frames()
self.assertEqual(len(frames), 2)
html = reporter.get_traceback_html()
self.assertInHTML("<h1>RuntimeError</h1>", html)
self.assertIn('<pre class="exception_value">Oops</pre>', html)
self.assertIn("<th>Exception Type:</th>", html)
self.assertIn("<th>Exception Value:</th>", html)
self.assertIn("<h2>Traceback ", html)
self.assertIn("<h2>Request information</h2>", html)
self.assertIn("<p>Request data not supplied</p>", html)
self.assertIn(
"During handling of the above exception (My context), another "
"exception occurred",
html,
)
self.assertInHTML('<li class="frame user">None</li>', html)
self.assertIn("Traceback (most recent call last):\n None", html)
text = reporter.get_traceback_text()
self.assertIn("Exception Type: RuntimeError", text)
self.assertIn("Exception Value: Oops", text)
self.assertIn("Traceback (most recent call last):\n None", text)
self.assertIn(
"During handling of the above exception (My context), another "
"exception occurred",
text,
)
@skipUnless(PY311, "Exception notes were added in Python 3.11.")
def test_exception_with_notes(self):
request = self.rf.get("/test_view/")
try:
try:
raise RuntimeError("Oops")
except Exception as err:
err.add_note("First Note")
err.add_note("Second Note")
err.add_note(mark_safe("<script>alert(1);</script>"))
raise err
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn(
'<pre class="exception_value">Oops\nFirst Note\nSecond Note\n'
"&lt;script&gt;alert(1);&lt;/script&gt;</pre>",
html,
)
self.assertIn(
"Exception Value: Oops\nFirst Note\nSecond Note\n"
"&lt;script&gt;alert(1);&lt;/script&gt;",
html,
)
text = reporter.get_traceback_text()
self.assertIn(
"Exception Value: Oops\nFirst Note\nSecond Note\n"
"<script>alert(1);</script>",
text,
)
def test_mid_stack_exception_without_traceback(self):
try:
try:
raise RuntimeError("Inner Oops")
except Exception as exc:
new_exc = RuntimeError("My context")
new_exc.__context__ = exc
raise RuntimeError("Oops") from new_exc
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML("<h1>RuntimeError</h1>", html)
self.assertIn('<pre class="exception_value">Oops</pre>', html)
self.assertIn("<th>Exception Type:</th>", html)
self.assertIn("<th>Exception Value:</th>", html)
self.assertIn("<h2>Traceback ", html)
self.assertInHTML('<li class="frame user">Traceback: None</li>', html)
self.assertIn(
"During handling of the above exception (Inner Oops), another "
"exception occurred:\n Traceback: None",
html,
)
text = reporter.get_traceback_text()
self.assertIn("Exception Type: RuntimeError", text)
self.assertIn("Exception Value: Oops", text)
self.assertIn("Traceback (most recent call last):", text)
self.assertIn(
"During handling of the above exception (Inner Oops), another "
"exception occurred:\n Traceback: None",
text,
)
def test_reporting_of_nested_exceptions(self):
request = self.rf.get("/test_view/")
try:
try:
raise AttributeError(mark_safe("<p>Top level</p>"))
except AttributeError as explicit:
try:
raise ValueError(mark_safe("<p>Second exception</p>")) from explicit
except ValueError:
raise IndexError(mark_safe("<p>Final exception</p>"))
except Exception:
# Custom exception handler, just pass it into ExceptionReporter
exc_type, exc_value, tb = sys.exc_info()
explicit_exc = (
"The above exception ({0}) was the direct cause of the following exception:"
)
implicit_exc = (
"During handling of the above exception ({0}), another exception occurred:"
)
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
# Both messages appear twice on the page: once rendered as HTML and
# once as plain text (for the pastebin).
self.assertEqual(
2, html.count(explicit_exc.format("&lt;p&gt;Top level&lt;/p&gt;"))
)
self.assertEqual(
2, html.count(implicit_exc.format("&lt;p&gt;Second exception&lt;/p&gt;"))
)
self.assertEqual(10, html.count("&lt;p&gt;Final exception&lt;/p&gt;"))
text = reporter.get_traceback_text()
self.assertIn(explicit_exc.format("<p>Top level</p>"), text)
self.assertIn(implicit_exc.format("<p>Second exception</p>"), text)
self.assertEqual(3, text.count("<p>Final exception</p>"))
def test_reporting_frames_without_source(self):
try:
source = "def funcName():\n raise Error('Whoops')\nfuncName()"
namespace = {}
code = compile(source, "generated", "exec")
exec(code, namespace)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
request = self.rf.get("/test_view/")
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
frames = reporter.get_traceback_frames()
last_frame = frames[-1]
self.assertEqual(last_frame["context_line"], "<source code not available>")
self.assertEqual(last_frame["filename"], "generated")
self.assertEqual(last_frame["function"], "funcName")
self.assertEqual(last_frame["lineno"], 2)
html = reporter.get_traceback_html()
self.assertIn(
'<span class="fname">generated</span>, line 2, in funcName',
html,
)
self.assertIn(
'<code class="fname">generated</code>, line 2, in funcName',
html,
)
self.assertIn(
'"generated", line 2, in funcName\n'
"    &lt;source code not available&gt;",
html,
)
text = reporter.get_traceback_text()
self.assertIn(
'"generated", line 2, in funcName\n <source code not available>',
text,
)
def test_reporting_frames_source_not_match(self):
try:
source = "def funcName():\n raise Error('Whoops')\nfuncName()"
namespace = {}
code = compile(source, "generated", "exec")
exec(code, namespace)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
with mock.patch(
"django.views.debug.ExceptionReporter._get_source",
return_value=["wrong source"],
):
request = self.rf.get("/test_view/")
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
frames = reporter.get_traceback_frames()
last_frame = frames[-1]
self.assertEqual(last_frame["context_line"], "<source code not available>")
self.assertEqual(last_frame["filename"], "generated")
self.assertEqual(last_frame["function"], "funcName")
self.assertEqual(last_frame["lineno"], 2)
html = reporter.get_traceback_html()
self.assertIn(
'<span class="fname">generated</span>, line 2, in funcName',
html,
)
self.assertIn(
'<code class="fname">generated</code>, line 2, in funcName',
html,
)
self.assertIn(
'"generated", line 2, in funcName\n'
"    &lt;source code not available&gt;",
html,
)
text = reporter.get_traceback_text()
self.assertIn(
'"generated", line 2, in funcName\n <source code not available>',
text,
)
def test_reporting_frames_for_cyclic_reference(self):
try:
def test_func():
try:
raise RuntimeError("outer") from RuntimeError("inner")
except RuntimeError as exc:
raise exc.__cause__
test_func()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
request = self.rf.get("/test_view/")
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
def generate_traceback_frames(*args, **kwargs):
nonlocal tb_frames
tb_frames = reporter.get_traceback_frames()
tb_frames = None
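# Run the frame traversal in a separate thread so the test can bail
# out below instead of hanging if a regression reintroduces an
# infinite loop in the cycle detection.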
tb_generator = threading.Thread(target=generate_traceback_frames, daemon=True)
msg = (
"Cycle in the exception chain detected: exception 'inner' "
"encountered again."
)
with self.assertWarnsMessage(ExceptionCycleWarning, msg):
tb_generator.start()
tb_generator.join(timeout=5)
if tb_generator.is_alive():
# tb_generator is a daemon that runs until the main thread/process
# exits. This is resource heavy when running the full test suite.
# Setting the following values to None makes
# reporter.get_traceback_frames() exit early.
exc_value.__traceback__ = exc_value.__context__ = exc_value.__cause__ = None
tb_generator.join()
self.fail("Cyclic reference in Exception Reporter.get_traceback_frames()")
if tb_frames is None:
# This can happen if the thread generating the traceback was killed
# or raised an exception while generating the traceback.
self.fail("Traceback generation failed")
last_frame = tb_frames[-1]
self.assertIn("raise exc.__cause__", last_frame["context_line"])
self.assertEqual(last_frame["filename"], __file__)
self.assertEqual(last_frame["function"], "test_func")
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get("/test_view/")
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertInHTML("<h1>Report at /test_view/</h1>", html)
self.assertIn(
'<pre class="exception_value">I&#x27;m a little teapot</pre>', html
)
self.assertIn("<th>Request Method:</th>", html)
self.assertIn("<th>Request URL:</th>", html)
self.assertNotIn("<th>Exception Type:</th>", html)
self.assertNotIn("<th>Exception Value:</th>", html)
self.assertIn("<h2>Traceback ", html)
self.assertIn("<h2>Request information</h2>", html)
self.assertNotIn("<p>Request data not supplied</p>", html)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertInHTML("<h1>Report</h1>", html)
self.assertIn(
'<pre class="exception_value">I&#x27;m a little teapot</pre>', html
)
self.assertNotIn("<th>Request Method:</th>", html)
self.assertNotIn("<th>Request URL:</th>", html)
self.assertNotIn("<th>Exception Type:</th>", html)
self.assertNotIn("<th>Exception Value:</th>", html)
self.assertIn("<h2>Traceback ", html)
self.assertIn("<h2>Request information</h2>", html)
self.assertIn("<p>Request data not supplied</p>", html)
def test_non_utf8_values_handling(self):
"Non-UTF-8 exceptions/values should not make the output generation choke."
try:
class NonUtf8Output(Exception):
def __repr__(self):
return b"EXC\xe9EXC"
somevar = b"VAL\xe9VAL" # NOQA
raise NonUtf8Output()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn("VAL\\xe9VAL", html)
self.assertIn("EXC\\xe9EXC", html)
def test_local_variable_escaping(self):
"""Safe strings in local variables are escaped."""
try:
local = mark_safe("<p>Local variable</p>")
raise ValueError(local)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
html = ExceptionReporter(None, exc_type, exc_value, tb).get_traceback_html()
self.assertIn(
'<td class="code"><pre>&#x27;&lt;p&gt;Local variable&lt;/p&gt;&#x27;</pre>'
"</td>",
html,
)
def test_unprintable_values_handling(self):
"Unprintable values should not make the output generation choke."
try:
class OomOutput:
def __repr__(self):
raise MemoryError("OOM")
oomvalue = OomOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<td class="code"><pre>Error in formatting', html)
def test_too_large_values_handling(self):
"Large values should not create a large HTML."
large = 256 * 1024
repr_of_str_adds = len(repr(""))
try:
class LargeOutput:
def __repr__(self):
return repr("A" * large)
largevalue = LargeOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertEqual(len(html) // 1024 // 128, 0)  # still fits in 128 KB
self.assertIn(
"&lt;trimmed %d bytes string&gt;" % (large + repr_of_str_adds,), html
)
def test_encoding_error(self):
"""
A UnicodeError displays a portion of the problematic string. HTML in
safe strings is escaped.
"""
try:
mark_safe("abcdefghijkl<p>mnὀp</p>qrstuwxyz").encode("ascii")
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn("<h2>Unicode error hint</h2>", html)
self.assertIn("The string that could not be encoded/decoded was: ", html)
self.assertIn("<strong><p>mnὀp</p></strong>", html)
def test_unfrozen_importlib(self):
"""
importlib is not a frozen app, but its loader thinks it's frozen which
results in an ImportError. Refs #21443.
"""
try:
request = self.rf.get("/test_view/")
importlib.import_module("abc.def.invalid.name")
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML("<h1>ModuleNotFoundError at /test_view/</h1>", html)
def test_ignore_traceback_evaluation_exceptions(self):
"""
Don't trip over exceptions generated by crafted objects when
evaluating them while cleansing (#24455).
"""
class BrokenEvaluation(Exception):
pass
def broken_setup():
raise BrokenEvaluation
request = self.rf.get("/test_view/")
broken_lazy = SimpleLazyObject(broken_setup)
try:
bool(broken_lazy)
except BrokenEvaluation:
exc_type, exc_value, tb = sys.exc_info()
self.assertIn(
"BrokenEvaluation",
ExceptionReporter(request, exc_type, exc_value, tb).get_traceback_html(),
"Evaluation exception reason not mentioned in traceback",
)
@override_settings(ALLOWED_HOSTS="example.com")
def test_disallowed_host(self):
"An exception report can be generated even for a disallowed host."
request = self.rf.get("/", HTTP_HOST="evil.com")
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertIn("http://evil.com/", html)
def test_request_with_items_key(self):
"""
An exception report can be generated for requests with 'items' in
request GET, POST, FILES, or COOKIES QueryDicts.
"""
value = '<td>items</td><td class="code"><pre>&#x27;Oops&#x27;</pre></td>'
# GET
request = self.rf.get("/test_view/?items=Oops")
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(value, html)
# POST
request = self.rf.post("/test_view/", data={"items": "Oops"})
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(value, html)
# FILES
fp = StringIO("filecontent")
request = self.rf.post("/test_view/", data={"name": "filename", "items": fp})
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(
'<td>items</td><td class="code"><pre>&lt;InMemoryUploadedFile: '
"items (application/octet-stream)&gt;</pre></td>",
html,
)
# COOKIES
rf = RequestFactory()
rf.cookies["items"] = "Oops"
request = rf.get("/test_view/")
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(
'<td>items</td><td class="code"><pre>&#x27;Oops&#x27;</pre></td>', html
)
def test_exception_fetching_user(self):
"""
The error page can be rendered if the current user can't be retrieved
(such as when the database is unavailable).
"""
class ExceptionUser:
def __str__(self):
raise Exception()
request = self.rf.get("/test_view/")
request.user = ExceptionUser()
try:
raise ValueError("Oops")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML("<h1>ValueError at /test_view/</h1>", html)
self.assertIn('<pre class="exception_value">Oops</pre>', html)
self.assertIn('<h3 id="user-info">USER</h3>', html)
self.assertIn("<p>[unable to retrieve the current user]</p>", html)
text = reporter.get_traceback_text()
self.assertIn("USER: [unable to retrieve the current user]", text)
def test_template_encoding(self):
"""
The templates are loaded directly, not via a template loader, and
should be opened with the utf-8 charset, the default specified on
template engines.
"""
reporter = ExceptionReporter(None, None, None, None)
with mock.patch.object(DebugPath, "open") as m:
reporter.get_traceback_html()
m.assert_called_once_with(encoding="utf-8")
m.reset_mock()
reporter.get_traceback_text()
m.assert_called_once_with(encoding="utf-8")
@override_settings(ALLOWED_HOSTS=["example.com"])
def test_get_raw_insecure_uri(self):
factory = RequestFactory(HTTP_HOST="evil.com")
tests = [
("////absolute-uri", "http://evil.com//absolute-uri"),
("/?foo=bar", "http://evil.com/?foo=bar"),
("/path/with:colons", "http://evil.com/path/with:colons"),
]
for url, expected in tests:
with self.subTest(url=url):
request = factory.get(url)
reporter = ExceptionReporter(request, None, None, None)
self.assertEqual(reporter._get_raw_insecure_uri(), expected)
class PlainTextReportTests(SimpleTestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get("/test_view/")
request.user = User()
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn("ValueError at /test_view/", text)
self.assertIn("Can't find my keys", text)
self.assertIn("Request Method:", text)
self.assertIn("Request URL:", text)
self.assertIn("USER: jacob", text)
self.assertIn("Exception Type:", text)
self.assertIn("Exception Value:", text)
self.assertIn("Traceback (most recent call last):", text)
self.assertIn("Request information:", text)
self.assertNotIn("Request data not supplied", text)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn("ValueError", text)
self.assertIn("Can't find my keys", text)
self.assertNotIn("Request Method:", text)
self.assertNotIn("Request URL:", text)
self.assertNotIn("USER:", text)
self.assertIn("Exception Type:", text)
self.assertIn("Exception Value:", text)
self.assertIn("Traceback (most recent call last):", text)
self.assertIn("Request data not supplied", text)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get("/test_view/")
reporter = ExceptionReporter(request, None, None, None)
reporter.get_traceback_text()
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get("/test_view/")
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
reporter.get_traceback_text()
@override_settings(DEBUG=True)
def test_template_exception(self):
request = self.rf.get("/test_view/")
try:
render(request, "debug/template_error.html")
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
templ_path = Path(
Path(__file__).parents[1], "templates", "debug", "template_error.html"
)
self.assertIn(
"Template error:\n"
"In template %(path)s, error at line 2\n"
" 'cycle' tag requires at least two arguments\n"
" 1 : Template with error:\n"
" 2 : {%% cycle %%} \n"
" 3 : " % {"path": templ_path},
text,
)
def test_request_with_items_key(self):
"""
An exception report can be generated for requests with 'items' in
request GET, POST, FILES, or COOKIES QueryDicts.
"""
# GET
request = self.rf.get("/test_view/?items=Oops")
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
# POST
request = self.rf.post("/test_view/", data={"items": "Oops"})
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
# FILES
fp = StringIO("filecontent")
request = self.rf.post("/test_view/", data={"name": "filename", "items": fp})
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = <InMemoryUploadedFile:", text)
# COOKIES
rf = RequestFactory()
rf.cookies["items"] = "Oops"
request = rf.get("/test_view/")
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
reporter.get_traceback_text()
@override_settings(ALLOWED_HOSTS="example.com")
def test_disallowed_host(self):
"An exception report can be generated even for a disallowed host."
request = self.rf.get("/", HTTP_HOST="evil.com")
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("http://evil.com/", text)
class ExceptionReportTestMixin:
# Mixin used in the ExceptionReporterFilterTests and
# AjaxResponseExceptionReporterFilter tests below
breakfast_data = {
"sausage-key": "sausage-value",
"baked-beans-key": "baked-beans-value",
"hash-brown-key": "hash-brown-value",
"bacon-key": "bacon-value",
}
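# The sensitive test views mark "sausage-key" and "bacon-key" as
# sensitive POST parameters; their values must be cleansed from safe
# responses below.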
def verify_unsafe_response(
self, view, check_for_vars=True, check_for_POST_params=True
):
"""
Asserts that potentially sensitive info is displayed in the response.
"""
request = self.rf.post("/some_url/", self.breakfast_data)
response = view(request)
if check_for_vars:
# All variables are shown.
self.assertContains(response, "cooked_eggs", status_code=500)
self.assertContains(response, "scrambled", status_code=500)
self.assertContains(response, "sauce", status_code=500)
self.assertContains(response, "worcestershire", status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertContains(response, k, status_code=500)
self.assertContains(response, v, status_code=500)
def verify_safe_response(
self, view, check_for_vars=True, check_for_POST_params=True
):
"""
Asserts that certain sensitive info is not displayed in the response.
"""
request = self.rf.post("/some_url/", self.breakfast_data)
response = view(request)
if check_for_vars:
# Non-sensitive variable's name and value are shown.
self.assertContains(response, "cooked_eggs", status_code=500)
self.assertContains(response, "scrambled", status_code=500)
# Sensitive variable's name is shown but not its value.
self.assertContains(response, "sauce", status_code=500)
self.assertNotContains(response, "worcestershire", status_code=500)
if check_for_POST_params:
for k in self.breakfast_data:
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# Non-sensitive POST parameters' values are shown.
self.assertContains(response, "baked-beans-value", status_code=500)
self.assertContains(response, "hash-brown-value", status_code=500)
# Sensitive POST parameters' values are not shown.
self.assertNotContains(response, "sausage-value", status_code=500)
self.assertNotContains(response, "bacon-value", status_code=500)
def verify_paranoid_response(
self, view, check_for_vars=True, check_for_POST_params=True
):
"""
Asserts that no variables or POST parameters are displayed in the response.
"""
request = self.rf.post("/some_url/", self.breakfast_data)
response = view(request)
if check_for_vars:
# Show variable names but not their values.
self.assertContains(response, "cooked_eggs", status_code=500)
self.assertNotContains(response, "scrambled", status_code=500)
self.assertContains(response, "sauce", status_code=500)
self.assertNotContains(response, "worcestershire", status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# No POST parameters' values are shown.
self.assertNotContains(response, v, status_code=500)
def verify_unsafe_email(self, view, check_for_POST_params=True):
"""
Asserts that potentially sensitive info is displayed in the email report.
"""
with self.settings(ADMINS=[("Admin", "[email protected]")]):
mail.outbox = [] # Empty outbox
request = self.rf.post("/some_url/", self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body_plain = str(email.body)
self.assertNotIn("cooked_eggs", body_plain)
self.assertNotIn("scrambled", body_plain)
self.assertNotIn("sauce", body_plain)
self.assertNotIn("worcestershire", body_plain)
# Frames vars are shown in html email reports.
body_html = str(email.alternatives[0][0])
self.assertIn("cooked_eggs", body_html)
self.assertIn("scrambled", body_html)
self.assertIn("sauce", body_html)
self.assertIn("worcestershire", body_html)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertIn(k, body_plain)
self.assertIn(v, body_plain)
self.assertIn(k, body_html)
self.assertIn(v, body_html)
def verify_safe_email(self, view, check_for_POST_params=True):
"""
Asserts that certain sensitive info is not displayed in the email report.
"""
with self.settings(ADMINS=[("Admin", "[email protected]")]):
mail.outbox = [] # Empty outbox
request = self.rf.post("/some_url/", self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body_plain = str(email.body)
self.assertNotIn("cooked_eggs", body_plain)
self.assertNotIn("scrambled", body_plain)
self.assertNotIn("sauce", body_plain)
self.assertNotIn("worcestershire", body_plain)
# Frames vars are shown in html email reports.
body_html = str(email.alternatives[0][0])
self.assertIn("cooked_eggs", body_html)
self.assertIn("scrambled", body_html)
self.assertIn("sauce", body_html)
self.assertNotIn("worcestershire", body_html)
if check_for_POST_params:
for k in self.breakfast_data:
# All POST parameters' names are shown.
self.assertIn(k, body_plain)
# Non-sensitive POST parameters' values are shown.
self.assertIn("baked-beans-value", body_plain)
self.assertIn("hash-brown-value", body_plain)
self.assertIn("baked-beans-value", body_html)
self.assertIn("hash-brown-value", body_html)
# Sensitive POST parameters' values are not shown.
self.assertNotIn("sausage-value", body_plain)
self.assertNotIn("bacon-value", body_plain)
self.assertNotIn("sausage-value", body_html)
self.assertNotIn("bacon-value", body_html)
def verify_paranoid_email(self, view):
"""
Asserts that no variables or POST parameters are displayed in the email report.
"""
with self.settings(ADMINS=[("Admin", "[email protected]")]):
mail.outbox = [] # Empty outbox
request = self.rf.post("/some_url/", self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body = str(email.body)
self.assertNotIn("cooked_eggs", body)
self.assertNotIn("scrambled", body)
self.assertNotIn("sauce", body)
self.assertNotIn("worcestershire", body)
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertIn(k, body)
# No POST parameters' values are shown.
self.assertNotIn(v, body)
@override_settings(ROOT_URLCONF="view_tests.urls")
class ExceptionReporterFilterTests(
ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase
):
"""
Sensitive information can be filtered out of error reports (#14614).
"""
rf = RequestFactory()
def test_non_sensitive_request(self):
"""
Everything (request info and frame variables) can be seen
in the default error reports for non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
def test_sensitive_request(self):
"""
Sensitive POST parameters and frame variables cannot be
seen in the default error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view)
self.verify_unsafe_email(sensitive_view)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view)
self.verify_safe_email(sensitive_view)
def test_paranoid_request(self):
"""
No POST parameters and frame variables can be seen in the
default error reports for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view)
self.verify_unsafe_email(paranoid_view)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view)
self.verify_paranoid_email(paranoid_view)
def test_multivalue_dict_key_error(self):
"""
#21098 -- Sensitive POST parameters cannot be seen in the
error reports if request.POST['nonexistent_key'] throws an error.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(multivalue_dict_key_error)
self.verify_unsafe_email(multivalue_dict_key_error)
with self.settings(DEBUG=False):
self.verify_safe_response(multivalue_dict_key_error)
self.verify_safe_email(multivalue_dict_key_error)
def test_custom_exception_reporter_filter(self):
"""
It's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
def test_sensitive_method(self):
"""
The sensitive_variables decorator works with object methods.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(
sensitive_method_view, check_for_POST_params=False
)
self.verify_unsafe_email(sensitive_method_view, check_for_POST_params=False)
with self.settings(DEBUG=False):
self.verify_safe_response(
sensitive_method_view, check_for_POST_params=False
)
self.verify_safe_email(sensitive_method_view, check_for_POST_params=False)
def test_sensitive_function_arguments(self):
"""
Sensitive variables don't leak in the sensitive_variables decorator's
frame, when those variables are passed as arguments to the decorated
function.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_args_function_caller)
self.verify_unsafe_email(sensitive_args_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(
sensitive_args_function_caller, check_for_POST_params=False
)
self.verify_safe_email(
sensitive_args_function_caller, check_for_POST_params=False
)
def test_sensitive_function_keyword_arguments(self):
"""
Sensitive variables don't leak in the sensitive_variables decorator's
frame, when those variables are passed as keyword arguments to the
decorated function.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_kwargs_function_caller)
self.verify_unsafe_email(sensitive_kwargs_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(
sensitive_kwargs_function_caller, check_for_POST_params=False
)
self.verify_safe_email(
sensitive_kwargs_function_caller, check_for_POST_params=False
)
def test_callable_settings(self):
"""
Callable settings should not be evaluated in the debug page (#21345).
"""
def callable_setting():
return "This should not be displayed"
with self.settings(DEBUG=True, FOOBAR=callable_setting):
response = self.client.get("/raises500/")
self.assertNotContains(
response, "This should not be displayed", status_code=500
)
def test_callable_settings_forbidding_to_set_attributes(self):
"""
Callable settings that forbid setting attributes should not break
the debug page (#23070).
"""
class CallableSettingWithSlots:
__slots__ = []
def __call__(self):
return "This should not be displayed"
with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()):
response = self.client.get("/raises500/")
self.assertNotContains(
response, "This should not be displayed", status_code=500
)
def test_dict_setting_with_non_str_key(self):
"""
A dict setting containing a non-string key should not break the
debug page (#12744).
"""
with self.settings(DEBUG=True, FOOBAR={42: None}):
response = self.client.get("/raises500/")
self.assertContains(response, "FOOBAR", status_code=500)
def test_sensitive_settings(self):
"""
The debug page should not show some sensitive settings
(password, secret key, ...).
"""
sensitive_settings = [
"SECRET_KEY",
"SECRET_KEY_FALLBACKS",
"PASSWORD",
"API_KEY",
"AUTH_TOKEN",
]
for setting in sensitive_settings:
with self.settings(DEBUG=True, **{setting: "should not be displayed"}):
response = self.client.get("/raises500/")
self.assertNotContains(
response, "should not be displayed", status_code=500
)
def test_settings_with_sensitive_keys(self):
"""
The debug page should filter out some sensitive information found in
dict settings.
"""
sensitive_settings = [
"SECRET_KEY",
"SECRET_KEY_FALLBACKS",
"PASSWORD",
"API_KEY",
"AUTH_TOKEN",
]
for setting in sensitive_settings:
FOOBAR = {
setting: "should not be displayed",
"recursive": {setting: "should not be displayed"},
}
with self.settings(DEBUG=True, FOOBAR=FOOBAR):
response = self.client.get("/raises500/")
self.assertNotContains(
response, "should not be displayed", status_code=500
)
def test_cleanse_setting_basic(self):
reporter_filter = SafeExceptionReporterFilter()
self.assertEqual(reporter_filter.cleanse_setting("TEST", "TEST"), "TEST")
self.assertEqual(
reporter_filter.cleanse_setting("PASSWORD", "super_secret"),
reporter_filter.cleansed_substitute,
)
def test_cleanse_setting_ignore_case(self):
reporter_filter = SafeExceptionReporterFilter()
self.assertEqual(
reporter_filter.cleanse_setting("password", "super_secret"),
reporter_filter.cleansed_substitute,
)
def test_cleanse_setting_recurses_in_dictionary(self):
reporter_filter = SafeExceptionReporterFilter()
initial = {"login": "cooper", "password": "secret"}
self.assertEqual(
reporter_filter.cleanse_setting("SETTING_NAME", initial),
{"login": "cooper", "password": reporter_filter.cleansed_substitute},
)
def test_cleanse_setting_recurses_in_dictionary_with_non_string_key(self):
reporter_filter = SafeExceptionReporterFilter()
initial = {("localhost", 8000): {"login": "cooper", "password": "secret"}}
self.assertEqual(
reporter_filter.cleanse_setting("SETTING_NAME", initial),
{
("localhost", 8000): {
"login": "cooper",
"password": reporter_filter.cleansed_substitute,
},
},
)
def test_cleanse_setting_recurses_in_list_tuples(self):
reporter_filter = SafeExceptionReporterFilter()
initial = [
{
"login": "cooper",
"password": "secret",
"apps": (
{"name": "app1", "api_key": "a06b-c462cffae87a"},
{"name": "app2", "api_key": "a9f4-f152e97ad808"},
),
"tokens": ["98b37c57-ec62-4e39", "8690ef7d-8004-4916"],
},
{"SECRET_KEY": "c4d77c62-6196-4f17-a06b-c462cffae87a"},
]
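# The key "tokens" itself matches the sensitive-name pattern (TOKEN),
# so the whole list is substituted instead of being recursed into.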
cleansed = [
{
"login": "cooper",
"password": reporter_filter.cleansed_substitute,
"apps": (
{"name": "app1", "api_key": reporter_filter.cleansed_substitute},
{"name": "app2", "api_key": reporter_filter.cleansed_substitute},
),
"tokens": reporter_filter.cleansed_substitute,
},
{"SECRET_KEY": reporter_filter.cleansed_substitute},
]
self.assertEqual(
reporter_filter.cleanse_setting("SETTING_NAME", initial),
cleansed,
)
self.assertEqual(
reporter_filter.cleanse_setting("SETTING_NAME", tuple(initial)),
tuple(cleansed),
)
def test_request_meta_filtering(self):
request = self.rf.get("/", HTTP_SECRET_HEADER="super_secret")
reporter_filter = SafeExceptionReporterFilter()
self.assertEqual(
reporter_filter.get_safe_request_meta(request)["HTTP_SECRET_HEADER"],
reporter_filter.cleansed_substitute,
)
def test_exception_report_uses_meta_filtering(self):
response = self.client.get("/raises500/", HTTP_SECRET_HEADER="super_secret")
self.assertNotIn(b"super_secret", response.content)
response = self.client.get(
"/raises500/",
HTTP_SECRET_HEADER="super_secret",
HTTP_ACCEPT="application/json",
)
self.assertNotIn(b"super_secret", response.content)
@override_settings(SESSION_COOKIE_NAME="djangosession")
def test_cleanse_session_cookie_value(self):
self.client.cookies.load({"djangosession": "should not be displayed"})
response = self.client.get("/raises500/")
self.assertNotContains(response, "should not be displayed", status_code=500)
class CustomExceptionReporterFilter(SafeExceptionReporterFilter):
cleansed_substitute = "XXXXXXXXXXXXXXXXXXXX"
hidden_settings = _lazy_re_compile(
"API|TOKEN|KEY|SECRET|PASS|SIGNATURE|DATABASE_URL", flags=re.I
)
@override_settings(
ROOT_URLCONF="view_tests.urls",
DEFAULT_EXCEPTION_REPORTER_FILTER="%s.CustomExceptionReporterFilter" % __name__,
)
class CustomExceptionReporterFilterTests(SimpleTestCase):
def setUp(self):
get_default_exception_reporter_filter.cache_clear()
def tearDown(self):
get_default_exception_reporter_filter.cache_clear()
def test_setting_allows_custom_subclass(self):
self.assertIsInstance(
get_default_exception_reporter_filter(),
CustomExceptionReporterFilter,
)
def test_cleansed_substitute_override(self):
reporter_filter = get_default_exception_reporter_filter()
self.assertEqual(
reporter_filter.cleanse_setting("password", "super_secret"),
reporter_filter.cleansed_substitute,
)
def test_hidden_settings_override(self):
reporter_filter = get_default_exception_reporter_filter()
self.assertEqual(
reporter_filter.cleanse_setting("database_url", "super_secret"),
reporter_filter.cleansed_substitute,
)
class NonHTMLResponseExceptionReporterFilter(
ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase
):
"""
Sensitive information can be filtered out of error reports.
The plain text 500 debug-only error page is served when the request
has been detected as not accepting HTML content. Don't check for
(non)existence of frames vars in the traceback information section of the
response content because they're not included in these error pages.
Refs #14614.
"""
rf = RequestFactory(HTTP_ACCEPT="application/json")
def test_non_sensitive_request(self):
"""
Request info can be seen in the default error reports for
non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
def test_sensitive_request(self):
"""
Sensitive POST parameters cannot be seen in the default
error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view, check_for_vars=False)
def test_paranoid_request(self):
"""
No POST parameters can be seen in the default error reports
for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view, check_for_vars=False)
def test_custom_exception_reporter_filter(self):
"""
It's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(
custom_exception_reporter_filter_view, check_for_vars=False
)
with self.settings(DEBUG=False):
self.verify_unsafe_response(
custom_exception_reporter_filter_view, check_for_vars=False
)
@override_settings(DEBUG=True, ROOT_URLCONF="view_tests.urls")
def test_non_html_response_encoding(self):
response = self.client.get("/raises500/", HTTP_ACCEPT="application/json")
self.assertEqual(response.headers["Content-Type"], "text/plain; charset=utf-8")
class DecoratorsTests(SimpleTestCase):
def test_sensitive_variables_not_called(self):
msg = (
"sensitive_variables() must be called to use it as a decorator, "
"e.g., use @sensitive_variables(), not @sensitive_variables."
)
with self.assertRaisesMessage(TypeError, msg):
@sensitive_variables
def test_func(password):
pass
def test_sensitive_post_parameters_not_called(self):
msg = (
"sensitive_post_parameters() must be called to use it as a "
"decorator, e.g., use @sensitive_post_parameters(), not "
"@sensitive_post_parameters."
)
with self.assertRaisesMessage(TypeError, msg):
@sensitive_post_parameters
def test_func(request):
return index_page(request)
def test_sensitive_post_parameters_http_request(self):
class MyClass:
@sensitive_post_parameters()
def a_view(self, request):
return HttpResponse()
msg = (
"sensitive_post_parameters didn't receive an HttpRequest object. "
"If you are decorating a classmethod, make sure to use "
"@method_decorator."
)
with self.assertRaisesMessage(TypeError, msg):
MyClass().a_view(HttpRequest())
# Django documentation build configuration file, created by
# sphinx-quickstart on Thu Mar 27 09:06:53 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't picklable (module imports are okay, they're removed automatically).
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
from os.path import abspath, dirname, join
# Workaround for sphinx-build recursion limit overflow:
# pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
# RuntimeError: maximum recursion depth exceeded while pickling an object
#
# Python's default allowed recursion depth is 1000 but this isn't enough for
# building docs/ref/settings.txt sometimes.
# https://groups.google.com/g/sphinx-dev/c/MtRf64eGtv4/discussion
sys.setrecursionlimit(2000)
# Make sure we get the version of this copy of Django
sys.path.insert(1, dirname(dirname(abspath(__file__))))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, as shown here.
sys.path.append(abspath(join(dirname(__file__), "_ext")))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "4.5.0"
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"djangodocs",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"sphinx.ext.autosectionlabel",
]
# AutosectionLabel settings.
# Uses a <page>:<label> schema which doesn't work for duplicate sub-section
# labels, so set max depth.
autosectionlabel_prefix_document = True
autosectionlabel_maxdepth = 2
# Linkcheck settings.
linkcheck_ignore = [
# Special-use addresses and domain names. (RFC 6761/6890)
r"^https?://(?:127\.0\.0\.1|\[::1\])(?::\d+)?/",
r"^https?://(?:[^/\.]+\.)*example\.(?:com|net|org)(?::\d+)?/",
r"^https?://(?:[^/\.]+\.)*(?:example|invalid|localhost|test)(?::\d+)?/",
# Pages that are inaccessible because they require authentication.
r"^https://github\.com/[^/]+/[^/]+/fork",
r"^https://code\.djangoproject\.com/github/login",
r"^https://code\.djangoproject\.com/newticket",
r"^https://(?:code|www)\.djangoproject\.com/admin/",
r"^https://www\.djangoproject\.com/community/add/blogs/",
r"^https://www\.google\.com/webmasters/tools/ping",
r"^https://search\.google\.com/search-console/welcome",
# Fragments used to dynamically switch content or populate fields.
r"^https://web\.libera\.chat/#",
r"^https://github\.com/[^#]+#L\d+-L\d+$",
r"^https://help\.apple\.com/itc/podcasts_connect/#/itc",
# Anchors on certain pages with missing a[name] attributes.
r"^https://tools\.ietf\.org/html/rfc1123\.html#section-",
]
# Spelling check needs an additional module that is not installed by default.
# Add it only if spelling check is requested so docs can be generated without it.
if "spelling" in sys.argv:
extensions.append("sphinxcontrib.spelling")
# Spelling language.
spelling_lang = "en_US"
# Location of word list.
spelling_word_list_filename = "spelling_wordlist"
spelling_warning = True
# Add any paths that contain templates here, relative to this directory.
# templates_path = []
# The suffix of source filenames.
source_suffix = ".txt"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The root toctree document.
root_doc = "contents"
# Disable auto-created table of contents entries for all domain objects (e.g.
# functions, classes, attributes, etc.) in Sphinx 5.2+.
toc_object_entries = False
# General substitutions.
project = "Django"
copyright = "Django Software Foundation and contributors"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "4.2"
# The full version, including alpha/beta/rc tags.
try:
from django import VERSION, get_version
except ImportError:
release = version
else:
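# get_version() normally appends a ".devXXXXXXXXXXXX" suffix derived
# from git; when that data is unavailable in an unreleased tree, tag
# the release as ".dev" explicitly so it isn't mistaken for a final
# release.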
def django_release():
pep440ver = get_version()
if VERSION[3:5] == ("alpha", 0) and "dev" not in pep440ver:
return pep440ver + ".dev"
return pep440ver
release = django_release()
# The "development version" of Django
django_next_version = "4.2"
extlinks = {
"bpo": ("https://bugs.python.org/issue?@action=redirect&bpo=%s", "bpo-%s"),
"commit": ("https://github.com/django/django/commit/%s", "%s"),
"cve": ("https://nvd.nist.gov/vuln/detail/CVE-%s", "CVE-%s"),
# A file or directory. GitHub redirects from blob to tree if needed.
"source": ("https://github.com/django/django/blob/main/%s", "%s"),
"ticket": ("https://code.djangoproject.com/ticket/%s", "#%s"),
}
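# With extlinks, e.g. :ticket:`12345` renders as "#12345" and links to
# https://code.djangoproject.com/ticket/12345.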
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# Location for .po/.mo translation files used when language is set
locale_dirs = ["locale/"]
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = "%B %d, %Y"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "_theme", "requirements.txt"]
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "default-role-error"
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "trac"
# Links to Python's docs should reference the most recent version of the 3.x
# branch, which is located at this URL.
intersphinx_mapping = {
"python": ("https://docs.python.org/3/", None),
"sphinx": ("https://www.sphinx-doc.org/en/master/", None),
"psycopg": ("https://www.psycopg.org/psycopg3/docs/", None),
}
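# With intersphinx, cross-references such as :class:`python:dict`
# resolve against the mapped external documentation.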
# Python's docs don't change every week.
intersphinx_cache_limit = 90 # days
# The 'versionadded' and 'versionchanged' directives are overridden.
suppress_warnings = ["app.add_directive"]
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "djangodocs"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_theme"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = "%b %d, %Y"
# Content template for the index page.
# html_index = ''
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "Djangodoc"
modindex_common_prefix = ["django."]
# Appended to every page
rst_epilog = """
.. |django-users| replace:: :ref:`django-users <django-users-mailing-list>`
.. |django-developers| replace:: :ref:`django-developers <django-developers-mailing-list>`
.. |django-announce| replace:: :ref:`django-announce <django-announce-mailing-list>`
.. |django-updates| replace:: :ref:`django-updates <django-updates-mailing-list>`
""" # NOQA
# -- Options for LaTeX output --------------------------------------------------
# Use XeLaTeX for Unicode support.
latex_engine = "xelatex"
latex_use_xindy = False
# Set font for CJK and fallbacks for unicode characters.
latex_elements = {
"fontpkg": r"""
\setmainfont{Symbola}
""",
"preamble": r"""
\usepackage{newunicodechar}
\usepackage[UTF8]{ctex}
\newunicodechar{π}{\ensuremath{\pi}}
\newunicodechar{≤}{\ensuremath{\le}}
\newunicodechar{≥}{\ensuremath{\ge}}
\newunicodechar{♥}{\ensuremath{\heartsuit}}
\newunicodechar{…}{\ensuremath{\ldots}}
""",
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
# latex_documents = []
latex_documents = [
(
"contents",
"django.tex",
"Django Documentation",
"Django Software Foundation",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
"ref/django-admin",
"django-admin",
"Utility script for the Django web framework",
["Django Software Foundation"],
1,
)
]
# -- Options for Texinfo output ------------------------------------------------
# List of tuples (startdocname, targetname, title, author, dir_entry,
# description, category, toctree_only)
texinfo_documents = [
(
root_doc,
"django",
"",
"",
"Django",
"Documentation of the Django framework",
"Web development",
False,
)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = "Django Software Foundation"
epub_publisher = "Django Software Foundation"
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = 'Django'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
epub_theme = "djangodocs-epub"
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
epub_cover = ("", "epub-cover.html")
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
|
f9ccc67bd05fe5657ce6dce95155ca6e1fb92e83102ad883c3c960865c2c6351 | import difflib
import inspect
import json
import logging
import posixpath
import sys
import threading
import unittest
import warnings
from collections import Counter
from contextlib import contextmanager
from copy import copy, deepcopy
from difflib import get_close_matches
from functools import wraps
from unittest.suite import _DebugResult
from unittest.util import safe_repr
from urllib.parse import (
parse_qsl,
unquote,
urlencode,
urljoin,
urlparse,
urlsplit,
urlunparse,
)
from urllib.request import url2pathname
from asgiref.sync import async_to_sync, iscoroutinefunction
from django.apps import apps
from django.conf import settings
from django.core import mail
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.files import locks
from django.core.handlers.wsgi import WSGIHandler, get_path_info
from django.core.management import call_command
from django.core.management.color import no_style
from django.core.management.sql import emit_post_migrate_signal
from django.core.servers.basehttp import ThreadedWSGIServer, WSGIRequestHandler
from django.core.signals import setting_changed
from django.db import DEFAULT_DB_ALIAS, connection, connections, transaction
from django.forms.fields import CharField
from django.http import QueryDict
from django.http.request import split_domain_port, validate_host
from django.http.response import HttpResponseBase
from django.test.client import AsyncClient, Client
from django.test.html import HTMLParseError, parse_html
from django.test.signals import template_rendered
from django.test.utils import (
CaptureQueriesContext,
ContextList,
compare_xml,
modify_settings,
override_settings,
)
from django.utils.deprecation import RemovedInDjango50Warning, RemovedInDjango51Warning
from django.utils.functional import classproperty
from django.utils.version import PY310
from django.views.static import serve
logger = logging.getLogger("django.test")
__all__ = (
"TestCase",
"TransactionTestCase",
"SimpleTestCase",
"skipIfDBFeature",
"skipUnlessDBFeature",
)
def to_list(value):
"""Put value into a list if it's not already one."""
if not isinstance(value, list):
value = [value]
return value
def assert_and_parse_html(self, html, user_msg, msg):
try:
dom = parse_html(html)
except HTMLParseError as e:
standardMsg = "%s\n%s" % (msg, e)
self.fail(self._formatMessage(user_msg, standardMsg))
return dom
class _AssertNumQueriesContext(CaptureQueriesContext):
def __init__(self, test_case, num, connection):
self.test_case = test_case
self.num = num
super().__init__(connection)
def __exit__(self, exc_type, exc_value, traceback):
super().__exit__(exc_type, exc_value, traceback)
if exc_type is not None:
return
executed = len(self)
self.test_case.assertEqual(
executed,
self.num,
"%d queries executed, %d expected\nCaptured queries were:\n%s"
% (
executed,
self.num,
"\n".join(
"%d. %s" % (i, query["sql"])
for i, query in enumerate(self.captured_queries, start=1)
),
),
)
class _AssertTemplateUsedContext:
def __init__(self, test_case, template_name, msg_prefix="", count=None):
self.test_case = test_case
self.template_name = template_name
self.msg_prefix = msg_prefix
self.count = count
self.rendered_templates = []
self.rendered_template_names = []
self.context = ContextList()
def on_template_render(self, sender, signal, template, context, **kwargs):
self.rendered_templates.append(template)
self.rendered_template_names.append(template.name)
self.context.append(copy(context))
def test(self):
self.test_case._assert_template_used(
self.template_name,
self.rendered_template_names,
self.msg_prefix,
self.count,
)
def __enter__(self):
template_rendered.connect(self.on_template_render)
return self
def __exit__(self, exc_type, exc_value, traceback):
template_rendered.disconnect(self.on_template_render)
if exc_type is not None:
return
self.test()
class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext):
def test(self):
self.test_case.assertFalse(
self.template_name in self.rendered_template_names,
f"{self.msg_prefix}Template '{self.template_name}' was used "
f"unexpectedly in rendering the response",
)
class DatabaseOperationForbidden(AssertionError):
pass
class _DatabaseFailure:
def __init__(self, wrapped, message):
self.wrapped = wrapped
self.message = message
def __call__(self):
raise DatabaseOperationForbidden(self.message)
# RemovedInDjango50Warning
class _AssertFormErrorDeprecationHelper:
@staticmethod
def assertFormError(self, response, form, field, errors, msg_prefix=""):
"""
Search through all the rendered contexts of the `response` for a form named
        `form`, then dispatch to the new assertFormError() using that instance.
If multiple contexts contain the form, they're all checked in order and any
failure will abort (this matches the old behavior).
"""
warning_msg = (
f"Passing response to assertFormError() is deprecated. Use the form object "
f"directly: assertFormError(response.context[{form!r}], {field!r}, ...)"
)
warnings.warn(warning_msg, RemovedInDjango50Warning, stacklevel=2)
full_msg_prefix = f"{msg_prefix}: " if msg_prefix else ""
contexts = to_list(response.context) if response.context is not None else []
if not contexts:
self.fail(
f"{full_msg_prefix}Response did not use any contexts to render the "
f"response"
)
# Search all contexts for the error.
found_form = False
for i, context in enumerate(contexts):
if form not in context:
continue
found_form = True
self.assertFormError(context[form], field, errors, msg_prefix=msg_prefix)
if not found_form:
self.fail(
f"{full_msg_prefix}The form '{form}' was not used to render the "
f"response"
)
@staticmethod
def assertFormSetError(
self, response, formset, form_index, field, errors, msg_prefix=""
):
"""
Search for a formset named "formset" in the "response" and dispatch to
the new assertFormSetError() using that instance. If the name is found
        in multiple contexts, they're all checked in order and any failure will
abort the test.
"""
warning_msg = (
f"Passing response to assertFormSetError() is deprecated. Use the formset "
f"object directly: assertFormSetError(response.context[{formset!r}], "
f"{form_index!r}, ...)"
)
warnings.warn(warning_msg, RemovedInDjango50Warning, stacklevel=2)
full_msg_prefix = f"{msg_prefix}: " if msg_prefix else ""
contexts = to_list(response.context) if response.context is not None else []
if not contexts:
self.fail(
f"{full_msg_prefix}Response did not use any contexts to render the "
f"response"
)
found_formset = False
for i, context in enumerate(contexts):
if formset not in context or not hasattr(context[formset], "forms"):
continue
found_formset = True
self.assertFormSetError(
context[formset], form_index, field, errors, msg_prefix
)
if not found_formset:
self.fail(
f"{full_msg_prefix}The formset '{formset}' was not used to render the "
f"response"
)
@classmethod
def patch_signature(cls, new_method):
"""
Replace the decorated method with a new one that inspects the passed
args/kwargs and dispatch to the old implementation (with deprecation
warning) when it detects the old signature.
"""
@wraps(new_method)
def patched_method(self, *args, **kwargs):
old_method = getattr(cls, new_method.__name__)
old_signature = inspect.signature(old_method)
try:
old_bound_args = old_signature.bind(self, *args, **kwargs)
except TypeError:
                # If the old signature doesn't match, then either:
                # 1) the new signature will match, or
                # 2) a TypeError will be raised showing the user information
                #    about the new signature.
return new_method(self, *args, **kwargs)
new_signature = inspect.signature(new_method)
try:
new_bound_args = new_signature.bind(self, *args, **kwargs)
except TypeError:
# Old signature matches but not the new one (because of
# previous try/except).
return old_method(self, *args, **kwargs)
            # If both signatures match, decide which method to call by
            # inspecting the first argument after self (args[1]).
assert old_bound_args.args[1] == new_bound_args.args[1]
if hasattr(
old_bound_args.args[1], "context"
): # Looks like a response object => old method.
return old_method(self, *args, **kwargs)
elif isinstance(old_bound_args.args[1], HttpResponseBase):
raise ValueError(
f"{old_method.__name__}() is only usable on responses fetched "
f"using the Django test Client."
)
else:
return new_method(self, *args, **kwargs)
return patched_method
class SimpleTestCase(unittest.TestCase):
# The class we'll use for the test client self.client.
# Can be overridden in derived classes.
client_class = Client
async_client_class = AsyncClient
_overridden_settings = None
_modified_settings = None
databases = set()
_disallowed_database_msg = (
"Database %(operation)s to %(alias)r are not allowed in SimpleTestCase "
"subclasses. Either subclass TestCase or TransactionTestCase to ensure "
"proper test isolation or add %(alias)r to %(test)s.databases to silence "
"this failure."
)
_disallowed_connection_methods = [
("connect", "connections"),
("temporary_connection", "connections"),
("cursor", "queries"),
("chunked_cursor", "queries"),
]
@classmethod
def setUpClass(cls):
super().setUpClass()
if cls._overridden_settings:
cls._cls_overridden_context = override_settings(**cls._overridden_settings)
cls._cls_overridden_context.enable()
cls.addClassCleanup(cls._cls_overridden_context.disable)
if cls._modified_settings:
cls._cls_modified_context = modify_settings(cls._modified_settings)
cls._cls_modified_context.enable()
cls.addClassCleanup(cls._cls_modified_context.disable)
cls._add_databases_failures()
cls.addClassCleanup(cls._remove_databases_failures)
@classmethod
def _validate_databases(cls):
if cls.databases == "__all__":
return frozenset(connections)
for alias in cls.databases:
if alias not in connections:
message = (
"%s.%s.databases refers to %r which is not defined in "
"settings.DATABASES."
% (
cls.__module__,
cls.__qualname__,
alias,
)
)
close_matches = get_close_matches(alias, list(connections))
if close_matches:
message += " Did you mean %r?" % close_matches[0]
raise ImproperlyConfigured(message)
return frozenset(cls.databases)
@classmethod
def _add_databases_failures(cls):
cls.databases = cls._validate_databases()
for alias in connections:
if alias in cls.databases:
continue
connection = connections[alias]
for name, operation in cls._disallowed_connection_methods:
message = cls._disallowed_database_msg % {
"test": "%s.%s" % (cls.__module__, cls.__qualname__),
"alias": alias,
"operation": operation,
}
method = getattr(connection, name)
setattr(connection, name, _DatabaseFailure(method, message))
@classmethod
def _remove_databases_failures(cls):
for alias in connections:
if alias in cls.databases:
continue
connection = connections[alias]
for name, _ in cls._disallowed_connection_methods:
method = getattr(connection, name)
setattr(connection, name, method.wrapped)
def __call__(self, result=None):
"""
        Wrapper around the default __call__ method to perform common Django
        test setup. This means that user-defined test cases aren't required
        to include a call to super().setUp().
"""
self._setup_and_call(result)
def debug(self):
"""Perform the same as __call__(), without catching the exception."""
debug_result = _DebugResult()
self._setup_and_call(debug_result, debug=True)
def _setup_and_call(self, result, debug=False):
"""
Perform the following in order: pre-setup, run test, post-teardown,
skipping pre/post hooks if test is set to be skipped.
If debug=True, reraise any errors in setup and use super().debug()
instead of __call__() to run the test.
"""
testMethod = getattr(self, self._testMethodName)
skipped = getattr(self.__class__, "__unittest_skip__", False) or getattr(
testMethod, "__unittest_skip__", False
)
# Convert async test methods.
if iscoroutinefunction(testMethod):
setattr(self, self._testMethodName, async_to_sync(testMethod))
if not skipped:
try:
self._pre_setup()
except Exception:
if debug:
raise
result.addError(self, sys.exc_info())
return
if debug:
super().debug()
else:
super().__call__(result)
if not skipped:
try:
self._post_teardown()
except Exception:
if debug:
raise
result.addError(self, sys.exc_info())
return
def _pre_setup(self):
"""
Perform pre-test setup:
* Create a test client.
* Clear the mail test outbox.
"""
self.client = self.client_class()
self.async_client = self.async_client_class()
mail.outbox = []
def _post_teardown(self):
"""Perform post-test things."""
pass
def settings(self, **kwargs):
"""
A context manager that temporarily sets a setting and reverts to the
original value when exiting the context.
"""
return override_settings(**kwargs)
def modify_settings(self, **kwargs):
"""
        A context manager that temporarily applies changes to a list setting
        and reverts to the original value when exiting the context.
"""
return modify_settings(**kwargs)
def assertRedirects(
self,
response,
expected_url,
status_code=302,
target_status_code=200,
msg_prefix="",
fetch_redirect_response=True,
):
"""
Assert that a response redirected to a specific URL and that the
redirect URL can be loaded.
Won't work for external links since it uses the test client to do a
request (use fetch_redirect_response=False to check such links without
fetching them).
"""
if msg_prefix:
msg_prefix += ": "
if hasattr(response, "redirect_chain"):
# The request was a followed redirect
self.assertTrue(
response.redirect_chain,
msg_prefix
+ (
"Response didn't redirect as expected: Response code was %d "
"(expected %d)"
)
% (response.status_code, status_code),
)
self.assertEqual(
response.redirect_chain[0][1],
status_code,
msg_prefix
+ (
"Initial response didn't redirect as expected: Response code was "
"%d (expected %d)"
)
% (response.redirect_chain[0][1], status_code),
)
url, status_code = response.redirect_chain[-1]
self.assertEqual(
response.status_code,
target_status_code,
msg_prefix
+ (
"Response didn't redirect as expected: Final Response code was %d "
"(expected %d)"
)
% (response.status_code, target_status_code),
)
else:
# Not a followed redirect
self.assertEqual(
response.status_code,
status_code,
msg_prefix
+ (
"Response didn't redirect as expected: Response code was %d "
"(expected %d)"
)
% (response.status_code, status_code),
)
url = response.url
scheme, netloc, path, query, fragment = urlsplit(url)
# Prepend the request path to handle relative path redirects.
if not path.startswith("/"):
url = urljoin(response.request["PATH_INFO"], url)
path = urljoin(response.request["PATH_INFO"], path)
if fetch_redirect_response:
# netloc might be empty, or in cases where Django tests the
# HTTP scheme, the convention is for netloc to be 'testserver'.
# Trust both as "internal" URLs here.
domain, port = split_domain_port(netloc)
if domain and not validate_host(domain, settings.ALLOWED_HOSTS):
raise ValueError(
"The test client is unable to fetch remote URLs (got %s). "
"If the host is served by Django, add '%s' to ALLOWED_HOSTS. "
"Otherwise, use "
"assertRedirects(..., fetch_redirect_response=False)."
% (url, domain)
)
# Get the redirection page, using the same client that was used
# to obtain the original response.
extra = response.client.extra or {}
redirect_response = response.client.get(
path,
QueryDict(query),
secure=(scheme == "https"),
**extra,
)
self.assertEqual(
redirect_response.status_code,
target_status_code,
msg_prefix
+ (
"Couldn't retrieve redirection page '%s': response code was %d "
"(expected %d)"
)
% (path, redirect_response.status_code, target_status_code),
)
self.assertURLEqual(
url,
expected_url,
msg_prefix
+ "Response redirected to '%s', expected '%s'" % (url, expected_url),
)
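    # Usage sketch (illustrative; "/old/" and "/new/" are hypothetical URLs):
    #
    #     response = self.client.get("/old/")
    #     self.assertRedirects(response, "/new/")
    #
    # For redirects pointing at hosts the test client can't fetch, pass
    # fetch_redirect_response=False to skip loading the target URL.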
def assertURLEqual(self, url1, url2, msg_prefix=""):
"""
Assert that two URLs are the same, ignoring the order of query string
parameters except for parameters with the same name.
For example, /path/?x=1&y=2 is equal to /path/?y=2&x=1, but
/path/?a=1&a=2 isn't equal to /path/?a=2&a=1.
"""
def normalize(url):
"""Sort the URL's query string parameters."""
url = str(url) # Coerce reverse_lazy() URLs.
scheme, netloc, path, params, query, fragment = urlparse(url)
query_parts = sorted(parse_qsl(query))
return urlunparse(
(scheme, netloc, path, params, urlencode(query_parts), fragment)
)
self.assertEqual(
normalize(url1),
normalize(url2),
msg_prefix + "Expected '%s' to equal '%s'." % (url1, url2),
)
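    # Behavior sketch (illustrative):
    #
    #     self.assertURLEqual("/path/?x=1&y=2", "/path/?y=2&x=1")  # passes
    #     self.assertURLEqual("/path/?a=1&a=2", "/path/?a=2&a=1")  # fails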
def _assert_contains(self, response, text, status_code, msg_prefix, html):
# If the response supports deferred rendering and hasn't been rendered
# yet, then ensure that it does get rendered before proceeding further.
if (
hasattr(response, "render")
and callable(response.render)
and not response.is_rendered
):
response.render()
if msg_prefix:
msg_prefix += ": "
self.assertEqual(
response.status_code,
status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code),
)
if response.streaming:
content = b"".join(response.streaming_content)
else:
content = response.content
if not isinstance(text, bytes) or html:
text = str(text)
content = content.decode(response.charset)
text_repr = "'%s'" % text
else:
text_repr = repr(text)
if html:
content = assert_and_parse_html(
self, content, None, "Response's content is not valid HTML:"
)
text = assert_and_parse_html(
self, text, None, "Second argument is not valid HTML:"
)
real_count = content.count(text)
return (text_repr, real_count, msg_prefix)
def assertContains(
self, response, text, count=None, status_code=200, msg_prefix="", html=False
):
"""
Assert that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected) and that
``text`` occurs ``count`` times in the content of the response.
If ``count`` is None, the count doesn't matter - the assertion is true
if the text occurs at least once in the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html
)
if count is not None:
self.assertEqual(
real_count,
count,
msg_prefix
+ "Found %d instances of %s in response (expected %d)"
% (real_count, text_repr, count),
)
else:
self.assertTrue(
real_count != 0, msg_prefix + "Couldn't find %s in response" % text_repr
)
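    # Usage sketch (illustrative; "/greeting/" is a hypothetical URL):
    #
    #     response = self.client.get("/greeting/")
    #     self.assertContains(response, "Hello", count=1)
    #     self.assertContains(response, "<p>Hello</p>", html=True)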
def assertNotContains(
self, response, text, status_code=200, msg_prefix="", html=False
):
"""
Assert that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected) and that
``text`` doesn't occur in the content of the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html
)
self.assertEqual(
real_count, 0, msg_prefix + "Response should not contain %s" % text_repr
)
def _check_test_client_response(self, response, attribute, method_name):
"""
Raise a ValueError if the given response doesn't have the required
attribute.
"""
if not hasattr(response, attribute):
raise ValueError(
f"{method_name}() is only usable on responses fetched using "
"the Django test Client."
)
def _assert_form_error(self, form, field, errors, msg_prefix, form_repr):
if not form.is_bound:
self.fail(
f"{msg_prefix}The {form_repr} is not bound, it will never have any "
f"errors."
)
if field is not None and field not in form.fields:
self.fail(
f"{msg_prefix}The {form_repr} does not contain the field {field!r}."
)
if field is None:
field_errors = form.non_field_errors()
failure_message = f"The non-field errors of {form_repr} don't match."
else:
field_errors = form.errors.get(field, [])
failure_message = (
f"The errors of field {field!r} on {form_repr} don't match."
)
self.assertEqual(field_errors, errors, msg_prefix + failure_message)
# RemovedInDjango50Warning: When the deprecation ends, remove the
# decorator.
@_AssertFormErrorDeprecationHelper.patch_signature
def assertFormError(self, form, field, errors, msg_prefix=""):
"""
Assert that a field named "field" on the given form object has specific
errors.
        errors can be either a single error message or a list of error
        messages. Using errors=[] tests that the field has no errors.
You can pass field=None to check the form's non-field errors.
"""
if errors is None:
warnings.warn(
"Passing errors=None to assertFormError() is deprecated, use "
"errors=[] instead.",
RemovedInDjango50Warning,
stacklevel=2,
)
errors = []
if msg_prefix:
msg_prefix += ": "
errors = to_list(errors)
self._assert_form_error(form, field, errors, msg_prefix, f"form {form!r}")
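    # Usage sketch (illustrative; ContactForm is a hypothetical form class):
    #
    #     form = ContactForm(data={"email": "not an email"})
    #     self.assertFormError(form, "email", ["Enter a valid email address."])
    #     self.assertFormError(form, None, [])  # no non-field errors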
# RemovedInDjango51Warning.
def assertFormsetError(self, *args, **kw):
warnings.warn(
"assertFormsetError() is deprecated in favor of assertFormSetError().",
category=RemovedInDjango51Warning,
stacklevel=2,
)
return self.assertFormSetError(*args, **kw)
# RemovedInDjango50Warning: When the deprecation ends, remove the
# decorator.
@_AssertFormErrorDeprecationHelper.patch_signature
def assertFormSetError(self, formset, form_index, field, errors, msg_prefix=""):
"""
Similar to assertFormError() but for formsets.
Use form_index=None to check the formset's non-form errors (in that
case, you must also use field=None).
Otherwise use an integer to check the formset's n-th form for errors.
Other parameters are the same as assertFormError().
"""
if errors is None:
warnings.warn(
"Passing errors=None to assertFormSetError() is deprecated, "
"use errors=[] instead.",
RemovedInDjango50Warning,
stacklevel=2,
)
errors = []
if form_index is None and field is not None:
raise ValueError("You must use field=None with form_index=None.")
if msg_prefix:
msg_prefix += ": "
errors = to_list(errors)
if not formset.is_bound:
self.fail(
f"{msg_prefix}The formset {formset!r} is not bound, it will never have "
f"any errors."
)
if form_index is not None and form_index >= formset.total_form_count():
form_count = formset.total_form_count()
form_or_forms = "forms" if form_count > 1 else "form"
self.fail(
f"{msg_prefix}The formset {formset!r} only has {form_count} "
f"{form_or_forms}."
)
if form_index is not None:
form_repr = f"form {form_index} of formset {formset!r}"
self._assert_form_error(
formset.forms[form_index], field, errors, msg_prefix, form_repr
)
else:
failure_message = f"The non-form errors of formset {formset!r} don't match."
self.assertEqual(
formset.non_form_errors(), errors, msg_prefix + failure_message
)
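    # Usage sketch (illustrative; "formset" is a hypothetical bound formset):
    #
    #     self.assertFormSetError(formset, 0, "name", ["This field is required."])
    #     self.assertFormSetError(formset, None, None, ["Too many forms."])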
def _get_template_used(self, response, template_name, msg_prefix, method_name):
if response is None and template_name is None:
raise TypeError("response and/or template_name argument must be provided")
if msg_prefix:
msg_prefix += ": "
if template_name is not None and response is not None:
self._check_test_client_response(response, "templates", method_name)
if not hasattr(response, "templates") or (response is None and template_name):
if response:
template_name = response
response = None
            # Use this template with the context manager.
return template_name, None, msg_prefix
template_names = [t.name for t in response.templates if t.name is not None]
return None, template_names, msg_prefix
def _assert_template_used(self, template_name, template_names, msg_prefix, count):
if not template_names:
self.fail(msg_prefix + "No templates used to render the response")
self.assertTrue(
template_name in template_names,
msg_prefix + "Template '%s' was not a template used to render"
" the response. Actual template(s) used: %s"
% (template_name, ", ".join(template_names)),
)
if count is not None:
self.assertEqual(
template_names.count(template_name),
count,
msg_prefix + "Template '%s' was expected to be rendered %d "
"time(s) but was actually rendered %d time(s)."
% (template_name, count, template_names.count(template_name)),
)
def assertTemplateUsed(
self, response=None, template_name=None, msg_prefix="", count=None
):
"""
Assert that the template with the provided name was used in rendering
the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._get_template_used(
response,
template_name,
msg_prefix,
"assertTemplateUsed",
)
if context_mgr_template:
# Use assertTemplateUsed as context manager.
return _AssertTemplateUsedContext(
self, context_mgr_template, msg_prefix, count
)
self._assert_template_used(template_name, template_names, msg_prefix, count)
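    # Usage sketch (illustrative; "home.html" is a hypothetical template):
    #
    #     response = self.client.get("/")
    #     self.assertTemplateUsed(response, "home.html")
    #
    #     with self.assertTemplateUsed("home.html"):
    #         render_to_string("home.html")
    #
    # (render_to_string would come from django.template.loader; the context
    # manager relies on the template_rendered signal.)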
def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=""):
"""
Assert that the template with the provided name was NOT used in
rendering the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._get_template_used(
response,
template_name,
msg_prefix,
"assertTemplateNotUsed",
)
if context_mgr_template:
# Use assertTemplateNotUsed as context manager.
return _AssertTemplateNotUsedContext(self, context_mgr_template, msg_prefix)
self.assertFalse(
template_name in template_names,
msg_prefix
+ "Template '%s' was used unexpectedly in rendering the response"
% template_name,
)
@contextmanager
def _assert_raises_or_warns_cm(
self, func, cm_attr, expected_exception, expected_message
):
with func(expected_exception) as cm:
yield cm
self.assertIn(expected_message, str(getattr(cm, cm_attr)))
def _assertFooMessage(
self, func, cm_attr, expected_exception, expected_message, *args, **kwargs
):
callable_obj = None
if args:
callable_obj, *args = args
cm = self._assert_raises_or_warns_cm(
func, cm_attr, expected_exception, expected_message
)
# Assertion used in context manager fashion.
if callable_obj is None:
return cm
# Assertion was passed a callable.
with cm:
callable_obj(*args, **kwargs)
def assertRaisesMessage(
self, expected_exception, expected_message, *args, **kwargs
):
"""
Assert that expected_message is found in the message of a raised
exception.
Args:
expected_exception: Exception class expected to be raised.
expected_message: expected error message string value.
args: Function to be called and extra positional args.
kwargs: Extra kwargs.
"""
return self._assertFooMessage(
self.assertRaises,
"exception",
expected_exception,
expected_message,
*args,
**kwargs,
)
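    # Usage sketch: both the context-manager and callable forms.
    #
    #     with self.assertRaisesMessage(ValueError, "invalid literal"):
    #         int("abc")
    #
    #     self.assertRaisesMessage(ValueError, "invalid literal", int, "abc")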
def assertWarnsMessage(self, expected_warning, expected_message, *args, **kwargs):
"""
Same as assertRaisesMessage but for assertWarns() instead of
assertRaises().
"""
return self._assertFooMessage(
self.assertWarns,
"warning",
expected_warning,
expected_message,
*args,
**kwargs,
)
# A similar method is available in Python 3.10+.
if not PY310:
@contextmanager
def assertNoLogs(self, logger, level=None):
"""
Assert no messages are logged on the logger, with at least the
given level.
"""
if isinstance(level, int):
level = logging.getLevelName(level)
elif level is None:
level = "INFO"
try:
with self.assertLogs(logger, level) as cm:
yield
except AssertionError as e:
msg = e.args[0]
expected_msg = (
f"no logs of level {level} or higher triggered on {logger}"
)
if msg != expected_msg:
raise e
else:
self.fail(f"Unexpected logs found: {cm.output!r}")
def assertFieldOutput(
self,
fieldclass,
valid,
invalid,
field_args=None,
field_kwargs=None,
empty_value="",
):
"""
Assert that a form field behaves correctly with various inputs.
Args:
fieldclass: the class of the field to be tested.
valid: a dictionary mapping valid inputs to their expected
cleaned values.
invalid: a dictionary mapping invalid inputs to one or more
raised error messages.
field_args: the args passed to instantiate the field
field_kwargs: the kwargs passed to instantiate the field
empty_value: the expected clean output for inputs in empty_values
"""
if field_args is None:
field_args = []
if field_kwargs is None:
field_kwargs = {}
required = fieldclass(*field_args, **field_kwargs)
optional = fieldclass(*field_args, **{**field_kwargs, "required": False})
# test valid inputs
for input, output in valid.items():
self.assertEqual(required.clean(input), output)
self.assertEqual(optional.clean(input), output)
# test invalid inputs
for input, errors in invalid.items():
with self.assertRaises(ValidationError) as context_manager:
required.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
with self.assertRaises(ValidationError) as context_manager:
optional.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
# test required inputs
error_required = [required.error_messages["required"]]
for e in required.empty_values:
with self.assertRaises(ValidationError) as context_manager:
required.clean(e)
self.assertEqual(context_manager.exception.messages, error_required)
self.assertEqual(optional.clean(e), empty_value)
# test that max_length and min_length are always accepted
if issubclass(fieldclass, CharField):
field_kwargs.update({"min_length": 2, "max_length": 20})
self.assertIsInstance(fieldclass(*field_args, **field_kwargs), fieldclass)
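    # Usage sketch (illustrative; EmailField is django.forms.EmailField):
    #
    #     self.assertFieldOutput(
    #         EmailField,
    #         valid={"a@example.com": "a@example.com"},
    #         invalid={"not-an-email": ["Enter a valid email address."]},
    #     )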
def assertHTMLEqual(self, html1, html2, msg=None):
"""
Assert that two HTML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The arguments must be valid HTML.
"""
dom1 = assert_and_parse_html(
self, html1, msg, "First argument is not valid HTML:"
)
dom2 = assert_and_parse_html(
self, html2, msg, "Second argument is not valid HTML:"
)
if dom1 != dom2:
standardMsg = "%s != %s" % (safe_repr(dom1, True), safe_repr(dom2, True))
diff = "\n" + "\n".join(
difflib.ndiff(
str(dom1).splitlines(),
str(dom2).splitlines(),
)
)
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertHTMLNotEqual(self, html1, html2, msg=None):
"""Assert that two HTML snippets are not semantically equivalent."""
dom1 = assert_and_parse_html(
self, html1, msg, "First argument is not valid HTML:"
)
dom2 = assert_and_parse_html(
self, html2, msg, "Second argument is not valid HTML:"
)
if dom1 == dom2:
standardMsg = "%s == %s" % (safe_repr(dom1, True), safe_repr(dom2, True))
self.fail(self._formatMessage(msg, standardMsg))
def assertInHTML(self, needle, haystack, count=None, msg_prefix=""):
needle = assert_and_parse_html(
self, needle, None, "First argument is not valid HTML:"
)
haystack = assert_and_parse_html(
self, haystack, None, "Second argument is not valid HTML:"
)
real_count = haystack.count(needle)
if count is not None:
self.assertEqual(
real_count,
count,
msg_prefix
+ "Found %d instances of '%s' in response (expected %d)"
% (real_count, needle, count),
)
else:
self.assertTrue(
real_count != 0, msg_prefix + "Couldn't find '%s' in response" % needle
)
def assertJSONEqual(self, raw, expected_data, msg=None):
"""
Assert that the JSON fragments raw and expected_data are equal.
        Usual JSON non-significant whitespace rules apply, as the heavy
        lifting is delegated to the json library.
"""
try:
data = json.loads(raw)
except json.JSONDecodeError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, str):
try:
expected_data = json.loads(expected_data)
except ValueError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertEqual(data, expected_data, msg=msg)
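    # Behavior sketch: key order and whitespace are irrelevant.
    #
    #     self.assertJSONEqual('{"a": 1, "b": 2}', {"b": 2, "a": 1})  # passes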
def assertJSONNotEqual(self, raw, expected_data, msg=None):
"""
Assert that the JSON fragments raw and expected_data are not equal.
        Usual JSON non-significant whitespace rules apply, as the heavy
        lifting is delegated to the json library.
"""
try:
data = json.loads(raw)
except json.JSONDecodeError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, str):
try:
expected_data = json.loads(expected_data)
except json.JSONDecodeError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertNotEqual(data, expected_data, msg=msg)
def assertXMLEqual(self, xml1, xml2, msg=None):
"""
Assert that two XML snippets are semantically the same.
Whitespace in most cases is ignored and attribute ordering is not
significant. The arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = "First or second argument is not valid XML\n%s" % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if not result:
standardMsg = "%s != %s" % (
safe_repr(xml1, True),
safe_repr(xml2, True),
)
diff = "\n" + "\n".join(
difflib.ndiff(xml1.splitlines(), xml2.splitlines())
)
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertXMLNotEqual(self, xml1, xml2, msg=None):
"""
Assert that two XML snippets are not semantically equivalent.
Whitespace in most cases is ignored and attribute ordering is not
significant. The arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = "First or second argument is not valid XML\n%s" % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if result:
standardMsg = "%s == %s" % (
safe_repr(xml1, True),
safe_repr(xml2, True),
)
self.fail(self._formatMessage(msg, standardMsg))
class TransactionTestCase(SimpleTestCase):
# Subclasses can ask for resetting of auto increment sequence before each
# test case
reset_sequences = False
# Subclasses can enable only a subset of apps for faster tests
available_apps = None
# Subclasses can define fixtures which will be automatically installed.
fixtures = None
databases = {DEFAULT_DB_ALIAS}
_disallowed_database_msg = (
"Database %(operation)s to %(alias)r are not allowed in this test. "
"Add %(alias)r to %(test)s.databases to ensure proper test isolation "
"and silence this failure."
)
# If transactions aren't available, Django will serialize the database
    # contents into a fixture during setup, then flush and reload them
# during teardown (as flush does not restore data from migrations).
# This can be slow; this flag allows enabling on a per-case basis.
serialized_rollback = False
def _pre_setup(self):
"""
Perform pre-test setup:
* If the class has an 'available_apps' attribute, restrict the app
registry to these applications, then fire the post_migrate signal --
it must run with the correct set of applications for the test case.
* If the class has a 'fixtures' attribute, install those fixtures.
"""
super()._pre_setup()
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
setting_changed.send(
sender=settings._wrapped.__class__,
setting="INSTALLED_APPS",
value=self.available_apps,
enter=True,
)
for db_name in self._databases_names(include_mirrors=False):
emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name)
try:
self._fixture_setup()
except Exception:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(
sender=settings._wrapped.__class__,
setting="INSTALLED_APPS",
value=settings.INSTALLED_APPS,
enter=False,
)
raise
# Clear the queries_log so that it's less likely to overflow (a single
# test probably won't execute 9K queries). If queries_log overflows,
# then assertNumQueries() doesn't work.
for db_name in self._databases_names(include_mirrors=False):
connections[db_name].queries_log.clear()
@classmethod
def _databases_names(cls, include_mirrors=True):
# Only consider allowed database aliases, including mirrors or not.
return [
alias
for alias in connections
if alias in cls.databases
and (
include_mirrors
or not connections[alias].settings_dict["TEST"]["MIRROR"]
)
]
def _reset_sequences(self, db_name):
conn = connections[db_name]
if conn.features.supports_sequence_reset:
sql_list = conn.ops.sequence_reset_by_name_sql(
no_style(), conn.introspection.sequence_list()
)
if sql_list:
with transaction.atomic(using=db_name):
with conn.cursor() as cursor:
for sql in sql_list:
cursor.execute(sql)
def _fixture_setup(self):
for db_name in self._databases_names(include_mirrors=False):
# Reset sequences
if self.reset_sequences:
self._reset_sequences(db_name)
# Provide replica initial data from migrated apps, if needed.
if self.serialized_rollback and hasattr(
connections[db_name], "_test_serialized_contents"
):
if self.available_apps is not None:
apps.unset_available_apps()
connections[db_name].creation.deserialize_db_from_string(
connections[db_name]._test_serialized_contents
)
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
if self.fixtures:
                # This slightly awkward syntax is needed because *args and
                # **kwargs are used together.
call_command(
"loaddata", *self.fixtures, **{"verbosity": 0, "database": db_name}
)
def _should_reload_connections(self):
return True
def _post_teardown(self):
"""
Perform post-test things:
* Flush the contents of the database to leave a clean slate. If the
class has an 'available_apps' attribute, don't fire post_migrate.
* Force-close the connection so the next test gets a clean cursor.
"""
try:
self._fixture_teardown()
super()._post_teardown()
if self._should_reload_connections():
# Some DB cursors include SQL statements as part of cursor
# creation. If you have a test that does a rollback, the effect
# of these statements is lost, which can affect the operation of
# tests (e.g., losing a timezone setting causing objects to be
# created with the wrong time). To make sure this doesn't
# happen, get a clean connection at the start of every test.
for conn in connections.all(initialized_only=True):
conn.close()
finally:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(
sender=settings._wrapped.__class__,
setting="INSTALLED_APPS",
value=settings.INSTALLED_APPS,
enter=False,
)
def _fixture_teardown(self):
# Allow TRUNCATE ... CASCADE and don't emit the post_migrate signal
# when flushing only a subset of the apps
for db_name in self._databases_names(include_mirrors=False):
# Flush the database
inhibit_post_migrate = (
self.available_apps is not None
or ( # Inhibit the post_migrate signal when using serialized
# rollback to avoid trying to recreate the serialized data.
self.serialized_rollback
and hasattr(connections[db_name], "_test_serialized_contents")
)
)
call_command(
"flush",
verbosity=0,
interactive=False,
database=db_name,
reset_sequences=False,
allow_cascade=self.available_apps is not None,
inhibit_post_migrate=inhibit_post_migrate,
)
# RemovedInDjango51Warning.
def assertQuerysetEqual(self, *args, **kw):
warnings.warn(
"assertQuerysetEqual() is deprecated in favor of assertQuerySetEqual().",
category=RemovedInDjango51Warning,
stacklevel=2,
)
return self.assertQuerySetEqual(*args, **kw)
def assertQuerySetEqual(self, qs, values, transform=None, ordered=True, msg=None):
values = list(values)
items = qs
if transform is not None:
items = map(transform, items)
if not ordered:
return self.assertDictEqual(Counter(items), Counter(values), msg=msg)
        # For example, qs.iterator() could be passed as qs, but it does not
        # have an 'ordered' attribute.
if len(values) > 1 and hasattr(qs, "ordered") and not qs.ordered:
raise ValueError(
"Trying to compare non-ordered queryset against more than one "
"ordered value."
)
return self.assertEqual(list(items), values, msg=msg)
def assertNumQueries(self, num, func=None, *args, using=DEFAULT_DB_ALIAS, **kwargs):
conn = connections[using]
context = _AssertNumQueriesContext(self, num, conn)
if func is None:
return context
with context:
func(*args, **kwargs)
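    # Usage sketch (illustrative; Article is a hypothetical model):
    #
    #     with self.assertNumQueries(1):
    #         list(Article.objects.all())
    #
    #     self.assertNumQueries(1, Article.objects.count)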
def connections_support_transactions(aliases=None):
"""
Return whether or not all (or specified) connections support
transactions.
"""
conns = (
connections.all()
if aliases is None
else (connections[alias] for alias in aliases)
)
return all(conn.features.supports_transactions for conn in conns)
class TestData:
"""
Descriptor to provide TestCase instance isolation for attributes assigned
during the setUpTestData() phase.
Allow safe alteration of objects assigned in setUpTestData() by test
methods by exposing deep copies instead of the original objects.
Objects are deep copied using a memo kept on the test case instance in
order to maintain their original relationships.
"""
memo_attr = "_testdata_memo"
def __init__(self, name, data):
self.name = name
self.data = data
def get_memo(self, testcase):
try:
memo = getattr(testcase, self.memo_attr)
except AttributeError:
memo = {}
setattr(testcase, self.memo_attr, memo)
return memo
def __get__(self, instance, owner):
if instance is None:
return self.data
memo = self.get_memo(instance)
data = deepcopy(self.data, memo)
setattr(instance, self.name, data)
return data
def __repr__(self):
return "<TestData: name=%r, data=%r>" % (self.name, self.data)
class TestCase(TransactionTestCase):
"""
Similar to TransactionTestCase, but use `transaction.atomic()` to achieve
test isolation.
In most situations, TestCase should be preferred to TransactionTestCase as
it allows faster execution. However, there are some situations where using
TransactionTestCase might be necessary (e.g. testing some transactional
behavior).
On database backends with no transaction support, TestCase behaves as
TransactionTestCase.
"""
@classmethod
def _enter_atomics(cls):
"""Open atomic blocks for multiple databases."""
atomics = {}
for db_name in cls._databases_names():
atomic = transaction.atomic(using=db_name)
atomic._from_testcase = True
atomic.__enter__()
atomics[db_name] = atomic
return atomics
@classmethod
def _rollback_atomics(cls, atomics):
"""Rollback atomic blocks opened by the previous method."""
for db_name in reversed(cls._databases_names()):
transaction.set_rollback(True, using=db_name)
atomics[db_name].__exit__(None, None, None)
@classmethod
def _databases_support_transactions(cls):
return connections_support_transactions(cls.databases)
@classmethod
def setUpClass(cls):
super().setUpClass()
if not cls._databases_support_transactions():
return
cls.cls_atomics = cls._enter_atomics()
if cls.fixtures:
for db_name in cls._databases_names(include_mirrors=False):
try:
call_command(
"loaddata",
*cls.fixtures,
**{"verbosity": 0, "database": db_name},
)
except Exception:
cls._rollback_atomics(cls.cls_atomics)
raise
pre_attrs = cls.__dict__.copy()
try:
cls.setUpTestData()
except Exception:
cls._rollback_atomics(cls.cls_atomics)
raise
for name, value in cls.__dict__.items():
if value is not pre_attrs.get(name):
setattr(cls, name, TestData(name, value))
@classmethod
def tearDownClass(cls):
if cls._databases_support_transactions():
cls._rollback_atomics(cls.cls_atomics)
for conn in connections.all(initialized_only=True):
conn.close()
super().tearDownClass()
@classmethod
def setUpTestData(cls):
"""Load initial data for the TestCase."""
pass
def _should_reload_connections(self):
if self._databases_support_transactions():
return False
return super()._should_reload_connections()
def _fixture_setup(self):
if not self._databases_support_transactions():
# If the backend does not support transactions, we should reload
# class data before each test
self.setUpTestData()
return super()._fixture_setup()
if self.reset_sequences:
raise TypeError("reset_sequences cannot be used on TestCase instances")
self.atomics = self._enter_atomics()
def _fixture_teardown(self):
if not self._databases_support_transactions():
return super()._fixture_teardown()
try:
for db_name in reversed(self._databases_names()):
if self._should_check_constraints(connections[db_name]):
connections[db_name].check_constraints()
finally:
self._rollback_atomics(self.atomics)
def _should_check_constraints(self, connection):
return (
connection.features.can_defer_constraint_checks
and not connection.needs_rollback
and connection.is_usable()
)
@classmethod
@contextmanager
def captureOnCommitCallbacks(cls, *, using=DEFAULT_DB_ALIAS, execute=False):
"""Context manager to capture transaction.on_commit() callbacks."""
callbacks = []
start_count = len(connections[using].run_on_commit)
try:
yield callbacks
finally:
while True:
callback_count = len(connections[using].run_on_commit)
for _, callback, robust in connections[using].run_on_commit[
start_count:
]:
callbacks.append(callback)
if execute:
if robust:
try:
callback()
except Exception as e:
logger.error(
f"Error calling {callback.__qualname__} in "
f"on_commit() (%s).",
e,
exc_info=True,
)
else:
callback()
if callback_count == len(connections[using].run_on_commit):
break
start_count = callback_count
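    # Usage sketch (illustrative; "/contact/" is a hypothetical URL whose view
    # registers one transaction.on_commit() callback):
    #
    #     with self.captureOnCommitCallbacks(execute=True) as callbacks:
    #         self.client.post("/contact/", {"message": "hello"})
    #     self.assertEqual(len(callbacks), 1)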
class CheckCondition:
"""Descriptor class for deferred condition checking."""
def __init__(self, *conditions):
self.conditions = conditions
def add_condition(self, condition, reason):
return self.__class__(*self.conditions, (condition, reason))
def __get__(self, instance, cls=None):
# Trigger access for all bases.
if any(getattr(base, "__unittest_skip__", False) for base in cls.__bases__):
return True
for condition, reason in self.conditions:
if condition():
# Override this descriptor's value and set the skip reason.
cls.__unittest_skip__ = True
cls.__unittest_skip_why__ = reason
return True
return False
def _deferredSkip(condition, reason, name):
def decorator(test_func):
nonlocal condition
if not (
isinstance(test_func, type) and issubclass(test_func, unittest.TestCase)
):
@wraps(test_func)
def skip_wrapper(*args, **kwargs):
if (
args
and isinstance(args[0], unittest.TestCase)
and connection.alias not in getattr(args[0], "databases", {})
):
raise ValueError(
"%s cannot be used on %s as %s doesn't allow queries "
"against the %r database."
% (
name,
args[0],
args[0].__class__.__qualname__,
connection.alias,
)
)
if condition():
raise unittest.SkipTest(reason)
return test_func(*args, **kwargs)
test_item = skip_wrapper
else:
# Assume a class is decorated
test_item = test_func
databases = getattr(test_item, "databases", None)
if not databases or connection.alias not in databases:
# Defer raising to allow importing test class's module.
def condition():
raise ValueError(
"%s cannot be used on %s as it doesn't allow queries "
"against the '%s' database."
% (
name,
test_item,
connection.alias,
)
)
# Retrieve the possibly existing value from the class's dict to
# avoid triggering the descriptor.
skip = test_func.__dict__.get("__unittest_skip__")
if isinstance(skip, CheckCondition):
test_item.__unittest_skip__ = skip.add_condition(condition, reason)
elif skip is not True:
test_item.__unittest_skip__ = CheckCondition((condition, reason))
return test_item
return decorator
def skipIfDBFeature(*features):
"""Skip a test if a database has at least one of the named features."""
return _deferredSkip(
lambda: any(
getattr(connection.features, feature, False) for feature in features
),
"Database has feature(s) %s" % ", ".join(features),
"skipIfDBFeature",
)
def skipUnlessDBFeature(*features):
"""Skip a test unless a database has all the named features."""
return _deferredSkip(
lambda: not all(
getattr(connection.features, feature, False) for feature in features
),
"Database doesn't support feature(s): %s" % ", ".join(features),
"skipUnlessDBFeature",
)
def skipUnlessAnyDBFeature(*features):
"""Skip a test unless a database has any of the named features."""
return _deferredSkip(
lambda: not any(
getattr(connection.features, feature, False) for feature in features
),
"Database doesn't support any of the feature(s): %s" % ", ".join(features),
"skipUnlessAnyDBFeature",
)
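# Usage sketch: the decorators apply to test methods or whole test classes,
# and the feature check is deferred until the test actually runs.
#
#     @skipUnlessDBFeature("supports_transactions")
#     def test_uses_savepoints(self):
#         ...
#
#     @skipIfDBFeature("interprets_empty_strings_as_nulls")
#     class EmptyStringTests(TestCase):
#         ...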
class QuietWSGIRequestHandler(WSGIRequestHandler):
"""
A WSGIRequestHandler that doesn't log to standard output any of the
requests received, so as to not clutter the test result output.
"""
def log_message(*args):
pass
class FSFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to a directory, as defined by one of
the *_ROOT settings, and serves those files, publishing them under *_URL.
"""
def __init__(self, application):
self.application = application
self.base_url = urlparse(self.get_base_url())
super().__init__()
def _should_handle(self, path):
"""
Check if the path should be handled. Ignore the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def file_path(self, url):
"""Return the relative path to the file on disk for the given URL."""
relative_url = url[len(self.base_url[2]) :]
return url2pathname(relative_url)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404:
pass
return super().get_response(request)
def serve(self, request):
os_rel_path = self.file_path(request.path)
os_rel_path = posixpath.normpath(unquote(os_rel_path))
# Emulate behavior of django.contrib.staticfiles.views.serve() when it
# invokes staticfiles' finders functionality.
# TODO: Modify if/when that internal API is refactored
final_rel_path = os_rel_path.replace("\\", "/").lstrip("/")
return serve(request, final_rel_path, document_root=self.get_base_dir())
def __call__(self, environ, start_response):
if not self._should_handle(get_path_info(environ)):
return self.application(environ, start_response)
return super().__call__(environ, start_response)
class _StaticFilesHandler(FSFilesHandler):
"""
Handler for serving static files. A private class that is meant to be used
solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.STATIC_ROOT
def get_base_url(self):
return settings.STATIC_URL
class _MediaFilesHandler(FSFilesHandler):
"""
Handler for serving the media files. A private class that is meant to be
used solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.MEDIA_ROOT
def get_base_url(self):
return settings.MEDIA_URL
class LiveServerThread(threading.Thread):
"""Thread for running a live HTTP server while the tests are running."""
server_class = ThreadedWSGIServer
def __init__(self, host, static_handler, connections_override=None, port=0):
self.host = host
self.port = port
self.is_ready = threading.Event()
self.error = None
self.static_handler = static_handler
self.connections_override = connections_override
super().__init__()
def run(self):
"""
Set up the live server and databases, and then loop over handling
HTTP requests.
"""
if self.connections_override:
# Override this thread's database connections with the ones
# provided by the main thread.
for alias, conn in self.connections_override.items():
connections[alias] = conn
try:
# Create the handler for serving static and media files
handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
self.httpd = self._create_server(
connections_override=self.connections_override,
)
# If binding to port zero, assign the port allocated by the OS.
if self.port == 0:
self.port = self.httpd.server_address[1]
self.httpd.set_app(handler)
self.is_ready.set()
self.httpd.serve_forever()
except Exception as e:
self.error = e
self.is_ready.set()
finally:
connections.close_all()
def _create_server(self, connections_override=None):
return self.server_class(
(self.host, self.port),
QuietWSGIRequestHandler,
allow_reuse_address=False,
connections_override=connections_override,
)
def terminate(self):
if hasattr(self, "httpd"):
# Stop the WSGI server
self.httpd.shutdown()
self.httpd.server_close()
self.join()
class LiveServerTestCase(TransactionTestCase):
"""
Do basically the same as TransactionTestCase but also launch a live HTTP
server in a separate thread so that the tests may use another testing
    framework, such as Selenium, instead of the built-in dummy client.
    It inherits from TransactionTestCase instead of TestCase because the
    threads don't share the same transactions (unless using in-memory sqlite)
    and each thread needs to commit all its transactions so that the other
thread can see the changes.
"""
host = "localhost"
port = 0
server_thread_class = LiveServerThread
static_handler = _StaticFilesHandler
@classproperty
def live_server_url(cls):
return "http://%s:%s" % (cls.host, cls.server_thread.port)
@classproperty
def allowed_host(cls):
return cls.host
@classmethod
def _make_connections_override(cls):
connections_override = {}
for conn in connections.all():
# If using in-memory sqlite databases, pass the connections to
# the server thread.
if conn.vendor == "sqlite" and conn.is_in_memory_db():
connections_override[conn.alias] = conn
return connections_override
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._live_server_modified_settings = modify_settings(
ALLOWED_HOSTS={"append": cls.allowed_host},
)
cls._live_server_modified_settings.enable()
cls.addClassCleanup(cls._live_server_modified_settings.disable)
cls._start_server_thread()
@classmethod
def _start_server_thread(cls):
connections_override = cls._make_connections_override()
for conn in connections_override.values():
# Explicitly enable thread-shareability for this connection.
conn.inc_thread_sharing()
cls.server_thread = cls._create_server_thread(connections_override)
cls.server_thread.daemon = True
cls.server_thread.start()
cls.addClassCleanup(cls._terminate_thread)
# Wait for the live server to be ready
cls.server_thread.is_ready.wait()
if cls.server_thread.error:
raise cls.server_thread.error
@classmethod
def _create_server_thread(cls, connections_override):
return cls.server_thread_class(
cls.host,
cls.static_handler,
connections_override=connections_override,
port=cls.port,
)
@classmethod
def _terminate_thread(cls):
# Terminate the live server's thread.
cls.server_thread.terminate()
# Restore shared connections' non-shareability.
for conn in cls.server_thread.connections_override.values():
conn.dec_thread_sharing()
class SerializeMixin:
"""
Enforce serialization of TestCases that share a common resource.
Define a common 'lockfile' for each set of TestCases to serialize. This
file must exist on the filesystem.
Place it early in the MRO in order to isolate setUpClass()/tearDownClass().
"""
lockfile = None
def __init_subclass__(cls, /, **kwargs):
super().__init_subclass__(**kwargs)
if cls.lockfile is None:
raise ValueError(
"{}.lockfile isn't set. Set it to a unique value "
"in the base class.".format(cls.__name__)
)
@classmethod
def setUpClass(cls):
cls._lockfile = open(cls.lockfile)
cls.addClassCleanup(cls._lockfile.close)
locks.lock(cls._lockfile, locks.LOCK_EX)
super().setUpClass()
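# Usage sketch (illustrative): test cases sharing one resource serialize on a
# common lock file, which must already exist on disk.
#
#     class BaseResourceTests(SerializeMixin, TestCase):
#         lockfile = __file__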
|
5ae7e6e7f3c35e0816d589e207c7cfa5c94178f744a07cc48480dbc1f815c20a | import collections
import logging
import os
import re
import sys
import time
import warnings
from contextlib import contextmanager
from functools import wraps
from io import StringIO
from itertools import chain
from types import SimpleNamespace
from unittest import TestCase, skipIf, skipUnless
from xml.dom.minidom import Node, parseString
from asgiref.sync import iscoroutinefunction
from django.apps import apps
from django.apps.registry import Apps
from django.conf import UserSettingsHolder, settings
from django.core import mail
from django.core.exceptions import ImproperlyConfigured
from django.core.signals import request_started, setting_changed
from django.db import DEFAULT_DB_ALIAS, connections, reset_queries
from django.db.models.options import Options
from django.template import Template
from django.test.signals import template_rendered
from django.urls import get_script_prefix, set_script_prefix
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.translation import deactivate
try:
import jinja2
except ImportError:
jinja2 = None
__all__ = (
"Approximate",
"ContextList",
"isolate_lru_cache",
"get_runner",
"CaptureQueriesContext",
"ignore_warnings",
"isolate_apps",
"modify_settings",
"override_settings",
"override_system_checks",
"tag",
"requires_tz_support",
"setup_databases",
"setup_test_environment",
"teardown_test_environment",
)
TZ_SUPPORT = hasattr(time, "tzset")
class Approximate:
def __init__(self, val, places=7):
self.val = val
self.places = places
def __repr__(self):
return repr(self.val)
def __eq__(self, other):
return self.val == other or round(abs(self.val - other), self.places) == 0
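# Illustrative comparison, relying only on standard float behavior:
# 0.1 + 0.2 differs from 0.3 only beyond the default 7 decimal places, so
# ``Approximate(0.3) == 0.1 + 0.2`` is True while ``0.3 == 0.1 + 0.2`` is not.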
class ContextList(list):
"""
A wrapper that provides direct key access to context items contained
in a list of context objects.
"""
def __getitem__(self, key):
if isinstance(key, str):
for subcontext in self:
if key in subcontext:
return subcontext[key]
raise KeyError(key)
else:
return super().__getitem__(key)
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
return True
def keys(self):
"""
Flattened keys of subcontexts.
"""
return set(chain.from_iterable(d for subcontext in self for d in subcontext))
def instrumented_test_render(self, context):
"""
An instrumented Template render method, providing a signal that can be
intercepted by the test Client.
"""
template_rendered.send(sender=self, template=self, context=context)
return self.nodelist.render(context)
class _TestState:
pass
def setup_test_environment(debug=None):
"""
Perform global pre-test setup, such as installing the instrumented template
renderer and setting the email backend to the locmem email backend.
"""
if hasattr(_TestState, "saved_data"):
# Executing this function twice would overwrite the saved values.
raise RuntimeError(
"setup_test_environment() was already called and can't be called "
"again without first calling teardown_test_environment()."
)
if debug is None:
debug = settings.DEBUG
saved_data = SimpleNamespace()
_TestState.saved_data = saved_data
saved_data.allowed_hosts = settings.ALLOWED_HOSTS
# Add the default host of the test client.
settings.ALLOWED_HOSTS = [*settings.ALLOWED_HOSTS, "testserver"]
saved_data.debug = settings.DEBUG
settings.DEBUG = debug
saved_data.email_backend = settings.EMAIL_BACKEND
settings.EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
saved_data.template_render = Template._render
Template._render = instrumented_test_render
mail.outbox = []
deactivate()
def teardown_test_environment():
"""
Perform any global post-test teardown, such as restoring the original
template renderer and restoring the email sending functions.
"""
saved_data = _TestState.saved_data
settings.ALLOWED_HOSTS = saved_data.allowed_hosts
settings.DEBUG = saved_data.debug
settings.EMAIL_BACKEND = saved_data.email_backend
Template._render = saved_data.template_render
del _TestState.saved_data
del mail.outbox
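# Hedged usage sketch: test runners pair these two calls around a test run
# (DiscoverRunner does this for you); standalone use looks like:
#
#   setup_test_environment()
#   try:
#       ...  # code that sends mail or renders templates under test
#   finally:
#       teardown_test_environment()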
def setup_databases(
verbosity,
interactive,
*,
time_keeper=None,
keepdb=False,
debug_sql=False,
parallel=0,
aliases=None,
serialized_aliases=None,
**kwargs,
):
"""Create the test databases."""
if time_keeper is None:
time_keeper = NullTimeKeeper()
test_databases, mirrored_aliases = get_unique_databases_and_mirrors(aliases)
old_names = []
for db_name, aliases in test_databases.values():
first_alias = None
for alias in aliases:
connection = connections[alias]
old_names.append((connection, db_name, first_alias is None))
# Actually create the database for the first connection
if first_alias is None:
first_alias = alias
with time_keeper.timed(" Creating '%s'" % alias):
# RemovedInDjango50Warning: when the deprecation ends,
# replace with:
# serialize_alias = (
# serialized_aliases is None
# or alias in serialized_aliases
# )
try:
serialize_alias = connection.settings_dict["TEST"]["SERIALIZE"]
except KeyError:
serialize_alias = (
serialized_aliases is None or alias in serialized_aliases
)
else:
warnings.warn(
"The SERIALIZE test database setting is "
"deprecated as it can be inferred from the "
"TestCase/TransactionTestCase.databases that "
"enable the serialized_rollback feature.",
category=RemovedInDjango50Warning,
)
connection.creation.create_test_db(
verbosity=verbosity,
autoclobber=not interactive,
keepdb=keepdb,
serialize=serialize_alias,
)
if parallel > 1:
for index in range(parallel):
with time_keeper.timed(" Cloning '%s'" % alias):
connection.creation.clone_test_db(
suffix=str(index + 1),
verbosity=verbosity,
keepdb=keepdb,
)
# Configure all other connections as mirrors of the first one
else:
connections[alias].creation.set_as_test_mirror(
connections[first_alias].settings_dict
)
# Configure the test mirrors.
for alias, mirror_alias in mirrored_aliases.items():
connections[alias].creation.set_as_test_mirror(
connections[mirror_alias].settings_dict
)
if debug_sql:
for alias in connections:
connections[alias].force_debug_cursor = True
return old_names
def iter_test_cases(tests):
"""
Return an iterator over a test suite's unittest.TestCase objects.
The tests argument can also be an iterable of TestCase objects.
"""
for test in tests:
if isinstance(test, str):
# Prevent an unfriendly RecursionError that can happen with
# strings.
raise TypeError(
f"Test {test!r} must be a test case or test suite not string "
f"(was found in {tests!r})."
)
if isinstance(test, TestCase):
yield test
else:
# Otherwise, assume it is a test suite.
yield from iter_test_cases(test)
def dependency_ordered(test_databases, dependencies):
"""
Reorder test_databases into an order that honors the dependencies
described in TEST[DEPENDENCIES].
"""
ordered_test_databases = []
resolved_databases = set()
# Maps db signature to dependencies of all its aliases
dependencies_map = {}
# Check that no database depends on its own alias
for sig, (_, aliases) in test_databases:
all_deps = set()
for alias in aliases:
all_deps.update(dependencies.get(alias, []))
if not all_deps.isdisjoint(aliases):
raise ImproperlyConfigured(
"Circular dependency: databases %r depend on each other, "
"but are aliases." % aliases
)
dependencies_map[sig] = all_deps
while test_databases:
changed = False
deferred = []
# Try to find a DB that has all its dependencies met
for signature, (db_name, aliases) in test_databases:
if dependencies_map[signature].issubset(resolved_databases):
resolved_databases.update(aliases)
ordered_test_databases.append((signature, (db_name, aliases)))
changed = True
else:
deferred.append((signature, (db_name, aliases)))
if not changed:
raise ImproperlyConfigured("Circular dependency in TEST[DEPENDENCIES]")
test_databases = deferred
return ordered_test_databases
def get_unique_databases_and_mirrors(aliases=None):
"""
Figure out which databases actually need to be created.
Deduplicate entries in DATABASES that correspond to the same database or are
configured as test mirrors.
Return two values:
- test_databases: ordered mapping of signatures to (name, list of aliases)
where all aliases share the same underlying database.
- mirrored_aliases: mapping of mirror aliases to original aliases.
"""
if aliases is None:
aliases = connections
mirrored_aliases = {}
test_databases = {}
dependencies = {}
default_sig = connections[DEFAULT_DB_ALIAS].creation.test_db_signature()
for alias in connections:
connection = connections[alias]
test_settings = connection.settings_dict["TEST"]
if test_settings["MIRROR"]:
# If the database is marked as a test mirror, save the alias.
mirrored_aliases[alias] = test_settings["MIRROR"]
elif alias in aliases:
# Store a tuple with DB parameters that uniquely identify it.
# If we have two aliases with the same values for that tuple,
# we only need to create the test database once.
item = test_databases.setdefault(
connection.creation.test_db_signature(),
(connection.settings_dict["NAME"], []),
)
# The default database must be the first because data migrations
# use the default alias by default.
if alias == DEFAULT_DB_ALIAS:
item[1].insert(0, alias)
else:
item[1].append(alias)
if "DEPENDENCIES" in test_settings:
dependencies[alias] = test_settings["DEPENDENCIES"]
else:
if (
alias != DEFAULT_DB_ALIAS
and connection.creation.test_db_signature() != default_sig
):
dependencies[alias] = test_settings.get(
"DEPENDENCIES", [DEFAULT_DB_ALIAS]
)
test_databases = dict(dependency_ordered(test_databases.items(), dependencies))
return test_databases, mirrored_aliases
def teardown_databases(old_config, verbosity, parallel=0, keepdb=False):
"""Destroy all the non-mirror databases."""
for connection, old_name, destroy in old_config:
if destroy:
if parallel > 1:
for index in range(parallel):
connection.creation.destroy_test_db(
suffix=str(index + 1),
verbosity=verbosity,
keepdb=keepdb,
)
connection.creation.destroy_test_db(old_name, verbosity, keepdb)
def get_runner(settings, test_runner_class=None):
test_runner_class = test_runner_class or settings.TEST_RUNNER
test_path = test_runner_class.split(".")
# Allow for relative paths
if len(test_path) > 1:
test_module_name = ".".join(test_path[:-1])
else:
test_module_name = "."
test_module = __import__(test_module_name, {}, {}, test_path[-1])
return getattr(test_module, test_path[-1])
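# Usage sketch, assuming configured settings: resolve the runner class named
# by TEST_RUNNER, then instantiate and run it (the test label is hypothetical).
#
#   TestRunner = get_runner(settings)  # DiscoverRunner by default
#   failures = TestRunner(verbosity=2).run_tests(["myapp.tests"])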
class TestContextDecorator:
"""
A base class that can either be used as a context manager during tests
or as a test function or unittest.TestCase subclass decorator to perform
temporary alterations.
`attr_name`: attribute assigned the return value of enable() if used as
a class decorator.
`kwarg_name`: keyword argument passing the return value of enable() if
used as a function decorator.
"""
def __init__(self, attr_name=None, kwarg_name=None):
self.attr_name = attr_name
self.kwarg_name = kwarg_name
def enable(self):
raise NotImplementedError
def disable(self):
raise NotImplementedError
def __enter__(self):
return self.enable()
def __exit__(self, exc_type, exc_value, traceback):
self.disable()
def decorate_class(self, cls):
if issubclass(cls, TestCase):
decorated_setUp = cls.setUp
def setUp(inner_self):
context = self.enable()
inner_self.addCleanup(self.disable)
if self.attr_name:
setattr(inner_self, self.attr_name, context)
decorated_setUp(inner_self)
cls.setUp = setUp
return cls
raise TypeError("Can only decorate subclasses of unittest.TestCase")
def decorate_callable(self, func):
if iscoroutinefunction(func):
# If the inner function is an async function, we must execute async
# as well so that the `with` statement executes at the right time.
@wraps(func)
async def inner(*args, **kwargs):
with self as context:
if self.kwarg_name:
kwargs[self.kwarg_name] = context
return await func(*args, **kwargs)
else:
@wraps(func)
def inner(*args, **kwargs):
with self as context:
if self.kwarg_name:
kwargs[self.kwarg_name] = context
return func(*args, **kwargs)
return inner
def __call__(self, decorated):
if isinstance(decorated, type):
return self.decorate_class(decorated)
elif callable(decorated):
return self.decorate_callable(decorated)
raise TypeError("Cannot decorate object of type %s" % type(decorated))
class override_settings(TestContextDecorator):
"""
Act as either a decorator or a context manager. If it's a decorator, take a
function and return a wrapped function. If it's a context manager, use it
with the ``with`` statement. In either event, entering/exiting are called
before and after, respectively, the function/block is executed.
"""
enable_exception = None
def __init__(self, **kwargs):
self.options = kwargs
super().__init__()
def enable(self):
# Keep this code at the beginning to leave the settings unchanged
# in case it raises an exception because INSTALLED_APPS is invalid.
if "INSTALLED_APPS" in self.options:
try:
apps.set_installed_apps(self.options["INSTALLED_APPS"])
except Exception:
apps.unset_installed_apps()
raise
override = UserSettingsHolder(settings._wrapped)
for key, new_value in self.options.items():
setattr(override, key, new_value)
self.wrapped = settings._wrapped
settings._wrapped = override
for key, new_value in self.options.items():
try:
setting_changed.send(
sender=settings._wrapped.__class__,
setting=key,
value=new_value,
enter=True,
)
except Exception as exc:
self.enable_exception = exc
self.disable()
def disable(self):
if "INSTALLED_APPS" in self.options:
apps.unset_installed_apps()
settings._wrapped = self.wrapped
del self.wrapped
responses = []
for key in self.options:
new_value = getattr(settings, key, None)
responses_for_setting = setting_changed.send_robust(
sender=settings._wrapped.__class__,
setting=key,
value=new_value,
enter=False,
)
responses.extend(responses_for_setting)
if self.enable_exception is not None:
exc = self.enable_exception
self.enable_exception = None
raise exc
for _, response in responses:
if isinstance(response, Exception):
raise response
def save_options(self, test_func):
if test_func._overridden_settings is None:
test_func._overridden_settings = self.options
else:
# Duplicate dict to prevent subclasses from altering their parent.
test_func._overridden_settings = {
**test_func._overridden_settings,
**self.options,
}
def decorate_class(self, cls):
from django.test import SimpleTestCase
if not issubclass(cls, SimpleTestCase):
raise ValueError(
"Only subclasses of Django SimpleTestCase can be decorated "
"with override_settings"
)
self.save_options(cls)
return cls
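# Usage sketch of both modes described in the docstring above:
#
#   @override_settings(USE_TZ=True)
#   class TimezoneTests(SimpleTestCase):
#       ...
#
#   with override_settings(DEBUG=True):
#       ...  # settings.DEBUG is True only inside this block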
class modify_settings(override_settings):
"""
Like override_settings, but makes it possible to append, prepend, or remove
items instead of redefining the entire list.
"""
def __init__(self, *args, **kwargs):
if args:
# Hack used when instantiating from SimpleTestCase.setUpClass.
assert not kwargs
self.operations = args[0]
else:
assert not args
self.operations = list(kwargs.items())
super(override_settings, self).__init__()
def save_options(self, test_func):
if test_func._modified_settings is None:
test_func._modified_settings = self.operations
else:
# Duplicate list to prevent subclasses from altering their parent.
test_func._modified_settings = (
list(test_func._modified_settings) + self.operations
)
def enable(self):
self.options = {}
for name, operations in self.operations:
try:
# When called from SimpleTestCase.setUpClass, values may be
# overridden several times; cumulate changes.
value = self.options[name]
except KeyError:
value = list(getattr(settings, name, []))
for action, items in operations.items():
# items may be a single value or an iterable.
if isinstance(items, str):
items = [items]
if action == "append":
value += [item for item in items if item not in value]
elif action == "prepend":
value = [item for item in items if item not in value] + value
elif action == "remove":
value = [item for item in value if item not in items]
else:
raise ValueError("Unsupported action: %s" % action)
self.options[name] = value
super().enable()
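# Usage sketch: operate on list settings without restating the whole list;
# a plain string item is treated as a one-element list (paths illustrative).
#
#   @modify_settings(MIDDLEWARE={
#       "prepend": "myapp.middleware.First",
#       "remove": "django.middleware.csrf.CsrfViewMiddleware",
#   })
#   class MiddlewareTests(SimpleTestCase):
#       ...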
class override_system_checks(TestContextDecorator):
"""
Act as a decorator. Override list of registered system checks.
Useful when you override `INSTALLED_APPS`, e.g. if you exclude the `auth`
app, you also need to exclude its system checks.
"""
def __init__(self, new_checks, deployment_checks=None):
from django.core.checks.registry import registry
self.registry = registry
self.new_checks = new_checks
self.deployment_checks = deployment_checks
super().__init__()
def enable(self):
self.old_checks = self.registry.registered_checks
self.registry.registered_checks = set()
for check in self.new_checks:
self.registry.register(check, *getattr(check, "tags", ()))
self.old_deployment_checks = self.registry.deployment_checks
if self.deployment_checks is not None:
self.registry.deployment_checks = set()
for check in self.deployment_checks:
self.registry.register(check, *getattr(check, "tags", ()), deploy=True)
def disable(self):
self.registry.registered_checks = self.old_checks
self.registry.deployment_checks = self.old_deployment_checks
def compare_xml(want, got):
"""
Try to do an 'xml-comparison' of want and got. Plain string comparison
doesn't always work because, for example, attribute ordering should not be
important. Ignore comment nodes, processing instructions, document type
node, and leading and trailing whitespace.
Based on https://github.com/lxml/lxml/blob/master/src/lxml/doctestcompare.py
"""
_norm_whitespace_re = re.compile(r"[ \t\n][ \t\n]+")
def norm_whitespace(v):
return _norm_whitespace_re.sub(" ", v)
def child_text(element):
return "".join(
c.data for c in element.childNodes if c.nodeType == Node.TEXT_NODE
)
def children(element):
return [c for c in element.childNodes if c.nodeType == Node.ELEMENT_NODE]
def norm_child_text(element):
return norm_whitespace(child_text(element))
def attrs_dict(element):
return dict(element.attributes.items())
def check_element(want_element, got_element):
if want_element.tagName != got_element.tagName:
return False
if norm_child_text(want_element) != norm_child_text(got_element):
return False
if attrs_dict(want_element) != attrs_dict(got_element):
return False
want_children = children(want_element)
got_children = children(got_element)
if len(want_children) != len(got_children):
return False
return all(
check_element(want, got) for want, got in zip(want_children, got_children)
)
def first_node(document):
for node in document.childNodes:
if node.nodeType not in (
Node.COMMENT_NODE,
Node.DOCUMENT_TYPE_NODE,
Node.PROCESSING_INSTRUCTION_NODE,
):
return node
want = want.strip().replace("\\n", "\n")
got = got.strip().replace("\\n", "\n")
# If the string is not a complete xml document, we may need to add a
# root element. This allows us to compare fragments, like "<foo/><bar/>".
if not want.startswith("<?xml"):
wrapper = "<root>%s</root>"
want = wrapper % want
got = wrapper % got
# Parse the want and got strings, and compare the parsings.
want_root = first_node(parseString(want))
got_root = first_node(parseString(got))
return check_element(want_root, got_root)
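# Illustrative check: attribute order and surrounding whitespace don't
# affect the result, so this comparison returns True.
#
#   compare_xml('<a x="1" y="2"/>', ' <a y="2" x="1"/> ')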
class CaptureQueriesContext:
"""
Context manager that captures queries executed by the specified connection.
"""
def __init__(self, connection):
self.connection = connection
def __iter__(self):
return iter(self.captured_queries)
def __getitem__(self, index):
return self.captured_queries[index]
def __len__(self):
return len(self.captured_queries)
@property
def captured_queries(self):
return self.connection.queries[self.initial_queries : self.final_queries]
def __enter__(self):
self.force_debug_cursor = self.connection.force_debug_cursor
self.connection.force_debug_cursor = True
# Run any initialization queries if needed so that they won't be
# included as part of the count.
self.connection.ensure_connection()
self.initial_queries = len(self.connection.queries_log)
self.final_queries = None
request_started.disconnect(reset_queries)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.connection.force_debug_cursor = self.force_debug_cursor
request_started.connect(reset_queries)
if exc_type is not None:
return
self.final_queries = len(self.connection.queries_log)
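# Usage sketch, assuming a configured default database (the model name is
# hypothetical):
#
#   from django.db import connection
#   with CaptureQueriesContext(connection) as ctx:
#       list(MyModel.objects.all())
#   assert len(ctx.captured_queries) == 1
#   print(ctx.captured_queries[0]["sql"])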
class ignore_warnings(TestContextDecorator):
def __init__(self, **kwargs):
self.ignore_kwargs = kwargs
if "message" in self.ignore_kwargs or "module" in self.ignore_kwargs:
self.filter_func = warnings.filterwarnings
else:
self.filter_func = warnings.simplefilter
super().__init__()
def enable(self):
self.catch_warnings = warnings.catch_warnings()
self.catch_warnings.__enter__()
self.filter_func("ignore", **self.ignore_kwargs)
def disable(self):
self.catch_warnings.__exit__(*sys.exc_info())
# On OSes that don't provide tzset (Windows), we can't set the timezone
# in which the program runs. As a consequence, we must skip tests that
# don't enforce a specific timezone (with timezone.override or equivalent),
# or attempt to interpret naive datetimes in the default timezone.
requires_tz_support = skipUnless(
TZ_SUPPORT,
"This test relies on the ability to run a program in an arbitrary "
"time zone, but your operating system isn't able to do that.",
)
@contextmanager
def extend_sys_path(*paths):
"""Context manager to temporarily add paths to sys.path."""
_orig_sys_path = sys.path[:]
sys.path.extend(paths)
try:
yield
finally:
sys.path = _orig_sys_path
@contextmanager
def isolate_lru_cache(lru_cache_object):
"""Clear the cache of an LRU cache object on entering and exiting."""
lru_cache_object.cache_clear()
try:
yield
finally:
lru_cache_object.cache_clear()
@contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO.
Note: This function and the following ``captured_std*`` are copied
from CPython's ``test.support`` module."""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print("hello")
self.assertEqual(stdout.getvalue(), "hello\n")
"""
return captured_output("stdout")
def captured_stderr():
"""Capture the output of sys.stderr:
with captured_stderr() as stderr:
print("hello", file=sys.stderr)
self.assertEqual(stderr.getvalue(), "hello\n")
"""
return captured_output("stderr")
def captured_stdin():
"""Capture the input to sys.stdin:
with captured_stdin() as stdin:
stdin.write('hello\n')
stdin.seek(0)
# call test code that consumes from sys.stdin
captured = input()
self.assertEqual(captured, "hello")
"""
return captured_output("stdin")
@contextmanager
def freeze_time(t):
"""
Context manager to temporarily freeze time.time(). This temporarily
modifies the time function of the time module. Modules which import the
time function directly (e.g. `from time import time`) won't be affected.
This isn't meant as a public API, but helps reduce some repetitive code in
Django's test suite.
"""
_real_time = time.time
time.time = lambda: t
try:
yield
finally:
time.time = _real_time
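# Illustrative use: inside the block, time.time() is pinned to the given
# value (modules that did ``from time import time`` are unaffected).
#
#   with freeze_time(0):
#       assert time.time() == 0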
def require_jinja2(test_func):
"""
Decorator to enable a Jinja2 template engine in addition to the regular
Django template engine for a test or skip it if Jinja2 isn't available.
"""
test_func = skipIf(jinja2 is None, "this test requires jinja2")(test_func)
return override_settings(
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
},
{
"BACKEND": "django.template.backends.jinja2.Jinja2",
"APP_DIRS": True,
"OPTIONS": {"keep_trailing_newline": True},
},
]
)(test_func)
class override_script_prefix(TestContextDecorator):
"""Decorator or context manager to temporary override the script prefix."""
def __init__(self, prefix):
self.prefix = prefix
super().__init__()
def enable(self):
self.old_prefix = get_script_prefix()
set_script_prefix(self.prefix)
def disable(self):
set_script_prefix(self.old_prefix)
class LoggingCaptureMixin:
"""
Capture the output from the 'django' logger and store it on the class's
logger_output attribute.
"""
def setUp(self):
self.logger = logging.getLogger("django")
self.old_stream = self.logger.handlers[0].stream
self.logger_output = StringIO()
self.logger.handlers[0].stream = self.logger_output
def tearDown(self):
self.logger.handlers[0].stream = self.old_stream
class isolate_apps(TestContextDecorator):
"""
Act as either a decorator or a context manager to register models defined
in its wrapped context to an isolated registry.
The list of installed apps the isolated registry should contain must be
passed as arguments.
Two optional keyword arguments can be specified:
`attr_name`: attribute assigned the isolated registry if used as a class
decorator.
`kwarg_name`: keyword argument passing the isolated registry if used as a
function decorator.
"""
def __init__(self, *installed_apps, **kwargs):
self.installed_apps = installed_apps
super().__init__(**kwargs)
def enable(self):
self.old_apps = Options.default_apps
apps = Apps(self.installed_apps)
setattr(Options, "default_apps", apps)
return apps
def disable(self):
setattr(Options, "default_apps", self.old_apps)
class TimeKeeper:
def __init__(self):
self.records = collections.defaultdict(list)
@contextmanager
def timed(self, name):
# Touching the defaultdict registers the name immediately, so
# print_results() reports timers in the order they started, even when
# timed blocks nest.
self.records[name]
start_time = time.perf_counter()
try:
yield
finally:
end_time = time.perf_counter() - start_time
self.records[name].append(end_time)
def print_results(self):
for name, end_times in self.records.items():
for record_time in end_times:
record = "%s took %.3fs" % (name, record_time)
sys.stderr.write(record + os.linesep)
class NullTimeKeeper:
@contextmanager
def timed(self, name):
yield
def print_results(self):
pass
def tag(*tags):
"""Decorator to add tags to a test class or method."""
def decorator(obj):
if hasattr(obj, "tags"):
obj.tags = obj.tags.union(tags)
else:
setattr(obj, "tags", set(tags))
return obj
return decorator
@contextmanager
def register_lookup(field, *lookups, lookup_name=None):
"""
Context manager to temporarily register lookups on a model field using
lookup_name (or the lookup's lookup_name if not provided).
"""
try:
for lookup in lookups:
field.register_lookup(lookup, lookup_name)
yield
finally:
for lookup in lookups:
field._unregister_lookup(lookup, lookup_name)
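# Usage sketch with a built-in transform (the model is hypothetical):
#
#   from django.db.models import CharField
#   from django.db.models.functions import Lower
#
#   with register_lookup(CharField, Lower):
#       MyModel.objects.filter(name__lower="foo")  # valid only in here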
|
16610647197cbb0ff9c43ef604e3f0a06943fb4938ad8680c82c13a26d43b143 | import functools
import itertools
import re
import sys
import types
import warnings
from pathlib import Path
from django.conf import settings
from django.http import Http404, HttpResponse, HttpResponseNotFound
from django.template import Context, Engine, TemplateDoesNotExist
from django.template.defaultfilters import pprint
from django.urls import resolve
from django.utils import timezone
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str
from django.utils.module_loading import import_string
from django.utils.regex_helper import _lazy_re_compile
from django.utils.version import PY311, get_docs_version
# Minimal Django templates engine to render the error templates
# regardless of the project's TEMPLATES setting. Templates are
# read directly from the filesystem so that the error handler
# works even if the template loader is broken.
DEBUG_ENGINE = Engine(
debug=True,
libraries={"i18n": "django.templatetags.i18n"},
)
def builtin_template_path(name):
"""
Return a path to a builtin template.
Avoid calling this function at the module level or in a class definition
because __file__ may not exist, e.g. in frozen environments.
"""
return Path(__file__).parent / "templates" / name
class ExceptionCycleWarning(UserWarning):
pass
class CallableSettingWrapper:
"""
Object to wrap a callable appearing in settings.
* Not to be called in the debug page (#21345).
* Not to break the debug page if the callable forbids setting attributes
(#23070).
"""
def __init__(self, callable_setting):
self._wrapped = callable_setting
def __repr__(self):
return repr(self._wrapped)
def technical_500_response(request, exc_type, exc_value, tb, status_code=500):
"""
Create a technical server error response. The last three arguments are
the values returned from sys.exc_info() and friends.
"""
reporter = get_exception_reporter_class(request)(request, exc_type, exc_value, tb)
if request.accepts("text/html"):
html = reporter.get_traceback_html()
return HttpResponse(html, status=status_code)
else:
text = reporter.get_traceback_text()
return HttpResponse(
text, status=status_code, content_type="text/plain; charset=utf-8"
)
@functools.lru_cache
def get_default_exception_reporter_filter():
# Instantiate the default filter for the first time and cache it.
return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)()
def get_exception_reporter_filter(request):
default_filter = get_default_exception_reporter_filter()
return getattr(request, "exception_reporter_filter", default_filter)
def get_exception_reporter_class(request):
default_exception_reporter_class = import_string(
settings.DEFAULT_EXCEPTION_REPORTER
)
return getattr(
request, "exception_reporter_class", default_exception_reporter_class
)
def get_caller(request):
resolver_match = request.resolver_match
if resolver_match is None:
try:
resolver_match = resolve(request.path)
except Http404:
pass
return "" if resolver_match is None else resolver_match._func_path
class SafeExceptionReporterFilter:
"""
Use annotations made by the sensitive_post_parameters and
sensitive_variables decorators to filter out sensitive information.
"""
cleansed_substitute = "********************"
hidden_settings = _lazy_re_compile(
"API|TOKEN|KEY|SECRET|PASS|SIGNATURE|HTTP_COOKIE", flags=re.I
)
def cleanse_setting(self, key, value):
"""
Cleanse an individual setting key/value of sensitive content. If the
value is a dictionary, recursively cleanse each key/value pair in it.
"""
if key == settings.SESSION_COOKIE_NAME:
is_sensitive = True
else:
try:
is_sensitive = self.hidden_settings.search(key)
except TypeError:
is_sensitive = False
if is_sensitive:
cleansed = self.cleansed_substitute
elif isinstance(value, dict):
cleansed = {k: self.cleanse_setting(k, v) for k, v in value.items()}
elif isinstance(value, list):
cleansed = [self.cleanse_setting("", v) for v in value]
elif isinstance(value, tuple):
cleansed = tuple([self.cleanse_setting("", v) for v in value])
else:
cleansed = value
if callable(cleansed):
cleansed = CallableSettingWrapper(cleansed)
return cleansed
def get_safe_settings(self):
"""
Return a dictionary of the settings module with values of sensitive
settings replaced with stars (*********).
"""
settings_dict = {}
for k in dir(settings):
if k.isupper():
settings_dict[k] = self.cleanse_setting(k, getattr(settings, k))
return settings_dict
def get_safe_request_meta(self, request):
"""
Return a dictionary of request.META with sensitive values redacted.
"""
if not hasattr(request, "META"):
return {}
return {k: self.cleanse_setting(k, v) for k, v in request.META.items()}
def get_safe_cookies(self, request):
"""
Return a dictionary of request.COOKIES with sensitive values redacted.
"""
if not hasattr(request, "COOKIES"):
return {}
return {k: self.cleanse_setting(k, v) for k, v in request.COOKIES.items()}
def is_active(self, request):
"""
This filter adds safety in production environments (i.e. DEBUG is
False). If DEBUG is True, then your site is not safe anyway.
This hook is provided as a convenience to easily activate or
deactivate the filter on a per-request basis.
"""
return settings.DEBUG is False
def get_cleansed_multivaluedict(self, request, multivaluedict):
"""
Replace the keys in a MultiValueDict marked as sensitive with stars.
This mitigates leaking sensitive POST parameters if something like
request.POST['nonexistent_key'] throws an exception (#21098).
"""
sensitive_post_parameters = getattr(request, "sensitive_post_parameters", [])
if self.is_active(request) and sensitive_post_parameters:
multivaluedict = multivaluedict.copy()
for param in sensitive_post_parameters:
if param in multivaluedict:
multivaluedict[param] = self.cleansed_substitute
return multivaluedict
def get_post_parameters(self, request):
"""
Replace the values of POST parameters marked as sensitive with
stars (*********).
"""
if request is None:
return {}
else:
sensitive_post_parameters = getattr(
request, "sensitive_post_parameters", []
)
if self.is_active(request) and sensitive_post_parameters:
cleansed = request.POST.copy()
if sensitive_post_parameters == "__ALL__":
# Cleanse all parameters.
for k in cleansed:
cleansed[k] = self.cleansed_substitute
return cleansed
else:
# Cleanse only the specified parameters.
for param in sensitive_post_parameters:
if param in cleansed:
cleansed[param] = self.cleansed_substitute
return cleansed
else:
return request.POST
def cleanse_special_types(self, request, value):
try:
# If value is lazy or a complex object of another kind, this check
# might raise an exception. The isinstance check forces evaluation, so
# lazy MultiValueDicts are still detected.
is_multivalue_dict = isinstance(value, MultiValueDict)
except Exception as e:
return "{!r} while evaluating {!r}".format(e, value)
if is_multivalue_dict:
# Cleanse MultiValueDicts (request.POST is the one we usually care about)
value = self.get_cleansed_multivaluedict(request, value)
return value
def get_traceback_frame_variables(self, request, tb_frame):
"""
Replace the values of variables marked as sensitive with
stars (*********).
"""
# Loop through the frame's callers to see if the sensitive_variables
# decorator was used.
current_frame = tb_frame.f_back
sensitive_variables = None
while current_frame is not None:
if (
current_frame.f_code.co_name == "sensitive_variables_wrapper"
and "sensitive_variables_wrapper" in current_frame.f_locals
):
# The sensitive_variables decorator was used, so we take note
# of the sensitive variables' names.
wrapper = current_frame.f_locals["sensitive_variables_wrapper"]
sensitive_variables = getattr(wrapper, "sensitive_variables", None)
break
current_frame = current_frame.f_back
cleansed = {}
if self.is_active(request) and sensitive_variables:
if sensitive_variables == "__ALL__":
# Cleanse all variables
for name in tb_frame.f_locals:
cleansed[name] = self.cleansed_substitute
else:
# Cleanse specified variables
for name, value in tb_frame.f_locals.items():
if name in sensitive_variables:
value = self.cleansed_substitute
else:
value = self.cleanse_special_types(request, value)
cleansed[name] = value
else:
# Potentially cleanse the request and any MultiValueDicts if they
# are one of the frame variables.
for name, value in tb_frame.f_locals.items():
cleansed[name] = self.cleanse_special_types(request, value)
if (
tb_frame.f_code.co_name == "sensitive_variables_wrapper"
and "sensitive_variables_wrapper" in tb_frame.f_locals
):
# For good measure, obfuscate the decorated function's arguments in
# the sensitive_variables decorator's frame, in case the variables
# associated with those arguments were meant to be obfuscated from
# the decorated function's frame.
cleansed["func_args"] = self.cleansed_substitute
cleansed["func_kwargs"] = self.cleansed_substitute
return cleansed.items()
class ExceptionReporter:
"""Organize and coordinate reporting on exceptions."""
@property
def html_template_path(self):
return builtin_template_path("technical_500.html")
@property
def text_template_path(self):
return builtin_template_path("technical_500.txt")
def __init__(self, request, exc_type, exc_value, tb, is_email=False):
self.request = request
self.filter = get_exception_reporter_filter(self.request)
self.exc_type = exc_type
self.exc_value = exc_value
self.tb = tb
self.is_email = is_email
self.template_info = getattr(self.exc_value, "template_debug", None)
self.template_does_not_exist = False
self.postmortem = None
def _get_raw_insecure_uri(self):
"""
Return an absolute URI from variables available in this request. Skip
allowed hosts protection, so it may return an insecure URI.
"""
return "{scheme}://{host}{path}".format(
scheme=self.request.scheme,
host=self.request._get_raw_host(),
path=self.request.get_full_path(),
)
def get_traceback_data(self):
"""Return a dictionary containing traceback information."""
if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist):
self.template_does_not_exist = True
self.postmortem = self.exc_value.chain or [self.exc_value]
frames = self.get_traceback_frames()
for i, frame in enumerate(frames):
if "vars" in frame:
frame_vars = []
for k, v in frame["vars"]:
v = pprint(v)
# Trim large blobs of data
if len(v) > 4096:
v = "%s… <trimmed %d bytes string>" % (v[0:4096], len(v))
frame_vars.append((k, v))
frame["vars"] = frame_vars
frames[i] = frame
unicode_hint = ""
if self.exc_type and issubclass(self.exc_type, UnicodeError):
start = getattr(self.exc_value, "start", None)
end = getattr(self.exc_value, "end", None)
if start is not None and end is not None:
unicode_str = self.exc_value.args[1]
unicode_hint = force_str(
unicode_str[max(start - 5, 0) : min(end + 5, len(unicode_str))],
"ascii",
errors="replace",
)
from django import get_version
if self.request is None:
user_str = None
else:
try:
user_str = str(self.request.user)
except Exception:
# request.user may raise OperationalError if the database is
# unavailable, for example.
user_str = "[unable to retrieve the current user]"
c = {
"is_email": self.is_email,
"unicode_hint": unicode_hint,
"frames": frames,
"request": self.request,
"request_meta": self.filter.get_safe_request_meta(self.request),
"request_COOKIES_items": self.filter.get_safe_cookies(self.request).items(),
"user_str": user_str,
"filtered_POST_items": list(
self.filter.get_post_parameters(self.request).items()
),
"settings": self.filter.get_safe_settings(),
"sys_executable": sys.executable,
"sys_version_info": "%d.%d.%d" % sys.version_info[0:3],
"server_time": timezone.now(),
"django_version_info": get_version(),
"sys_path": sys.path,
"template_info": self.template_info,
"template_does_not_exist": self.template_does_not_exist,
"postmortem": self.postmortem,
}
if self.request is not None:
c["request_GET_items"] = self.request.GET.items()
c["request_FILES_items"] = self.request.FILES.items()
c["request_insecure_uri"] = self._get_raw_insecure_uri()
c["raising_view_name"] = get_caller(self.request)
# Check whether exception info is available
if self.exc_type:
c["exception_type"] = self.exc_type.__name__
if self.exc_value:
c["exception_value"] = str(self.exc_value)
if exc_notes := getattr(self.exc_value, "__notes__", None):
c["exception_notes"] = "\n" + "\n".join(exc_notes)
if frames:
c["lastframe"] = frames[-1]
return c
def get_traceback_html(self):
"""Return HTML version of debug 500 HTTP error page."""
with self.html_template_path.open(encoding="utf-8") as fh:
t = DEBUG_ENGINE.from_string(fh.read())
c = Context(self.get_traceback_data(), use_l10n=False)
return t.render(c)
def get_traceback_text(self):
"""Return plain text version of debug 500 HTTP error page."""
with self.text_template_path.open(encoding="utf-8") as fh:
t = DEBUG_ENGINE.from_string(fh.read())
c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False)
return t.render(c)
def _get_source(self, filename, loader, module_name):
source = None
if hasattr(loader, "get_source"):
try:
source = loader.get_source(module_name)
except ImportError:
pass
if source is not None:
source = source.splitlines()
if source is None:
try:
with open(filename, "rb") as fp:
source = fp.read().splitlines()
except OSError:
pass
return source
def _get_lines_from_file(
self, filename, lineno, context_lines, loader=None, module_name=None
):
"""
Return context_lines before and after lineno from file.
Return (pre_context_lineno, pre_context, context_line, post_context).
"""
source = self._get_source(filename, loader, module_name)
if source is None:
return None, [], None, []
# If we just read the source from a file, or if the loader did not
# apply tokenize.detect_encoding to decode the source into a
# string, then we should do that ourselves.
if isinstance(source[0], bytes):
encoding = "ascii"
for line in source[:2]:
# File coding may be specified. Match pattern from PEP-263
# (https://www.python.org/dev/peps/pep-0263/)
match = re.search(rb"coding[:=]\s*([-\w.]+)", line)
if match:
encoding = match[1].decode("ascii")
break
source = [str(sline, encoding, "replace") for sline in source]
lower_bound = max(0, lineno - context_lines)
upper_bound = lineno + context_lines
try:
pre_context = source[lower_bound:lineno]
context_line = source[lineno]
post_context = source[lineno + 1 : upper_bound]
except IndexError:
return None, [], None, []
return lower_bound, pre_context, context_line, post_context
def _get_explicit_or_implicit_cause(self, exc_value):
explicit = getattr(exc_value, "__cause__", None)
suppress_context = getattr(exc_value, "__suppress_context__", None)
implicit = getattr(exc_value, "__context__", None)
return explicit or (None if suppress_context else implicit)
def get_traceback_frames(self):
# Get the exception and all its causes
exceptions = []
exc_value = self.exc_value
while exc_value:
exceptions.append(exc_value)
exc_value = self._get_explicit_or_implicit_cause(exc_value)
if exc_value in exceptions:
warnings.warn(
"Cycle in the exception chain detected: exception '%s' "
"encountered again." % exc_value,
ExceptionCycleWarning,
)
# Avoid infinite loop if there's a cyclic reference (#29393).
break
frames = []
# No exceptions were supplied to ExceptionReporter
if not exceptions:
return frames
# In case there's just one exception, take the traceback from self.tb
exc_value = exceptions.pop()
tb = self.tb if not exceptions else exc_value.__traceback__
while True:
frames.extend(self.get_exception_traceback_frames(exc_value, tb))
try:
exc_value = exceptions.pop()
except IndexError:
break
tb = exc_value.__traceback__
return frames
def get_exception_traceback_frames(self, exc_value, tb):
exc_cause = self._get_explicit_or_implicit_cause(exc_value)
exc_cause_explicit = getattr(exc_value, "__cause__", True)
if tb is None:
yield {
"exc_cause": exc_cause,
"exc_cause_explicit": exc_cause_explicit,
"tb": None,
"type": "user",
}
while tb is not None:
# Support for __traceback_hide__ which is used by a few libraries
# to hide internal frames.
if tb.tb_frame.f_locals.get("__traceback_hide__"):
tb = tb.tb_next
continue
filename = tb.tb_frame.f_code.co_filename
function = tb.tb_frame.f_code.co_name
lineno = tb.tb_lineno - 1
loader = tb.tb_frame.f_globals.get("__loader__")
module_name = tb.tb_frame.f_globals.get("__name__") or ""
(
pre_context_lineno,
pre_context,
context_line,
post_context,
) = self._get_lines_from_file(
filename,
lineno,
7,
loader,
module_name,
)
if pre_context_lineno is None:
pre_context_lineno = lineno
pre_context = []
context_line = "<source code not available>"
post_context = []
colno = tb_area_colno = ""
if PY311:
_, _, start_column, end_column = next(
itertools.islice(
tb.tb_frame.f_code.co_positions(), tb.tb_lasti // 2, None
)
)
if start_column and end_column:
underline = "^" * (end_column - start_column)
spaces = " " * (start_column + len(str(lineno + 1)) + 2)
colno = f"\n{spaces}{underline}"
tb_area_spaces = " " * (
4
+ start_column
- (len(context_line) - len(context_line.lstrip()))
)
tb_area_colno = f"\n{tb_area_spaces}{underline}"
yield {
"exc_cause": exc_cause,
"exc_cause_explicit": exc_cause_explicit,
"tb": tb,
"type": "django" if module_name.startswith("django.") else "user",
"filename": filename,
"function": function,
"lineno": lineno + 1,
"vars": self.filter.get_traceback_frame_variables(
self.request, tb.tb_frame
),
"id": id(tb),
"pre_context": pre_context,
"context_line": context_line,
"post_context": post_context,
"pre_context_lineno": pre_context_lineno + 1,
"colno": colno,
"tb_area_colno": tb_area_colno,
}
tb = tb.tb_next
def technical_404_response(request, exception):
"""Create a technical 404 error response. `exception` is the Http404."""
try:
error_url = exception.args[0]["path"]
except (IndexError, TypeError, KeyError):
error_url = request.path_info[1:] # Trim leading slash
try:
tried = exception.args[0]["tried"]
except (IndexError, TypeError, KeyError):
resolved = True
tried = request.resolver_match.tried if request.resolver_match else None
else:
resolved = False
if not tried or ( # empty URLconf
request.path == "/"
and len(tried) == 1
and len(tried[0]) == 1 # default URLconf
and getattr(tried[0][0], "app_name", "")
== getattr(tried[0][0], "namespace", "")
== "admin"
):
return default_urlconf(request)
urlconf = getattr(request, "urlconf", settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
with builtin_template_path("technical_404.html").open(encoding="utf-8") as fh:
t = DEBUG_ENGINE.from_string(fh.read())
reporter_filter = get_default_exception_reporter_filter()
c = Context(
{
"urlconf": urlconf,
"root_urlconf": settings.ROOT_URLCONF,
"request_path": error_url,
"urlpatterns": tried,
"resolved": resolved,
"reason": str(exception),
"request": request,
"settings": reporter_filter.get_safe_settings(),
"raising_view_name": get_caller(request),
}
)
return HttpResponseNotFound(t.render(c))
def default_urlconf(request):
"""Create an empty URLconf 404 error response."""
with builtin_template_path("default_urlconf.html").open(encoding="utf-8") as fh:
t = DEBUG_ENGINE.from_string(fh.read())
c = Context(
{
"version": get_docs_version(),
}
)
return HttpResponse(t.render(c))
|
062e78d83788fd4321c811256c3ad5c6d5fae6712d218ca264304404a944a2d2 | import gzip
import re
import secrets
import unicodedata
from gzip import GzipFile
from gzip import compress as gzip_compress
from io import BytesIO
from django.core.exceptions import SuspiciousFileOperation
from django.utils.functional import SimpleLazyObject, keep_lazy_text, lazy
from django.utils.regex_helper import _lazy_re_compile
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy, pgettext
@keep_lazy_text
def capfirst(x):
"""Capitalize the first letter of a string."""
if not x:
return x
if not isinstance(x, str):
x = str(x)
return x[0].upper() + x[1:]
# Set up regular expressions
re_words = _lazy_re_compile(r"<[^>]+?>|([^<>\s]+)", re.S)
re_chars = _lazy_re_compile(r"<[^>]+?>|(.)", re.S)
re_tag = _lazy_re_compile(r"<(/)?(\S+?)(?:(\s*/)|\s.*?)?>", re.S)
re_newlines = _lazy_re_compile(r"\r\n|\r") # Used in normalize_newlines
re_camel_case = _lazy_re_compile(r"(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))")
@keep_lazy_text
def wrap(text, width):
"""
A word-wrap function that preserves existing line breaks. Expects that
existing line breaks are posix newlines.
Preserve all white space, except that added line breaks consume the space
on which they break the line.
Don't wrap long words, thus the output text may have lines longer than
``width``.
"""
def _generator():
for line in text.splitlines(True): # True keeps trailing linebreaks
max_width = min((line.endswith("\n") and width + 1 or width), width)
while len(line) > max_width:
space = line[: max_width + 1].rfind(" ") + 1
if space == 0:
space = line.find(" ") + 1
if space == 0:
yield line
line = ""
break
yield "%s\n" % line[: space - 1]
line = line[space:]
max_width = min((line.endswith("\n") and width + 1 or width), width)
if line:
yield line
return "".join(_generator())
class Truncator(SimpleLazyObject):
"""
An object used to truncate text, either by characters or words.
"""
def __init__(self, text):
super().__init__(lambda: str(text))
def add_truncation_text(self, text, truncate=None):
if truncate is None:
truncate = pgettext(
"String to return when truncating text", "%(truncated_text)s…"
)
if "%(truncated_text)s" in truncate:
return truncate % {"truncated_text": text}
# The truncation text didn't contain the %(truncated_text)s string
# replacement argument, so just append it to the text.
if text.endswith(truncate):
# But don't append the truncation text if the current text already
# ends in this.
return text
return "%s%s" % (text, truncate)
def chars(self, num, truncate=None, html=False):
"""
Return the text truncated to be no longer than the specified number
of characters.
`truncate` specifies what should be used to notify that the string has
been truncated, defaulting to a translatable string of an ellipsis.
"""
self._setup()
length = int(num)
text = unicodedata.normalize("NFC", self._wrapped)
# Calculate the length to truncate to (max length - end_text length)
truncate_len = length
for char in self.add_truncation_text("", truncate):
if not unicodedata.combining(char):
truncate_len -= 1
if truncate_len == 0:
break
if html:
return self._truncate_html(length, truncate, text, truncate_len, False)
return self._text_chars(length, truncate, text, truncate_len)
def _text_chars(self, length, truncate, text, truncate_len):
"""Truncate a string after a certain number of chars."""
s_len = 0
end_index = None
for i, char in enumerate(text):
if unicodedata.combining(char):
# Don't consider combining characters
# as adding to the string length
continue
s_len += 1
if end_index is None and s_len > truncate_len:
end_index = i
if s_len > length:
# Return the truncated string
return self.add_truncation_text(text[: end_index or 0], truncate)
# Return the original string since no truncation was necessary
return text
def words(self, num, truncate=None, html=False):
"""
Truncate a string after a certain number of words. `truncate` specifies
what should be used to notify that the string has been truncated,
defaulting to ellipsis.
"""
self._setup()
length = int(num)
if html:
return self._truncate_html(length, truncate, self._wrapped, length, True)
return self._text_words(length, truncate)
def _text_words(self, length, truncate):
"""
Truncate a string after a certain number of words.
Strip newlines in the string.
"""
words = self._wrapped.split()
if len(words) > length:
words = words[:length]
return self.add_truncation_text(" ".join(words), truncate)
return " ".join(words)
def _truncate_html(self, length, truncate, text, truncate_len, words):
"""
Truncate HTML to a certain number of chars (not counting tags and
comments), or, if words is True, then to a certain number of words.
Close opened tags if they were correctly closed in the given HTML.
Preserve newlines in the HTML.
"""
if words and length <= 0:
return ""
html4_singlets = (
"br",
"col",
"link",
"base",
"img",
"param",
"area",
"hr",
"input",
)
# Count non-HTML chars/words and keep note of open tags
pos = 0
end_text_pos = 0
current_len = 0
open_tags = []
regex = re_words if words else re_chars
while current_len <= length:
m = regex.search(text, pos)
if not m:
# Checked through whole string
break
pos = m.end(0)
if m[1]:
# It's an actual non-HTML word or char
current_len += 1
if current_len == truncate_len:
end_text_pos = pos
continue
# Check for tag
tag = re_tag.match(m[0])
if not tag or current_len >= truncate_len:
# Don't worry about non-tags or tags after our truncate point
continue
closing_tag, tagname, self_closing = tag.groups()
# Element names are always case-insensitive
tagname = tagname.lower()
if self_closing or tagname in html4_singlets:
pass
elif closing_tag:
# Check for match in open tags list
try:
i = open_tags.index(tagname)
except ValueError:
pass
else:
# SGML: An end tag closes, back to the matching start tag,
# all unclosed intervening start tags with omitted end tags
open_tags = open_tags[i + 1 :]
else:
# Add it to the start of the open tags list
open_tags.insert(0, tagname)
if current_len <= length:
return text
out = text[:end_text_pos]
truncate_text = self.add_truncation_text("", truncate)
if truncate_text:
out += truncate_text
# Close any tags still open
for tag in open_tags:
out += "</%s>" % tag
# Return string
return out
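# Illustrative behavior of the two public entry points (the default
# truncation text is a translatable ellipsis):
#
#   Truncator("The quick brown fox").words(2)        # 'The quick…'
#   Truncator("<p>The quick brown fox</p>").words(2, html=True)
#   # -> '<p>The quick…</p>' (open tags are closed after truncating)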
@keep_lazy_text
def get_valid_filename(name):
"""
Return the given string converted to a string that can be used for a clean
filename. Remove leading and trailing spaces; convert other spaces to
underscores; and remove anything that is not an alphanumeric, dash,
underscore, or dot.
>>> get_valid_filename("john's portrait in 2004.jpg")
'johns_portrait_in_2004.jpg'
"""
s = str(name).strip().replace(" ", "_")
s = re.sub(r"(?u)[^-\w.]", "", s)
if s in {"", ".", ".."}:
raise SuspiciousFileOperation("Could not derive file name from '%s'" % name)
return s
@keep_lazy_text
def get_text_list(list_, last_word=gettext_lazy("or")):
"""
>>> get_text_list(['a', 'b', 'c', 'd'])
'a, b, c or d'
>>> get_text_list(['a', 'b', 'c'], 'and')
'a, b and c'
>>> get_text_list(['a', 'b'], 'and')
'a and b'
>>> get_text_list(['a'])
'a'
>>> get_text_list([])
''
"""
if not list_:
return ""
if len(list_) == 1:
return str(list_[0])
return "%s %s %s" % (
# Translators: This string is used as a separator between list elements
_(", ").join(str(i) for i in list_[:-1]),
str(last_word),
str(list_[-1]),
)
@keep_lazy_text
def normalize_newlines(text):
"""Normalize CRLF and CR newlines to just LF."""
return re_newlines.sub("\n", str(text))
@keep_lazy_text
def phone2numeric(phone):
"""Convert a phone number with letters into its numeric equivalent."""
char2number = {
"a": "2",
"b": "2",
"c": "2",
"d": "3",
"e": "3",
"f": "3",
"g": "4",
"h": "4",
"i": "4",
"j": "5",
"k": "5",
"l": "5",
"m": "6",
"n": "6",
"o": "6",
"p": "7",
"q": "7",
"r": "7",
"s": "7",
"t": "8",
"u": "8",
"v": "8",
"w": "9",
"x": "9",
"y": "9",
"z": "9",
}
return "".join(char2number.get(c, c) for c in phone.lower())
def _get_random_filename(max_random_bytes):
return b"a" * secrets.randbelow(max_random_bytes)
def compress_string(s, *, max_random_bytes=None):
compressed_data = gzip_compress(s, compresslevel=6, mtime=0)
if not max_random_bytes:
return compressed_data
compressed_view = memoryview(compressed_data)
header = bytearray(compressed_view[:10])
header[3] = gzip.FNAME
filename = _get_random_filename(max_random_bytes) + b"\x00"
return bytes(header) + filename + compressed_view[10:]
class StreamingBuffer(BytesIO):
def read(self):
ret = self.getvalue()
self.seek(0)
self.truncate()
return ret
# Like compress_string, but for iterators of strings.
def compress_sequence(sequence, *, max_random_bytes=None):
buf = StreamingBuffer()
filename = _get_random_filename(max_random_bytes) if max_random_bytes else None
with GzipFile(
filename=filename, mode="wb", compresslevel=6, fileobj=buf, mtime=0
) as zfile:
# Output headers...
yield buf.read()
for item in sequence:
zfile.write(item)
data = buf.read()
if data:
yield data
yield buf.read()
# Expression to match some_token and some_token="with spaces" (and similarly
# for single-quoted strings).
smart_split_re = _lazy_re_compile(
r"""
((?:
[^\s'"]*
(?:
(?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*')
[^\s'"]*
)+
) | \S+)
""",
re.VERBOSE,
)
def smart_split(text):
r"""
Generator that splits a string by spaces, leaving quoted phrases together.
Supports both single and double quotes, and supports escaping quotes with
backslashes. In the output, strings will keep their initial and trailing
quote marks and escaped quotes will remain escaped (the results can then
be further processed with unescape_string_literal()).
>>> list(smart_split(r'This is "a person\'s" test.'))
['This', 'is', '"a person\\\'s"', 'test.']
>>> list(smart_split(r"Another 'person\'s' test."))
['Another', "'person\\'s'", 'test.']
>>> list(smart_split(r'A "\"funky\" style" test.'))
['A', '"\\"funky\\" style"', 'test.']
"""
for bit in smart_split_re.finditer(str(text)):
yield bit[0]
@keep_lazy_text
def unescape_string_literal(s):
r"""
Convert quoted string literals to unquoted strings with escaped quotes and
backslashes unquoted::
>>> unescape_string_literal('"abc"')
'abc'
>>> unescape_string_literal("'abc'")
'abc'
>>> unescape_string_literal('"a \"bc\""')
'a "bc"'
>>> unescape_string_literal("'\'ab\' c'")
"'ab' c"
"""
if not s or s[0] not in "\"'" or s[-1] != s[0]:
raise ValueError("Not a string literal: %r" % s)
quote = s[0]
return s[1:-1].replace(r"\%s" % quote, quote).replace(r"\\", "\\")
@keep_lazy_text
def slugify(value, allow_unicode=False):
"""
Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
dashes to single dashes. Remove characters that aren't alphanumerics,
underscores, or hyphens. Convert to lowercase. Also strip leading and
trailing whitespace, dashes, and underscores.
"""
value = str(value)
if allow_unicode:
value = unicodedata.normalize("NFKC", value)
else:
value = (
unicodedata.normalize("NFKD", value)
.encode("ascii", "ignore")
.decode("ascii")
)
value = re.sub(r"[^\w\s-]", "", value.lower())
return re.sub(r"[-\s]+", "-", value).strip("-_")
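# Illustrative results:
#
#   slugify("Hello, World!")                    # 'hello-world'
#   slugify(" Düsseldorf ")                     # 'dusseldorf'
#   slugify("Düsseldorf", allow_unicode=True)   # 'düsseldorf'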
def camel_case_to_spaces(value):
"""
Split CamelCase and convert to lowercase. Strip surrounding whitespace.
"""
return re_camel_case.sub(r" \1", value).strip().lower()
def _format_lazy(format_string, *args, **kwargs):
"""
Apply str.format() on 'format_string' where format_string, args,
and/or kwargs might be lazy.
"""
return format_string.format(*args, **kwargs)
format_lazy = lazy(_format_lazy, str)
|
7526129a1736e29c67958d709a3e5077780817934e2c3fb74aa2e488a7a6bc3e | import inspect
import warnings
from asgiref.sync import iscoroutinefunction, markcoroutinefunction, sync_to_async
class RemovedInDjango50Warning(DeprecationWarning):
pass
class RemovedInDjango51Warning(PendingDeprecationWarning):
pass
RemovedInNextVersionWarning = RemovedInDjango50Warning
RemovedAfterNextVersionWarning = RemovedInDjango51Warning
class warn_about_renamed_method:
def __init__(
self, class_name, old_method_name, new_method_name, deprecation_warning
):
self.class_name = class_name
self.old_method_name = old_method_name
self.new_method_name = new_method_name
self.deprecation_warning = deprecation_warning
def __call__(self, f):
def wrapper(*args, **kwargs):
warnings.warn(
"`%s.%s` is deprecated, use `%s` instead."
% (self.class_name, self.old_method_name, self.new_method_name),
self.deprecation_warning,
2,
)
return f(*args, **kwargs)
return wrapper
class RenameMethodsBase(type):
"""
Handles the deprecation paths when renaming a method.
It does the following:
1) Define the new method if missing and complain about it.
2) Define the old method if missing.
3) Complain whenever an old method is called.
See #15363 for more details.
"""
renamed_methods = ()
def __new__(cls, name, bases, attrs):
new_class = super().__new__(cls, name, bases, attrs)
for base in inspect.getmro(new_class):
class_name = base.__name__
for renamed_method in cls.renamed_methods:
old_method_name = renamed_method[0]
old_method = base.__dict__.get(old_method_name)
new_method_name = renamed_method[1]
new_method = base.__dict__.get(new_method_name)
deprecation_warning = renamed_method[2]
wrapper = warn_about_renamed_method(class_name, *renamed_method)
# Define the new method if missing and complain about it
if not new_method and old_method:
warnings.warn(
"`%s.%s` method should be renamed `%s`."
% (class_name, old_method_name, new_method_name),
deprecation_warning,
2,
)
setattr(base, new_method_name, old_method)
setattr(base, old_method_name, wrapper(old_method))
# Define the old method as a wrapped call to the new method.
if not old_method and new_method:
setattr(base, old_method_name, wrapper(new_method))
return new_class
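# A minimal sketch of how RenameMethodsBase is wired up; the class and method
# names below are hypothetical, not part of a public API:
#
#   class RenameManagerMethods(RenameMethodsBase):
#       renamed_methods = (
#           ("old_method", "new_method", RemovedInNextVersionWarning),
#       )
#
#   class Manager(metaclass=RenameManagerMethods):
#       def new_method(self):
#           pass
#
#   Manager().old_method()  # warns and delegates to new_method()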
class DeprecationInstanceCheck(type):
def __instancecheck__(self, instance):
warnings.warn(
"`%s` is deprecated, use `%s` instead." % (self.__name__, self.alternative),
self.deprecation_warning,
2,
)
return super().__instancecheck__(instance)
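# DeprecationInstanceCheck expects subclasses to define `alternative` and
# `deprecation_warning`; a hypothetical sketch:
#
#   class OldAlias(metaclass=DeprecationInstanceCheck):
#       alternative = "NewClass"
#       deprecation_warning = RemovedInNextVersionWarning
#
#   isinstance(obj, OldAlias)  # warns, then performs the normal check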
class MiddlewareMixin:
sync_capable = True
async_capable = True
def __init__(self, get_response):
if get_response is None:
raise ValueError("get_response must be provided.")
self.get_response = get_response
self._async_check()
super().__init__()
def __repr__(self):
return "<%s get_response=%s>" % (
self.__class__.__qualname__,
getattr(
self.get_response,
"__qualname__",
self.get_response.__class__.__name__,
),
)
def _async_check(self):
"""
If get_response is a coroutine function, turn the middleware into async
mode so that a thread is not consumed during the whole request.
"""
if iscoroutinefunction(self.get_response):
# Mark the class as async-capable, but do the actual switch
# inside __call__ to avoid swapping out dunder methods
markcoroutinefunction(self)
def __call__(self, request):
# Exit out to async mode, if needed
if iscoroutinefunction(self):
return self.__acall__(request)
response = None
if hasattr(self, "process_request"):
response = self.process_request(request)
response = response or self.get_response(request)
if hasattr(self, "process_response"):
response = self.process_response(request, response)
return response
async def __acall__(self, request):
"""
Async version of __call__ that is swapped in when an async request
is running.
"""
response = None
if hasattr(self, "process_request"):
response = await sync_to_async(
self.process_request,
thread_sensitive=True,
)(request)
response = response or await self.get_response(request)
if hasattr(self, "process_response"):
response = await sync_to_async(
self.process_response,
thread_sensitive=True,
)(request, response)
return response
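# A hedged sketch of a middleware built on MiddlewareMixin; the class name and
# header are illustrative. Both hooks are optional, and the mixin dispatches
# them for sync and async requests alike, so the subclass needs no
# asyncio-specific code:
#
#   class ExampleHeaderMiddleware(MiddlewareMixin):
#       def process_response(self, request, response):
#           response.headers["X-Example"] = "1"
#           return response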
|
b63237e1e36ca9bb593559ed80ff4032fb84d22b7fc3ceba090eaffff645b359 | import datetime
from django.utils.html import avoid_wrapping
from django.utils.timezone import is_aware
from django.utils.translation import gettext, ngettext_lazy
TIME_STRINGS = {
"year": ngettext_lazy("%(num)d year", "%(num)d years", "num"),
"month": ngettext_lazy("%(num)d month", "%(num)d months", "num"),
"week": ngettext_lazy("%(num)d week", "%(num)d weeks", "num"),
"day": ngettext_lazy("%(num)d day", "%(num)d days", "num"),
"hour": ngettext_lazy("%(num)d hour", "%(num)d hours", "num"),
"minute": ngettext_lazy("%(num)d minute", "%(num)d minutes", "num"),
}
TIME_STRINGS_KEYS = list(TIME_STRINGS.keys())
TIME_CHUNKS = [
60 * 60 * 24 * 7, # week
60 * 60 * 24, # day
60 * 60, # hour
60, # minute
]
MONTHS_DAYS = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
def timesince(d, now=None, reversed=False, time_strings=None, depth=2):
"""
Take two datetime objects and return the time between d and now as a nicely
formatted string, e.g. "10 minutes". If d occurs after now, return
"0 minutes".
Units used are years, months, weeks, days, hours, and minutes.
Seconds and microseconds are ignored.
The algorithm takes into account the varying duration of years and months.
There is exactly "1 year, 1 month" between 2013/02/10 and 2014/03/10,
but also between 2007/08/10 and 2008/09/10 despite the delta being 393 days
in the former case and 397 in the latter.
Up to `depth` adjacent units will be displayed. For example,
"2 weeks, 3 days" and "1 year, 3 months" are possible outputs, but
"2 weeks, 3 hours" and "1 year, 5 days" are not.
`time_strings` is an optional dict of strings to replace the default
TIME_STRINGS dict.
`depth` is an optional integer to control the number of adjacent time
units returned.
Originally adapted from
https://web.archive.org/web/20060617175230/http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
Modified to improve results for years and months.
"""
if time_strings is None:
time_strings = TIME_STRINGS
if depth <= 0:
raise ValueError("depth must be greater than 0.")
# Convert datetime.date to datetime.datetime for comparison.
if not isinstance(d, datetime.datetime):
d = datetime.datetime(d.year, d.month, d.day)
if now and not isinstance(now, datetime.datetime):
now = datetime.datetime(now.year, now.month, now.day)
now = now or datetime.datetime.now(datetime.timezone.utc if is_aware(d) else None)
if reversed:
d, now = now, d
delta = now - d
# Ignore microseconds.
since = delta.days * 24 * 60 * 60 + delta.seconds
if since <= 0:
# d is in the future compared to now, stop processing.
return avoid_wrapping(time_strings["minute"] % {"num": 0})
# Get years and months.
total_months = (now.year - d.year) * 12 + (now.month - d.month)
if d.day > now.day or (d.day == now.day and d.time() > now.time()):
total_months -= 1
years, months = divmod(total_months, 12)
# Calculate the remaining time.
# Create a "pivot" datetime shifted from d by years and months, then use
# that to determine the other parts.
if years or months:
pivot_year = d.year + years
pivot_month = d.month + months
if pivot_month > 12:
pivot_month -= 12
pivot_year += 1
pivot = datetime.datetime(
pivot_year,
pivot_month,
min(MONTHS_DAYS[pivot_month - 1], d.day),
d.hour,
d.minute,
d.second,
)
else:
pivot = d
remaining_time = (now - pivot).total_seconds()
partials = [years, months]
for chunk in TIME_CHUNKS:
count = remaining_time // chunk
partials.append(count)
remaining_time -= chunk * count
# Find the first non-zero part (if any) and then build the result, until
# depth.
i = 0
for i, value in enumerate(partials):
if value != 0:
break
else:
return avoid_wrapping(time_strings["minute"] % {"num": 0})
result = []
current_depth = 0
while i < len(TIME_STRINGS_KEYS) and current_depth < depth:
value = partials[i]
if value == 0:
break
name = TIME_STRINGS_KEYS[i]
result.append(avoid_wrapping(time_strings[name] % {"num": value}))
current_depth += 1
i += 1
return gettext(", ").join(result)
def timeuntil(d, now=None, time_strings=None, depth=2):
"""
Like timesince, but return a string measuring the time until the given time.
"""
return timesince(d, now, reversed=True, time_strings=time_strings, depth=depth)
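# A hedged usage sketch; real output joins the number and unit with a
# non-breaking space (via avoid_wrapping) and depends on the active
# translation:
#
#   >>> import datetime
#   >>> timesince(datetime.datetime(2023, 1, 1), now=datetime.datetime(2024, 2, 4))
#   '1 year, 1 month'
#   >>> timesince(datetime.datetime(2023, 1, 1), now=datetime.datetime(2024, 2, 4), depth=1)
#   '1 year'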
|
71e1e874b800dcb708db0e3ad29dcac032e2889c54a79da0c682f829e53a8ad0 | """
PHP date() style date formatting
See https://www.php.net/date for format strings
Usage:
>>> from datetime import datetime
>>> d = datetime.now()
>>> df = DateFormat(d)
>>> print(df.format('jS F Y H:i'))
7th October 2003 11:39
>>>
"""
import calendar
from datetime import date, datetime, time
from email.utils import format_datetime as format_datetime_rfc5322
from django.utils.dates import (
MONTHS,
MONTHS_3,
MONTHS_ALT,
MONTHS_AP,
WEEKDAYS,
WEEKDAYS_ABBR,
)
from django.utils.regex_helper import _lazy_re_compile
from django.utils.timezone import (
_datetime_ambiguous_or_imaginary,
get_default_timezone,
is_naive,
make_aware,
)
from django.utils.translation import gettext as _
re_formatchars = _lazy_re_compile(r"(?<!\\)([aAbcdDeEfFgGhHiIjlLmMnNoOPrsStTUuwWyYzZ])")
re_escaped = _lazy_re_compile(r"\\(.)")
class Formatter:
def format(self, formatstr):
pieces = []
for i, piece in enumerate(re_formatchars.split(str(formatstr))):
if i % 2:
if type(self.data) is date and hasattr(TimeFormat, piece):
raise TypeError(
"The format for date objects may not contain "
"time-related format specifiers (found '%s')." % piece
)
pieces.append(str(getattr(self, piece)()))
elif piece:
pieces.append(re_escaped.sub(r"\1", piece))
return "".join(pieces)
class TimeFormat(Formatter):
def __init__(self, obj):
self.data = obj
self.timezone = None
if isinstance(obj, datetime):
# Timezone is only supported when formatting datetime objects, not
# date objects (timezone information not appropriate), or time
# objects (against established Django policy).
if is_naive(obj):
timezone = get_default_timezone()
else:
timezone = obj.tzinfo
if not _datetime_ambiguous_or_imaginary(obj, timezone):
self.timezone = timezone
def a(self):
"'a.m.' or 'p.m.'"
if self.data.hour > 11:
return _("p.m.")
return _("a.m.")
def A(self):
"'AM' or 'PM'"
if self.data.hour > 11:
return _("PM")
return _("AM")
def e(self):
"""
Timezone name.
If timezone information is not available, return an empty string.
"""
if not self.timezone:
return ""
try:
if getattr(self.data, "tzinfo", None):
return self.data.tzname() or ""
except NotImplementedError:
pass
return ""
def f(self):
"""
Time, in 12-hour hours and minutes, with minutes left off if they're
zero.
Examples: '1', '1:30', '2:05', '2'
Proprietary extension.
"""
hour = self.data.hour % 12 or 12
minute = self.data.minute
return "%d:%02d" % (hour, minute) if minute else hour
def g(self):
"Hour, 12-hour format without leading zeros; i.e. '1' to '12'"
return self.data.hour % 12 or 12
def G(self):
"Hour, 24-hour format without leading zeros; i.e. '0' to '23'"
return self.data.hour
def h(self):
"Hour, 12-hour format; i.e. '01' to '12'"
return "%02d" % (self.data.hour % 12 or 12)
def H(self):
"Hour, 24-hour format; i.e. '00' to '23'"
return "%02d" % self.data.hour
def i(self):
"Minutes; i.e. '00' to '59'"
return "%02d" % self.data.minute
def O(self): # NOQA: E743, E741
"""
Difference to Greenwich time in hours; e.g. '+0200', '-0430'.
If timezone information is not available, return an empty string.
"""
if self.timezone is None:
return ""
offset = self.timezone.utcoffset(self.data)
seconds = offset.days * 86400 + offset.seconds
sign = "-" if seconds < 0 else "+"
seconds = abs(seconds)
return "%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60)
def P(self):
"""
Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off
if they're zero and the strings 'midnight' and 'noon' if appropriate.
Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.'
Proprietary extension.
"""
if self.data.minute == 0 and self.data.hour == 0:
return _("midnight")
if self.data.minute == 0 and self.data.hour == 12:
return _("noon")
return "%s %s" % (self.f(), self.a())
def s(self):
"Seconds; i.e. '00' to '59'"
return "%02d" % self.data.second
def T(self):
"""
Time zone of this machine; e.g. 'EST' or 'MDT'.
If timezone information is not available, return an empty string.
"""
if self.timezone is None:
return ""
return str(self.timezone.tzname(self.data))
def u(self):
"Microseconds; i.e. '000000' to '999999'"
return "%06d" % self.data.microsecond
def Z(self):
"""
Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for
timezones west of UTC is always negative, and for those east of UTC is
always positive.
If timezone information is not available, return an empty string.
"""
if self.timezone is None:
return ""
offset = self.timezone.utcoffset(self.data)
# `offset` is a datetime.timedelta. For negative values (to the west of
# UTC) only days can be negative (days=-1) and seconds are always
# positive. e.g. UTC-1 -> timedelta(days=-1, seconds=82800, microseconds=0)
# Positive offsets have days=0
return offset.days * 86400 + offset.seconds
class DateFormat(TimeFormat):
def b(self):
"Month, textual, 3 letters, lowercase; e.g. 'jan'"
return MONTHS_3[self.data.month]
def c(self):
"""
ISO 8601 Format
Example : '2008-01-02T10:30:00.000123'
"""
return self.data.isoformat()
def d(self):
"Day of the month, 2 digits with leading zeros; i.e. '01' to '31'"
return "%02d" % self.data.day
def D(self):
"Day of the week, textual, 3 letters; e.g. 'Fri'"
return WEEKDAYS_ABBR[self.data.weekday()]
def E(self):
"Alternative month names as required by some locales. Proprietary extension."
return MONTHS_ALT[self.data.month]
def F(self):
"Month, textual, long; e.g. 'January'"
return MONTHS[self.data.month]
def I(self): # NOQA: E743, E741
"'1' if daylight saving time, '0' otherwise."
if self.timezone is None:
return ""
return "1" if self.timezone.dst(self.data) else "0"
def j(self):
"Day of the month without leading zeros; i.e. '1' to '31'"
return self.data.day
def l(self): # NOQA: E743, E741
"Day of the week, textual, long; e.g. 'Friday'"
return WEEKDAYS[self.data.weekday()]
def L(self):
"Boolean for whether it is a leap year; i.e. True or False"
return calendar.isleap(self.data.year)
def m(self):
"Month; i.e. '01' to '12'"
return "%02d" % self.data.month
def M(self):
"Month, textual, 3 letters; e.g. 'Jan'"
return MONTHS_3[self.data.month].title()
def n(self):
"Month without leading zeros; i.e. '1' to '12'"
return self.data.month
def N(self):
"Month abbreviation in Associated Press style. Proprietary extension."
return MONTHS_AP[self.data.month]
def o(self):
"ISO 8601 year number matching the ISO week number (W)"
return self.data.isocalendar()[0]
def r(self):
"RFC 5322 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'"
value = self.data
if not isinstance(value, datetime):
# Assume midnight in default timezone if datetime.date provided.
default_timezone = get_default_timezone()
value = datetime.combine(value, time.min).replace(tzinfo=default_timezone)
elif is_naive(value):
value = make_aware(value, timezone=self.timezone)
return format_datetime_rfc5322(value)
def S(self):
"""
English ordinal suffix for the day of the month, 2 characters; i.e.
'st', 'nd', 'rd' or 'th'.
"""
if self.data.day in (11, 12, 13): # Special case
return "th"
last = self.data.day % 10
if last == 1:
return "st"
if last == 2:
return "nd"
if last == 3:
return "rd"
return "th"
def t(self):
"Number of days in the given month; i.e. '28' to '31'"
return calendar.monthrange(self.data.year, self.data.month)[1]
def U(self):
"Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)"
value = self.data
if not isinstance(value, datetime):
value = datetime.combine(value, time.min)
return int(value.timestamp())
def w(self):
"Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)"
return (self.data.weekday() + 1) % 7
def W(self):
"ISO-8601 week number of year, weeks starting on Monday"
return self.data.isocalendar()[1]
def y(self):
"""Year, 2 digits with leading zeros; e.g. '99'."""
return "%02d" % (self.data.year % 100)
def Y(self):
"""Year, 4 digits with leading zeros; e.g. '1999'."""
return "%04d" % self.data.year
def z(self):
"""Day of the year, i.e. 1 to 366."""
return self.data.timetuple().tm_yday
def format(value, format_string):
"Convenience function"
df = DateFormat(value)
return df.format(format_string)
def time_format(value, format_string):
"Convenience function"
tf = TimeFormat(value)
return tf.format(format_string)
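# Backslash-escaping (handled by re_escaped above) lets characters that would
# otherwise be format specifiers through as literals; a small sketch:
#
#   >>> from datetime import date
#   >>> format(date(2003, 10, 7), r"\D\a\y: j")
#   'Day: 7'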
|
f2f5b2c546a9260c4927d10e9e99e13c726dcdf14fbb207838f5d3e1834418f1 | import os
from asyncio import get_running_loop
from functools import wraps
from django.core.exceptions import SynchronousOnlyOperation
def async_unsafe(message):
"""
Decorator to mark functions as async-unsafe. Someone trying to access
the function while in an async context will get an error message.
"""
def decorator(func):
@wraps(func)
def inner(*args, **kwargs):
# Detect a running event loop in this thread.
try:
get_running_loop()
except RuntimeError:
pass
else:
if not os.environ.get("DJANGO_ALLOW_ASYNC_UNSAFE"):
raise SynchronousOnlyOperation(message)
# Pass onward.
return func(*args, **kwargs)
return inner
# If the message is actually a function, then be a no-arguments decorator.
if callable(message):
func = message
message = (
"You cannot call this from an async context - use a thread or "
"sync_to_async."
)
return decorator(func)
else:
return decorator
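# A minimal usage sketch; `connect` is a hypothetical method:
#
#   @async_unsafe
#   def connect(self):
#       ...
#
# Calling connect() from a thread with a running event loop then raises
# SynchronousOnlyOperation unless DJANGO_ALLOW_ASYNC_UNSAFE is set.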
try:
from contextlib import aclosing
except ImportError:
# TODO: Remove when dropping support for PY39.
from contextlib import AbstractAsyncContextManager
# Backport of contextlib.aclosing() from Python 3.10. Copyright (C) Python
# Software Foundation (see LICENSE.python).
class aclosing(AbstractAsyncContextManager):
"""
Async context manager for safely finalizing an asynchronously
cleaned-up resource such as an async generator, calling its
``aclose()`` method.
"""
def __init__(self, thing):
self.thing = thing
async def __aenter__(self):
return self.thing
async def __aexit__(self, *exc_info):
await self.thing.aclose()
|
15d936fb9f8147e28e5feaa52e85cbd2eb0e5a4776c1961eb7ba716f4ca4a0b8 | import base64
import datetime
import re
import unicodedata
from binascii import Error as BinasciiError
from email.utils import formatdate
from urllib.parse import (
ParseResult,
SplitResult,
_coerce_args,
_splitnetloc,
_splitparams,
quote,
scheme_chars,
unquote,
)
from urllib.parse import urlencode as original_urlencode
from urllib.parse import uses_params
from django.utils.datastructures import MultiValueDict
from django.utils.regex_helper import _lazy_re_compile
# Based on RFC 9110 Appendix A.
ETAG_MATCH = _lazy_re_compile(
r"""
\A( # start of string and capture group
(?:W/)? # optional weak indicator
" # opening quote
[^"]* # any sequence of non-quote characters
" # end quote
)\Z # end of string and capture group
""",
re.X,
)
MONTHS = "jan feb mar apr may jun jul aug sep oct nov dec".split()
__D = r"(?P<day>[0-9]{2})"
__D2 = r"(?P<day>[ 0-9][0-9])"
__M = r"(?P<mon>\w{3})"
__Y = r"(?P<year>[0-9]{4})"
__Y2 = r"(?P<year>[0-9]{2})"
__T = r"(?P<hour>[0-9]{2}):(?P<min>[0-9]{2}):(?P<sec>[0-9]{2})"
RFC1123_DATE = _lazy_re_compile(r"^\w{3}, %s %s %s %s GMT$" % (__D, __M, __Y, __T))
RFC850_DATE = _lazy_re_compile(r"^\w{6,9}, %s-%s-%s %s GMT$" % (__D, __M, __Y2, __T))
ASCTIME_DATE = _lazy_re_compile(r"^\w{3} %s %s %s %s$" % (__M, __D2, __T, __Y))
RFC3986_GENDELIMS = ":/?#[]@"
RFC3986_SUBDELIMS = "!$&'()*+,;="
# TODO: Remove when dropping support for PY38.
# Unsafe bytes to be removed per WHATWG spec.
_UNSAFE_URL_BYTES_TO_REMOVE = ["\t", "\r", "\n"]
def urlencode(query, doseq=False):
"""
A version of Python's urllib.parse.urlencode() function that can operate on
MultiValueDict and non-string values.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, "items"):
query = query.items()
query_params = []
for key, value in query:
if value is None:
raise TypeError(
"Cannot encode None for key '%s' in a query string. Did you "
"mean to pass an empty string or omit the value?" % key
)
elif not doseq or isinstance(value, (str, bytes)):
query_val = value
else:
try:
itr = iter(value)
except TypeError:
query_val = value
else:
# Consume generators and iterators, when doseq=True, to
# work around https://bugs.python.org/issue31706.
query_val = []
for item in itr:
if item is None:
raise TypeError(
"Cannot encode None for key '%s' in a query "
"string. Did you mean to pass an empty string or "
"omit the value?" % key
)
elif not isinstance(item, bytes):
item = str(item)
query_val.append(item)
query_params.append((key, query_val))
return original_urlencode(query_params, doseq)
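# For example:
#
#   >>> urlencode({"a": 1, "b": "x y"})
#   'a=1&b=x+y'
#   >>> urlencode({"a": [1, 2]}, doseq=True)
#   'a=1&a=2'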
def http_date(epoch_seconds=None):
"""
Format the time to match the RFC 5322 date format as specified by RFC 9110
Section 5.6.7.
`epoch_seconds` is a floating point number expressed in seconds since the
epoch, in UTC - such as that outputted by time.time(). If set to None, it
defaults to the current time.
Output a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
return formatdate(epoch_seconds, usegmt=True)
def parse_http_date(date):
"""
Parse a date format as specified by HTTP RFC 9110 Section 5.6.7.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Return an integer expressed in seconds since the epoch, in UTC.
"""
# email.utils.parsedate() does the job for RFC 1123 dates; unfortunately
# RFC 9110 makes it mandatory to support RFC 850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
tz = datetime.timezone.utc
year = int(m["year"])
if year < 100:
current_year = datetime.datetime.now(tz=tz).year
current_century = current_year - (current_year % 100)
if year - (current_year % 100) > 50:
# A year that appears to be more than 50 years in the future is
# interpreted as representing the past.
year += current_century - 100
else:
year += current_century
month = MONTHS.index(m["mon"].lower()) + 1
day = int(m["day"])
hour = int(m["hour"])
min = int(m["min"])
sec = int(m["sec"])
result = datetime.datetime(year, month, day, hour, min, sec, tzinfo=tz)
return int(result.timestamp())
except Exception as exc:
raise ValueError("%r is not a valid date" % date) from exc
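# Round-trip example using the classic RFC date:
#
#   >>> http_date(784111777)
#   'Sun, 06 Nov 1994 08:49:37 GMT'
#   >>> parse_http_date("Sun, 06 Nov 1994 08:49:37 GMT")
#   784111777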
def parse_http_date_safe(date):
"""
Same as parse_http_date, but return None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Convert a base 36 string to an int. Raise ValueError if the input won't fit
into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
return int(s, 36)
def int_to_base36(i):
"""Convert an integer to a base36 string."""
char_set = "0123456789abcdefghijklmnopqrstuvwxyz"
if i < 0:
raise ValueError("Negative base36 conversion input.")
if i < 36:
return char_set[i]
b36 = ""
while i != 0:
i, n = divmod(i, 36)
b36 = char_set[n] + b36
return b36
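# Round-trip example:
#
#   >>> int_to_base36(66)
#   '1u'
#   >>> base36_to_int("1u")
#   66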
def urlsafe_base64_encode(s):
"""
Encode a bytestring to a base64 string for use in URLs. Strip any trailing
equal signs.
"""
return base64.urlsafe_b64encode(s).rstrip(b"\n=").decode("ascii")
def urlsafe_base64_decode(s):
"""
Decode a base64 encoded string. Add back any trailing equal signs that
might have been stripped.
"""
s = s.encode()
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b"="))
except (LookupError, BinasciiError) as e:
raise ValueError(e)
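# Round-trip example:
#
#   >>> urlsafe_base64_encode(b"hello")
#   'aGVsbG8'
#   >>> urlsafe_base64_decode("aGVsbG8")
#   b'hello'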
def parse_etags(etag_str):
"""
Parse a string of ETags given in an If-None-Match or If-Match header as
defined by RFC 9110. Return a list of quoted ETags, or ['*'] if all ETags
should be matched.
"""
if etag_str.strip() == "*":
return ["*"]
else:
# Parse each ETag individually, and return any that are valid.
etag_matches = (ETAG_MATCH.match(etag.strip()) for etag in etag_str.split(","))
return [match[1] for match in etag_matches if match]
def quote_etag(etag_str):
"""
If the provided string is already a quoted ETag, return it. Otherwise, wrap
the string in quotes, making it a strong ETag.
"""
if ETAG_MATCH.match(etag_str):
return etag_str
else:
return '"%s"' % etag_str
def is_same_domain(host, pattern):
"""
Return ``True`` if the host is either an exact match or a match
to the wildcard pattern.
Any pattern beginning with a period matches a domain and all of its
subdomains. (e.g. ``.example.com`` matches ``example.com`` and
``foo.example.com``). Anything else is an exact string match.
"""
if not pattern:
return False
pattern = pattern.lower()
return (
pattern[0] == "."
and (host.endswith(pattern) or host == pattern[1:])
or pattern == host
)
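# For example:
#
#   >>> is_same_domain("foo.example.com", ".example.com")
#   True
#   >>> is_same_domain("example.com", ".example.com")
#   True
#   >>> is_same_domain("example.com.evil.net", ".example.com")
#   False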
def url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):
"""
Return ``True`` if the url uses an allowed host and a safe scheme.
Always return ``False`` on an empty url.
If ``require_https`` is ``True``, only 'https' will be considered a valid
scheme, as opposed to 'http' and 'https' with the default, ``False``.
Note: "True" doesn't entail that a URL is "safe". It may still be e.g.
quoted incorrectly. Ensure to also use django.utils.encoding.iri_to_uri()
on the path component of untrusted URLs.
"""
if url is not None:
url = url.strip()
if not url:
return False
if allowed_hosts is None:
allowed_hosts = set()
elif isinstance(allowed_hosts, str):
allowed_hosts = {allowed_hosts}
# Chrome treats \ completely as / in paths but it could be part of some
# basic auth credentials so we need to check both URLs.
return _url_has_allowed_host_and_scheme(
url, allowed_hosts, require_https=require_https
) and _url_has_allowed_host_and_scheme(
url.replace("\\", "/"), allowed_hosts, require_https=require_https
)
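# For example:
#
#   >>> url_has_allowed_host_and_scheme("/path/", allowed_hosts=None)
#   True
#   >>> url_has_allowed_host_and_scheme("https://example.com/", {"example.com"})
#   True
#   >>> url_has_allowed_host_and_scheme("//evil.com/", {"example.com"})
#   False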
# TODO: Remove when dropping support for PY38.
# Copied from urllib.parse.urlparse() but uses fixed urlsplit() function.
def _urlparse(url, scheme="", allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
splitresult = _urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = splitresult
if scheme in uses_params and ";" in url:
url, params = _splitparams(url)
else:
params = ""
result = ParseResult(scheme, netloc, url, params, query, fragment)
return _coerce_result(result)
# TODO: Remove when dropping support for PY38.
def _remove_unsafe_bytes_from_url(url):
for b in _UNSAFE_URL_BYTES_TO_REMOVE:
url = url.replace(b, "")
return url
# TODO: Remove when dropping support for PY38.
# Backport of urllib.parse.urlsplit() from Python 3.9.
def _urlsplit(url, scheme="", allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
url = _remove_unsafe_bytes_from_url(url)
scheme = _remove_unsafe_bytes_from_url(scheme)
netloc = query = fragment = ""
i = url.find(":")
if i > 0:
for c in url[:i]:
if c not in scheme_chars:
break
else:
scheme, url = url[:i].lower(), url[i + 1 :]
if url[:2] == "//":
netloc, url = _splitnetloc(url, 2)
if ("[" in netloc and "]" not in netloc) or (
"]" in netloc and "[" not in netloc
):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and "#" in url:
url, fragment = url.split("#", 1)
if "?" in url:
url, query = url.split("?", 1)
v = SplitResult(scheme, netloc, url, query, fragment)
return _coerce_result(v)
def _url_has_allowed_host_and_scheme(url, allowed_hosts, require_https=False):
# Chrome considers any URL with more than two slashes to be absolute, but
# urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith("///"):
return False
try:
url_info = _urlparse(url)
except ValueError: # e.g. invalid IPv6 addresses
return False
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
# In that URL, example.com is not the hostname but a path component. However,
# Chrome will still consider example.com to be the hostname, so we must not
# allow this syntax.
if not url_info.netloc and url_info.scheme:
return False
# Forbid URLs that start with control characters. Some browsers (like
# Chrome) ignore quite a few control characters at the start of a
# URL and might consider the URL as scheme relative.
if unicodedata.category(url[0])[0] == "C":
return False
scheme = url_info.scheme
# Consider URLs without a scheme (e.g. //example.com/p) to be http.
if not url_info.scheme and url_info.netloc:
scheme = "http"
valid_schemes = ["https"] if require_https else ["http", "https"]
return (not url_info.netloc or url_info.netloc in allowed_hosts) and (
not scheme or scheme in valid_schemes
)
def escape_leading_slashes(url):
"""
If redirecting to an absolute path (two leading slashes), a slash must be
escaped to prevent browsers from handling the path as schemaless and
redirecting to another host.
"""
if url.startswith("//"):
url = "/%2F{}".format(url[2:])
return url
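# For example:
#
#   >>> escape_leading_slashes("//example.com/path")
#   '/%2Fexample.com/path'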
def _parseparam(s):
while s[:1] == ";":
s = s[1:]
end = s.find(";")
while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
end = s.find(";", end + 1)
if end < 0:
end = len(s)
f = s[:end]
yield f.strip()
s = s[end:]
def parse_header_parameters(line):
"""
Parse a Content-type like header.
Return the main content-type and a dictionary of options.
"""
parts = _parseparam(";" + line)
key = parts.__next__().lower()
pdict = {}
for p in parts:
i = p.find("=")
if i >= 0:
has_encoding = False
name = p[:i].strip().lower()
if name.endswith("*"):
# Lang/encoding embedded in the value (like "filename*=UTF-8''file.ext")
# https://tools.ietf.org/html/rfc2231#section-4
name = name[:-1]
if p.count("'") == 2:
has_encoding = True
value = p[i + 1 :].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace("\\\\", "\\").replace('\\"', '"')
if has_encoding:
encoding, lang, value = value.split("'")
value = unquote(value, encoding=encoding)
pdict[name] = value
return key, pdict
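# For example:
#
#   >>> parse_header_parameters('text/html; charset="utf-8"')
#   ('text/html', {'charset': 'utf-8'})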
def content_disposition_header(as_attachment, filename):
"""
Construct a Content-Disposition HTTP header value from the given filename
as specified by RFC 6266.
"""
if filename:
disposition = "attachment" if as_attachment else "inline"
try:
filename.encode("ascii")
file_expr = 'filename="{}"'.format(
filename.replace("\\", "\\\\").replace('"', r"\"")
)
except UnicodeEncodeError:
file_expr = "filename*=utf-8''{}".format(quote(filename))
return f"{disposition}; {file_expr}"
elif as_attachment:
return "attachment"
else:
return None
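# For example:
#
#   >>> content_disposition_header(True, "report.pdf")
#   'attachment; filename="report.pdf"'
#   >>> content_disposition_header(False, "€.txt")
#   "inline; filename*=utf-8''%E2%82%AC.txt"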
|
9acfddb563677418d2eee891362bb05187ab53aa441f44d2f182d2187cb4b29c | import datetime
import decimal
import functools
import re
import unicodedata
from importlib import import_module
from django.conf import settings
from django.utils import dateformat, numberformat
from django.utils.functional import lazy
from django.utils.translation import check_for_language, get_language, to_locale
# format_cache is a mapping from (format_type, lang) to the format string.
# By using the cache, it is possible to avoid running get_format_modules
# repeatedly.
_format_cache = {}
_format_modules_cache = {}
ISO_INPUT_FORMATS = {
"DATE_INPUT_FORMATS": ["%Y-%m-%d"],
"TIME_INPUT_FORMATS": ["%H:%M:%S", "%H:%M:%S.%f", "%H:%M"],
"DATETIME_INPUT_FORMATS": [
"%Y-%m-%d %H:%M:%S",
"%Y-%m-%d %H:%M:%S.%f",
"%Y-%m-%d %H:%M",
"%Y-%m-%d",
],
}
FORMAT_SETTINGS = frozenset(
[
"DECIMAL_SEPARATOR",
"THOUSAND_SEPARATOR",
"NUMBER_GROUPING",
"FIRST_DAY_OF_WEEK",
"MONTH_DAY_FORMAT",
"TIME_FORMAT",
"DATE_FORMAT",
"DATETIME_FORMAT",
"SHORT_DATE_FORMAT",
"SHORT_DATETIME_FORMAT",
"YEAR_MONTH_FORMAT",
"DATE_INPUT_FORMATS",
"TIME_INPUT_FORMATS",
"DATETIME_INPUT_FORMATS",
]
)
def reset_format_cache():
"""Clear any cached formats.
This method is provided primarily for testing purposes,
so that the effects of cached formats can be removed.
"""
global _format_cache, _format_modules_cache
_format_cache = {}
_format_modules_cache = {}
def iter_format_modules(lang, format_module_path=None):
"""Find format modules."""
if not check_for_language(lang):
return
if format_module_path is None:
format_module_path = settings.FORMAT_MODULE_PATH
format_locations = []
if format_module_path:
if isinstance(format_module_path, str):
format_module_path = [format_module_path]
for path in format_module_path:
format_locations.append(path + ".%s")
format_locations.append("django.conf.locale.%s")
locale = to_locale(lang)
locales = [locale]
if "_" in locale:
locales.append(locale.split("_")[0])
for location in format_locations:
for loc in locales:
try:
yield import_module("%s.formats" % (location % loc))
except ImportError:
pass
def get_format_modules(lang=None):
"""Return a list of the format modules found."""
if lang is None:
lang = get_language()
if lang not in _format_modules_cache:
_format_modules_cache[lang] = list(
iter_format_modules(lang, settings.FORMAT_MODULE_PATH)
)
return _format_modules_cache[lang]
def get_format(format_type, lang=None, use_l10n=None):
"""
For a specific format type, return the format for the current
language (locale). Default to the format in the settings.
format_type is the name of the format, e.g. 'DATE_FORMAT'.
If use_l10n is provided and is not None, it forces the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
if use_l10n is None:
try:
use_l10n = settings._USE_L10N_INTERNAL
except AttributeError:
use_l10n = settings.USE_L10N
if use_l10n and lang is None:
lang = get_language()
format_type = str(format_type) # format_type may be lazy.
cache_key = (format_type, lang)
try:
return _format_cache[cache_key]
except KeyError:
pass
# The requested format_type has not been cached yet. Try to find it in any
# of the format_modules for the given lang if l10n is enabled. If it's not
# there or if l10n is disabled, fall back to the project settings.
val = None
if use_l10n:
for module in get_format_modules(lang):
val = getattr(module, format_type, None)
if val is not None:
break
if val is None:
if format_type not in FORMAT_SETTINGS:
return format_type
val = getattr(settings, format_type)
elif format_type in ISO_INPUT_FORMATS:
# If a list of input formats from one of the format_modules was
# retrieved, make sure the ISO_INPUT_FORMATS are in this list.
val = list(val)
for iso_input in ISO_INPUT_FORMATS.get(format_type, ()):
if iso_input not in val:
val.append(iso_input)
_format_cache[cache_key] = val
return val
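# For example (the result depends on the active locale, format modules, and
# settings; 'N j, Y' is Django's default DATE_FORMAT setting):
#
#   >>> get_format("DATE_FORMAT")
#   'N j, Y'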
get_format_lazy = lazy(get_format, str, list, tuple)
def date_format(value, format=None, use_l10n=None):
"""
Format a datetime.date or datetime.datetime object using a
localizable format.
If use_l10n is provided and is not None, that will force the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
return dateformat.format(
value, get_format(format or "DATE_FORMAT", use_l10n=use_l10n)
)
def time_format(value, format=None, use_l10n=None):
"""
Format a datetime.time object using a localizable format.
If use_l10n is provided and is not None, it forces the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
return dateformat.time_format(
value, get_format(format or "TIME_FORMAT", use_l10n=use_l10n)
)
def number_format(value, decimal_pos=None, use_l10n=None, force_grouping=False):
"""
Format a numeric value using localization settings.
If use_l10n is provided and is not None, it forces the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
if use_l10n is None:
try:
use_l10n = settings._USE_L10N_INTERNAL
except AttributeError:
use_l10n = settings.USE_L10N
lang = get_language() if use_l10n else None
return numberformat.format(
value,
get_format("DECIMAL_SEPARATOR", lang, use_l10n=use_l10n),
decimal_pos,
get_format("NUMBER_GROUPING", lang, use_l10n=use_l10n),
get_format("THOUSAND_SEPARATOR", lang, use_l10n=use_l10n),
force_grouping=force_grouping,
use_l10n=use_l10n,
)
def localize(value, use_l10n=None):
"""
Check if value is a localizable type (date, number...) and return it
formatted as a string using current locale format.
If use_l10n is provided and is not None, it forces the value to
be localized (or not), overriding the value of settings.USE_L10N.
"""
if isinstance(value, str): # Handle strings first for performance reasons.
return value
elif isinstance(value, bool): # Make sure booleans don't get treated as numbers
return str(value)
elif isinstance(value, (decimal.Decimal, float, int)):
if use_l10n is False:
return str(value)
return number_format(value, use_l10n=use_l10n)
elif isinstance(value, datetime.datetime):
return date_format(value, "DATETIME_FORMAT", use_l10n=use_l10n)
elif isinstance(value, datetime.date):
return date_format(value, use_l10n=use_l10n)
elif isinstance(value, datetime.time):
return time_format(value, use_l10n=use_l10n)
return value
def localize_input(value, default=None):
"""
Check if an input value is a localizable type and return it
formatted with the appropriate formatting string of the current locale.
"""
if isinstance(value, str): # Handle strings first for performance reasons.
return value
elif isinstance(value, bool): # Don't treat booleans as numbers.
return str(value)
elif isinstance(value, (decimal.Decimal, float, int)):
return number_format(value)
elif isinstance(value, datetime.datetime):
format = default or get_format("DATETIME_INPUT_FORMATS")[0]
format = sanitize_strftime_format(format)
return value.strftime(format)
elif isinstance(value, datetime.date):
format = default or get_format("DATE_INPUT_FORMATS")[0]
format = sanitize_strftime_format(format)
return value.strftime(format)
elif isinstance(value, datetime.time):
format = default or get_format("TIME_INPUT_FORMATS")[0]
return value.strftime(format)
return value
@functools.lru_cache
def sanitize_strftime_format(fmt):
"""
Ensure that certain specifiers are correctly padded with leading zeros.
For years < 1000, the specifiers %C, %F, %G, and %Y don't work as expected
with the strftime provided by glibc on Linux, as they don't pad the year or century
with leading zeros. Support for specifying the padding explicitly is
available, however, which can be used to fix this issue.
FreeBSD, macOS, and Windows do not support explicitly specifying the
padding, but return four digit years (with leading zeros) as expected.
This function checks whether the %Y produces a correctly padded string and,
if not, makes the following substitutions:
- %C → %02C
- %F → %010F
- %G → %04G
- %Y → %04Y
See https://bugs.python.org/issue13305 for more details.
"""
if datetime.date(1, 1, 1).strftime("%Y") == "0001":
return fmt
mapping = {"C": 2, "F": 10, "G": 4, "Y": 4}
return re.sub(
r"((?:^|[^%])(?:%%)*)%([CFGY])",
lambda m: r"%s%%0%s%s" % (m[1], mapping[m[2]], m[2]),
fmt,
)
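# On platforms whose strftime already pads years the format is returned
# unchanged; on glibc-based Linux the substitution applies, e.g.:
#
#   >>> sanitize_strftime_format("%Y-%m-%d")
#   '%04Y-%m-%d'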
def sanitize_separators(value):
"""
Sanitize a value according to the current decimal and
thousand separator setting. Used with form field input.
"""
if isinstance(value, str):
parts = []
decimal_separator = get_format("DECIMAL_SEPARATOR")
if decimal_separator in value:
value, decimals = value.split(decimal_separator, 1)
parts.append(decimals)
if settings.USE_THOUSAND_SEPARATOR:
thousand_sep = get_format("THOUSAND_SEPARATOR")
if (
thousand_sep == "."
and value.count(".") == 1
and len(value.split(".")[-1]) != 3
):
# Special case where we suspect a dot meant decimal separator
# (see #22171).
pass
else:
for replacement in {
thousand_sep,
unicodedata.normalize("NFKD", thousand_sep),
}:
value = value.replace(replacement, "")
parts.append(value)
value = ".".join(reversed(parts))
return value
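# A hedged sketch, assuming a locale with "," as the decimal separator, "."
# as the thousand separator, and USE_THOUSAND_SEPARATOR = True:
#
#   >>> sanitize_separators("1.234,56")
#   '1234.56'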
|
8ec25e62fd3e038883ebed4f7771de865d99b6c86913859e0537a2fb83f07220 | from django.utils.cache import patch_vary_headers
from django.utils.deprecation import MiddlewareMixin
from django.utils.regex_helper import _lazy_re_compile
from django.utils.text import compress_sequence, compress_string
re_accepts_gzip = _lazy_re_compile(r"\bgzip\b")
class GZipMiddleware(MiddlewareMixin):
"""
Compress content if the browser allows gzip compression.
Set the Vary header accordingly, so that caches will base their storage
on the Accept-Encoding header.
"""
max_random_bytes = 100
def process_response(self, request, response):
# It's not worth attempting to compress really short responses.
if not response.streaming and len(response.content) < 200:
return response
# Avoid gzipping if we've already got a content-encoding.
if response.has_header("Content-Encoding"):
return response
patch_vary_headers(response, ("Accept-Encoding",))
ae = request.META.get("HTTP_ACCEPT_ENCODING", "")
if not re_accepts_gzip.search(ae):
return response
if response.streaming:
if response.is_async:
# Pull to lexical scope to capture a fixed reference, in case
# streaming_content is set again later.
original_iterator = response.streaming_content
async def gzip_wrapper():
async for chunk in original_iterator:
yield compress_string(
chunk,
max_random_bytes=self.max_random_bytes,
)
response.streaming_content = gzip_wrapper()
else:
response.streaming_content = compress_sequence(
response.streaming_content,
max_random_bytes=self.max_random_bytes,
)
# Delete the `Content-Length` header for streaming content, because
# we won't know the compressed size until we stream it.
del response.headers["Content-Length"]
else:
# Return the compressed content only if it's actually shorter.
compressed_content = compress_string(
response.content,
max_random_bytes=self.max_random_bytes,
)
if len(compressed_content) >= len(response.content):
return response
response.content = compressed_content
response.headers["Content-Length"] = str(len(response.content))
# If there is a strong ETag, make it weak to fulfill the requirements
# of RFC 9110 Section 8.8.1 while also allowing conditional request
# matches on ETags.
etag = response.get("ETag")
if etag and etag.startswith('"'):
response.headers["ETag"] = "W/" + etag
response.headers["Content-Encoding"] = "gzip"
return response
|